522 files changed, 21182 insertions, 3916 deletions
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl index 8bca1d5cec0..e8473eae2a2 100644 --- a/Documentation/DocBook/tracepoint.tmpl +++ b/Documentation/DocBook/tracepoint.tmpl @@ -16,6 +16,15 @@ </address> </affiliation> </author> + <author> + <firstname>William</firstname> + <surname>Cohen</surname> + <affiliation> + <address> + <email>wcohen@redhat.com</email> + </address> + </affiliation> + </author> </authorgroup> <legalnotice> @@ -91,4 +100,8 @@ !Iinclude/trace/events/signal.h </chapter> + <chapter id="block"> + <title>Block IO</title> +!Iinclude/trace/events/block.h + </chapter> </book> diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt index a6d32e65d22..a8536cb8809 100644 --- a/Documentation/RCU/NMI-RCU.txt +++ b/Documentation/RCU/NMI-RCU.txt @@ -34,7 +34,7 @@ NMI handler. cpu = smp_processor_id(); ++nmi_count(cpu); - if (!rcu_dereference(nmi_callback)(regs, cpu)) + if (!rcu_dereference_sched(nmi_callback)(regs, cpu)) default_do_nmi(regs); nmi_exit(); @@ -47,12 +47,13 @@ function pointer. If this handler returns zero, do_nmi() invokes the default_do_nmi() function to handle a machine-specific NMI. Finally, preemption is restored. -Strictly speaking, rcu_dereference() is not needed, since this code runs -only on i386, which does not need rcu_dereference() anyway. However, -it is a good documentation aid, particularly for anyone attempting to -do something similar on Alpha. +In theory, rcu_dereference_sched() is not needed, since this code runs +only on i386, which in theory does not need rcu_dereference_sched() +anyway. However, in practice it is a good documentation aid, particularly +for anyone attempting to do something similar on Alpha or on systems +with aggressive optimizing compilers. -Quick Quiz: Why might the rcu_dereference() be necessary on Alpha, +Quick Quiz: Why might the rcu_dereference_sched() be necessary on Alpha, given that the code referenced by the pointer is read-only? @@ -99,17 +100,21 @@ invoke irq_enter() and irq_exit() on NMI entry and exit, respectively. Answer to Quick Quiz - Why might the rcu_dereference() be necessary on Alpha, given + Why might the rcu_dereference_sched() be necessary on Alpha, given that the code referenced by the pointer is read-only? Answer: The caller to set_nmi_callback() might well have - initialized some data that is to be used by the - new NMI handler. In this case, the rcu_dereference() - would be needed, because otherwise a CPU that received - an NMI just after the new handler was set might see - the pointer to the new NMI handler, but the old - pre-initialized version of the handler's data. - - More important, the rcu_dereference() makes it clear - to someone reading the code that the pointer is being - protected by RCU. + initialized some data that is to be used by the new NMI + handler. In this case, the rcu_dereference_sched() would + be needed, because otherwise a CPU that received an NMI + just after the new handler was set might see the pointer + to the new NMI handler, but the old pre-initialized + version of the handler's data. + + This same sad story can happen on other CPUs when using + a compiler with aggressive pointer-value speculation + optimizations. + + More important, the rcu_dereference_sched() makes it + clear to someone reading the code that the pointer is + being protected by RCU-sched. 
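For concreteness, the update side that pairs with the rcu_dereference_sched() above looks roughly like the following sketch (a simplified form of the set_nmi_callback() this document discusses; any data the new handler depends on must be fully initialized before the pointer is published):

	static nmi_callback_t nmi_callback = dummy_nmi_callback;

	void set_nmi_callback(nmi_callback_t callback)
	{
		/*
		 * rcu_assign_pointer() orders initialization of the
		 * handler's data before publication of the pointer,
		 * pairing with the rcu_dereference_sched() in do_nmi().
		 */
		rcu_assign_pointer(nmi_callback, callback);
	}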
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt index cbc180f9019..790d1a81237 100644 --- a/Documentation/RCU/checklist.txt +++ b/Documentation/RCU/checklist.txt @@ -260,7 +260,8 @@ over a rather long period of time, but improvements are always welcome! The reason that it is permissible to use RCU list-traversal primitives when the update-side lock is held is that doing so can be quite helpful in reducing code bloat when common code is - shared between readers and updaters. + shared between readers and updaters. Additional primitives + are provided for this case, as discussed in lockdep.txt. 10. Conversely, if you are in an RCU read-side critical section, and you don't hold the appropriate update-side lock, you -must- @@ -344,8 +345,8 @@ over a rather long period of time, but improvements are always welcome! requiring SRCU's read-side deadlock immunity or low read-side realtime latency. - Note that, rcu_assign_pointer() and rcu_dereference() relate to - SRCU just as they do to other forms of RCU. + Note that rcu_assign_pointer() relates to SRCU just as it does + to other forms of RCU. 15. The whole point of call_rcu(), synchronize_rcu(), and friends is to wait until all pre-existing readers have finished before diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt index fe24b58627b..d7a49b2f699 100644 --- a/Documentation/RCU/lockdep.txt +++ b/Documentation/RCU/lockdep.txt @@ -32,9 +32,20 @@ checking of rcu_dereference() primitives: srcu_dereference(p, sp): Check for SRCU read-side critical section. rcu_dereference_check(p, c): - Use explicit check expression "c". + Use explicit check expression "c". This is useful in + code that is invoked by both readers and updaters. rcu_dereference_raw(p) Don't check. (Use sparingly, if at all.) + rcu_dereference_protected(p, c): + Use explicit check expression "c", and omit all barriers + and compiler constraints. This is useful when the data + structure cannot change, for example, in code that is + invoked only by updaters. + rcu_access_pointer(p): + Return the value of the pointer and omit all barriers, + but retain the compiler constraints that prevent duplicating + or coalescing. This is useful when testing the + value of the pointer itself, for example, against NULL. The rcu_dereference_check() check expression can be any boolean expression, but would normally include one of the rcu_read_lock_held() @@ -59,7 +70,20 @@ In case (1), the pointer is picked up in an RCU-safe manner for vanilla RCU read-side critical sections, in case (2) the ->file_lock prevents any change from taking place, and finally, in case (3) the current task is the only task accessing the file_struct, again preventing any change -from taking place. +from taking place. If the above statement were invoked only from updater +code, it could instead be written as follows: + + file = rcu_dereference_protected(fdt->fd[fd], + lockdep_is_held(&files->file_lock) || + atomic_read(&files->count) == 1); + +This would verify cases #2 and #3 above, and furthermore lockdep would +complain if this were used in an RCU read-side critical section unless one +of these two cases held. Because rcu_dereference_protected() omits all +barriers and compiler constraints, it generates better code than do the +other flavors of rcu_dereference(). On the other hand, it is illegal +to use rcu_dereference_protected() if either the RCU-protected pointer +or the RCU-protected data that it points to can change concurrently.
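As a complement to the rcu_dereference_protected() example above, here is a minimal sketch of the rcu_access_pointer() NULL-test case; the pointer name and wrapper function are hypothetical:

	struct foo *gp;	/* some RCU-protected global pointer */

	static bool foo_is_registered(void)
	{
		/*
		 * Only the pointer value itself is tested; the pointee is
		 * never dereferenced.  This is therefore legal outside an
		 * RCU read-side critical section and emits no memory
		 * barriers.
		 */
		return rcu_access_pointer(gp) != NULL;
	}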
There are currently only "universal" versions of the rcu_assign_pointer() and RCU list-/tree-traversal primitives, which do not (yet) check for diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index 1dc00ee9716..cfaac34c455 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt @@ -840,6 +840,12 @@ SRCU: Initialization/cleanup init_srcu_struct cleanup_srcu_struct +All: lockdep-checked RCU-protected pointer access + + rcu_dereference_check + rcu_dereference_protected + rcu_access_pointer + See the comment headers in the source code (or the docbook generated from them) for more information. diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index 6fab97ea7e6..508b5b2b028 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -1162,8 +1162,8 @@ where a driver received a request ala this before: As mentioned, there is no virtual mapping of a bio. For DMA, this is not a problem as the driver probably never will need a virtual mapping. -Instead it needs a bus mapping (pci_map_page for a single segment or -use blk_rq_map_sg for scatter gather) to be able to ship it to the driver. For +Instead it needs a bus mapping (dma_map_page for a single segment or +use dma_map_sg for scatter gather) to be able to ship it to the driver. For PIO drivers (or drivers that need to revert to PIO transfer once in a while (IDE for example)), where the CPU is doing the actual data transfer a virtual mapping is needed. If the driver supports highmem I/O, diff --git a/Documentation/fb/imacfb.txt b/Documentation/fb/efifb.txt index 316ec9bb7de..a59916c29b3 100644 --- a/Documentation/fb/imacfb.txt +++ b/Documentation/fb/efifb.txt @@ -1,9 +1,9 @@ -What is imacfb? +What is efifb? =============== This is a generic EFI platform driver for Intel based Apple computers. -Imacfb is only for EFI booted Intel Macs. +efifb is only for EFI booted Intel Macs. Supported Hardware ================== @@ -16,16 +16,16 @@ MacMini How to use it? ============== -Imacfb does not have any kind of autodetection of your machine. +efifb does not have any kind of autodetection of your machine. You have to add the following kernel parameters in your elilo.conf: Macbook : - video=imacfb:macbook + video=efifb:macbook MacMini : - video=imacfb:mini + video=efifb:mini Macbook Pro 15", iMac 17" : - video=imacfb:i17 + video=efifb:i17 Macbook Pro 17", iMac 20" : - video=imacfb:i20 + video=efifb:i20 -- Edgar Hucek <gimli@dark-green.com> diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt index 8490480ce43..c0fc1c75fd8 100644 --- a/Documentation/input/multi-touch-protocol.txt +++ b/Documentation/input/multi-touch-protocol.txt @@ -68,6 +68,22 @@ like: SYN_MT_REPORT SYN_REPORT +Here is the sequence after lifting one of the fingers: + + ABS_MT_POSITION_X + ABS_MT_POSITION_Y + SYN_MT_REPORT + SYN_REPORT + +And here is the sequence after lifting the remaining finger: + + SYN_MT_REPORT + SYN_REPORT + +If the driver reports one of BTN_TOUCH or ABS_PRESSURE in addition to the +ABS_MT events, the last SYN_MT_REPORT event may be omitted. Otherwise, the +last SYN_REPORT will be dropped by the input core, resulting in no +zero-finger event reaching userland. Event Semantics --------------- @@ -217,11 +233,6 @@ where examples can be found. difference between the contact position and the approaching tool position could be used to derive tilt. [2] The list can of course be extended. 
-[3] The multi-touch X driver is currently in the prototyping stage. At the -time of writing (April 2009), the MT protocol is not yet merged, and the -prototype implements finger matching, basic mouse support and two-finger -scrolling. The project aims at improving the quality of current multi-touch -functionality available in the Synaptics X driver, and in addition -implement more advanced gestures. +[3] Multitouch X driver project: http://bitmath.org/code/multitouch/. [4] See the section on event computation. [5] See the section on finger tracking. diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index e4cbca58536..e2202e93b14 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -320,11 +320,6 @@ and is between 256 and 4096 characters. It is defined in the file amd_iommu= [HW,X86-64] Pass parameters to the AMD IOMMU driver in the system. Possible values are: - isolate - enable device isolation (each device, as far - as possible, will get its own protection - domain) [default] - share - put every device behind one IOMMU into the - same protection domain fullflush - enable flushing of IO/TLB entries when they are unmapped. Otherwise they are flushed before they will be reused, which diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt new file mode 100644 index 00000000000..7ee770b5ef5 --- /dev/null +++ b/Documentation/networking/stmmac.txt @@ -0,0 +1,143 @@ + STMicroelectronics 10/100/1000 Synopsys Ethernet driver +Copyright (C) 2007-2010 STMicroelectronics Ltd +Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers +(Synopsys IP blocks); it has been fully tested on STLinux platforms. +Currently this network device driver is for all STM embedded MAC/GMAC +(7xxx SoCs). +DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100 +Universal version 4.0 have been used for developing the first code +implementation. +For more information, please also visit: www.stlinux.com +1) Kernel Configuration +The kernel configuration option is STMMAC_ETH: + Device Drivers ---> Network device support ---> Ethernet (1000 Mbit) ---> + STMicroelectronics 10/100/1000 Ethernet driver (STMMAC_ETH) +2) Driver parameters list: + debug: message level (0: no output, 16: all); + phyaddr: to manually provide the physical address to the PHY device; + dma_rxsize: DMA rx ring size; + dma_txsize: DMA tx ring size; + buf_sz: DMA buffer size; + tc: control the HW FIFO threshold; + tx_coe: Enable/Disable Tx Checksum Offload engine; + watchdog: transmit timeout (in milliseconds); + flow_ctrl: Flow control ability [on/off]; + pause: Flow Control Pause Time; + tmrate: timer period (only if timer optimisation is configured). +3) Command line options +Driver parameters can also be passed on the command line using: + stmmaceth=dma_rxsize:128,dma_txsize:512 +4) Driver information and notes +4.1) Transmit process +The xmit method is invoked when the kernel needs to transmit a packet; it sets +the descriptors in the ring and informs the DMA engine that there is a packet +ready to be transmitted. +Once the controller has finished transmitting the packet, an interrupt is +triggered, so the driver can release the socket buffers. +By default, the driver sets the NETIF_F_SG bit in the features field of the +net_device structure, enabling the scatter/gather feature.
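To make the transmit flow above concrete, here is a simplified, hypothetical sketch of such an xmit method; struct sketch_priv, struct dma_desc, and the desc_prepare()/desc_set_own()/dma_tx_poll_demand() helpers are illustrative stand-ins for the driver's real descriptor and register accessors:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct sketch_priv {			/* hypothetical private state */
		void __iomem *ioaddr;		/* mapped DMA registers */
		unsigned int cur_tx;
		unsigned int dma_tx_size;
		struct dma_desc *dma_tx;	/* TX descriptor ring */
		struct sk_buff **tx_skbuff;	/* skbs owned by the ring */
	};

	static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct sketch_priv *priv = netdev_priv(dev);
		unsigned int entry = priv->cur_tx % priv->dma_tx_size;

		/* Remember the skb: it is released from the TX interrupt
		 * once the controller reports the transmission complete. */
		priv->tx_skbuff[entry] = skb;

		/* Set up the next descriptor in the ring... */
		desc_prepare(&priv->dma_tx[entry], skb->data, skb->len);
		desc_set_own(&priv->dma_tx[entry]);
		priv->cur_tx++;

		/* ...and inform the DMA engine that a packet is ready. */
		dma_tx_poll_demand(priv->ioaddr);

		return NETDEV_TX_OK;
	}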
+ +4.2) Receive process +When one or more packets are received, an interrupt is raised. The interrupts +are not queued, so the driver has to scan all the descriptors in the ring during +the receive process. +Reception is based on NAPI, so the interrupt handler only signals that there is +work to be done, and then exits. +The poll method will then be scheduled at some future point. +The incoming packets are stored, by the DMA, in a list of pre-allocated socket +buffers in order to avoid a memcpy (zero-copy). + +4.3) Timer-Driven Interrupt +Instead of having the device asynchronously notify the driver of frame receptions, +the driver configures a timer to generate an interrupt at regular intervals. +Depending on the granularity of the timer, the frames received by the device +will experience different levels of latency. Some NICs have a dedicated timer +device to perform this task. STMMAC can use either the RTC device or the TMU +channel 2 on STLinux platforms. +The timer's frequency can be passed to the driver as a parameter; when changing it, +take care of both the hardware capability and the network stability/performance impact. +Several performance tests on STM platforms showed that this optimisation spares +the CPU while maintaining maximum throughput. + +4.4) WOL +The Wake-on-LAN feature through Magic Frame is only supported for the GMAC +core. + +4.5) DMA descriptors +The driver handles both normal and enhanced descriptors. The latter have only been +tested on DWC Ether MAC 10/100/1000 Universal version 3.41a. + +4.6) Ethtool support +Ethtool is supported. Driver statistics and internal errors can be obtained using +the ethtool -S ethX command. It is also possible to dump registers etc. + +4.7) Jumbo and Segmentation Offloading +Jumbo frames are supported and tested for the GMAC. +GSO has also been added, but it is performed in software. +LRO is not supported. + +4.8) Physical +The driver is compatible with the PAL to work with PHY and GPHY devices. + +4.9) Platform information +Several pieces of information come from the platform; please refer to the +driver's header file in the include/linux directory. +struct plat_stmmacenet_data { + int bus_id; + int pbl; + int has_gmac; + void (*fix_mac_speed)(void *priv, unsigned int speed); + void (*bus_setup)(unsigned long ioaddr); +#ifdef CONFIG_STM_DRIVERS + struct stm_pad_config *pad_config; +#endif + void *bsp_priv; +}; +Where: +- pbl (Programmable Burst Length) is the maximum number of + beats to be transferred in one DMA transaction. + GMAC also enables the 4xPBL by default. +- fix_mac_speed and bus_setup are used to configure internal target + registers (on STM platforms); +- has_gmac: GMAC core is on board (get it at run-time in the next step); +- bus_id: bus identifier. +struct plat_stmmacphy_data { + int bus_id; + int phy_addr; + unsigned int phy_mask; + int interface; + int (*phy_reset)(void *priv); + void *priv; +}; +Where: +- bus_id: bus identifier; +- phy_addr: physical address used for the attached phy device; + set it to -1 to get it at run-time; +- interface: physical MII interface mode; +- phy_reset: hook to reset the HW function. +TODO: +- Continue to make the driver more generic and suitable for other Synopsys + Ethernet controllers used on other architectures (e.g. ARM). +- 10G controllers are not supported. +- The MAC uses normal descriptors and the GMAC uses enhanced ones. + This is a limitation that should be reviewed; the MAC might want to + use the enhanced structure. +- Checksumming: Rx/Tx csum is done in HW for the GMAC only.
+- Review the timer optimisation code to use an embedded device that seems to be + available in new chip generations. diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 0e58b453917..e8c8f4f06c6 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -41,11 +41,12 @@ SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in SOF_TIMESTAMPING_TX/RX determine how time stamps are generated. SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the following control message: - struct scm_timestamping { - struct timespec systime; - struct timespec hwtimetrans; - struct timespec hwtimeraw; - }; + +struct scm_timestamping { + struct timespec systime; + struct timespec hwtimetrans; + struct timespec hwtimeraw; +}; recvmsg() can be used to get this control message for regular incoming packets. For send time stamps the outgoing packet is looped back to @@ -87,12 +88,13 @@ by the network device and will be empty without that support. SIOCSHWTSTAMP: Hardware time stamping must also be initialized for each device driver -that is expected to do hardware time stamping. The parameter is: +that is expected to do hardware time stamping. The parameter is defined in +/include/linux/net_tstamp.h as: struct hwtstamp_config { - int flags; /* no flags defined right now, must be zero */ - int tx_type; /* HWTSTAMP_TX_* */ - int rx_filter; /* HWTSTAMP_FILTER_* */ + int flags; /* no flags defined right now, must be zero */ + int tx_type; /* HWTSTAMP_TX_* */ + int rx_filter; /* HWTSTAMP_FILTER_* */ }; Desired behavior is passed into the kernel and to a specific device by @@ -139,42 +141,56 @@ enum { /* time stamp any incoming packet */ HWTSTAMP_FILTER_ALL, - /* return value: time stamp all packets requested plus some others */ - HWTSTAMP_FILTER_SOME, + /* return value: time stamp all packets requested plus some others */ + HWTSTAMP_FILTER_SOME, /* PTP v1, UDP, any kind of event packet */ HWTSTAMP_FILTER_PTP_V1_L4_EVENT, - ... + /* for the complete list of values, please check + * the include file /include/linux/net_tstamp.h + */ }; DEVICE IMPLEMENTATION A driver which supports hardware time stamping must support the -SIOCSHWTSTAMP ioctl. Time stamps for received packets must be stored -in the skb with skb_hwtstamp_set(). +SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with +the actual values as described in the section on SIOCSHWTSTAMP. + +Time stamps for received packets must be stored in the skb. To get a pointer +to the shared time stamp structure of the skb call skb_hwtstamps(). Then +set the time stamps in the structure: + +struct skb_shared_hwtstamps { + /* hardware time stamp transformed into duration + * since arbitrary point in time + */ + ktime_t hwtstamp; + ktime_t syststamp; /* hwtstamp transformed to system time base */ +}; Time stamps for outgoing packets are to be generated as follows: -- In hard_start_xmit(), check if skb_hwtstamp_check_tx_hardware() - returns non-zero. If yes, then the driver is expected - to do hardware time stamping. +- In hard_start_xmit(), check if skb_tx(skb)->hardware is set non-zero. + If yes, then the driver is expected to do hardware time stamping. - If this is possible for the skb and requested, then declare - that the driver is doing the time stamping by calling - skb_hwtstamp_tx_in_progress(). A driver not supporting - hardware time stamping doesn't do that. A driver must never - touch sk_buff::tstamp!
It is used to store how time stamping - for an outgoing packets is to be done. + that the driver is doing the time stamping by setting the field + skb_tx(skb)->in_progress non-zero. You might want to keep a pointer + to the associated skb for the next step and not free the skb. A driver + not supporting hardware time stamping doesn't do that. A driver must + never touch sk_buff::tstamp! It is used to store software generated + time stamps by the network subsystem. - As soon as the driver has sent the packet and/or obtained a hardware time stamp for it, it passes the time stamp back by calling skb_hwtstamp_tx() with the original skb, the raw - hardware time stamp and a handle to the device (necessary - to convert the hardware time stamp to system time). If obtaining - the hardware time stamp somehow fails, then the driver should - not fall back to software time stamping. The rationale is that - this would occur at a later time in the processing pipeline - than other software time stamping and therefore could lead - to unexpected deltas between time stamps. -- If the driver did not call skb_hwtstamp_tx_in_progress(), then + hardware time stamp. skb_hwtstamp_tx() clones the original skb and + adds the timestamps, therefore the original skb has to be freed now. + If obtaining the hardware time stamp somehow fails, then the driver + should not fall back to software time stamping. The rationale is that + this would occur at a later time in the processing pipeline than other + software time stamping and therefore could lead to unexpected deltas + between time stamps. +- If the driver did not set skb_tx(skb)->in_progress, then + dev_hard_start_xmit() checks whether software time stamping + is wanted as fallback and potentially generates the time stamp. diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt index f4dd3bf99d1..98d14cb8a85 100644 --- a/Documentation/sound/alsa/HD-Audio.txt +++ b/Documentation/sound/alsa/HD-Audio.txt @@ -119,10 +119,18 @@ the codec slots 0 and 1 no matter what the hardware reports. Interrupt Handling ~~~~~~~~~~~~~~~~~~ -In rare but some cases, the interrupt isn't properly handled as -default. You would notice this by the DMA transfer error reported by -ALSA PCM core, for example. Using MSI might help in such a case. -Pass `enable_msi=1` option for enabling MSI. +The HD-audio driver uses MSI by default (if available) since the +2.6.33 kernel, as MSI works better on some machines and is generally +better for performance. However, Nvidia controllers showed bad +regressions with MSI (especially in combination with AMD chipsets), +so MSI is disabled for them. + +There still seem to be other devices that don't work with MSI. If you +see a regression in sound quality (stuttering, etc.) or a lock-up +with a recent kernel, try passing the `enable_msi=0` option to disable +MSI. If that works, you can add the known bad device to the blacklist +defined in hda_intel.c. In such a case, please report it and send the +patch back to the upstream developer.
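For example, assuming the controller is bound to the usual snd-hda-intel module, MSI can be disabled persistently via a modprobe option file (the file path below is just a common convention):

	# /etc/modprobe.d/alsa-base.conf
	options snd-hda-intel enable_msi=0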
HD-AUDIO CODEC diff --git a/Documentation/watchdog/src/watchdog-simple.c b/Documentation/watchdog/src/watchdog-simple.c index 4cf72f3fa8e..ba45803a221 100644 --- a/Documentation/watchdog/src/watchdog-simple.c +++ b/Documentation/watchdog/src/watchdog-simple.c @@ -17,9 +17,6 @@ int main(void) ret = -1; break; } - ret = fsync(fd); - if (ret) - break; sleep(10); } close(fd); diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c index a750532ffcf..63fdc34ceb9 100644 --- a/Documentation/watchdog/src/watchdog-test.c +++ b/Documentation/watchdog/src/watchdog-test.c @@ -31,6 +31,8 @@ static void keep_alive(void) */ int main(int argc, char *argv[]) { + int flags; + fd = open("/dev/watchdog", O_WRONLY); if (fd == -1) { @@ -41,12 +43,14 @@ int main(int argc, char *argv[]) if (argc > 1) { if (!strncasecmp(argv[1], "-d", 2)) { - ioctl(fd, WDIOC_SETOPTIONS, WDIOS_DISABLECARD); + flags = WDIOS_DISABLECARD; + ioctl(fd, WDIOC_SETOPTIONS, &flags); fprintf(stderr, "Watchdog card disabled.\n"); fflush(stderr); exit(0); } else if (!strncasecmp(argv[1], "-e", 2)) { - ioctl(fd, WDIOC_SETOPTIONS, WDIOS_ENABLECARD); + flags = WDIOS_ENABLECARD; + ioctl(fd, WDIOC_SETOPTIONS, &flags); fprintf(stderr, "Watchdog card enabled.\n"); fflush(stderr); exit(0); diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt index 4cc4ba9d715..eb7132ed8bb 100644 --- a/Documentation/watchdog/watchdog-api.txt +++ b/Documentation/watchdog/watchdog-api.txt @@ -222,11 +222,10 @@ returned value is the temperature in degrees fahrenheit. ioctl(fd, WDIOC_GETTEMP, &temperature); Finally the SETOPTIONS ioctl can be used to control some aspects of -the cards operation; right now the pcwd driver is the only one -supporting this ioctl. +the card's operation.
int options = 0; - ioctl(fd, WDIOC_SETOPTIONS, options); + ioctl(fd, WDIOC_SETOPTIONS, &options); The following options are available: diff --git a/MAINTAINERS b/MAINTAINERS index 3d29fa38988..a0e3c3a47a5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -485,8 +485,8 @@ S: Maintained F: drivers/input/mouse/bcm5974.c APPLE SMC DRIVER -M: Nicolas Boichat <nicolas@boichat.ch> -L: mactel-linux-devel@lists.sourceforge.net +M: Henrik Rydberg <rydberg@euromail.se> +L: lm-sensors@lm-sensors.org S: Maintained F: drivers/hwmon/applesmc.c @@ -971,6 +971,16 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.mcuos.com S: Maintained +ARM/U300 MACHINE SUPPORT +M: Linus Walleij <linus.walleij@stericsson.com> +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Supported +F: arch/arm/mach-u300/ +F: drivers/i2c/busses/i2c-stu300.c +F: drivers/rtc/rtc-coh901331.c +F: drivers/watchdog/coh901327_wdt.c +F: drivers/dma/coh901318* + ARM/U8500 ARM ARCHITECTURE M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -2474,12 +2484,6 @@ L: linuxppc-dev@ozlabs.org S: Odd Fixes F: drivers/char/hvc_* -VIRTIO CONSOLE DRIVER -M: Amit Shah <amit.shah@redhat.com> -L: virtualization@lists.linux-foundation.org -S: Maintained -F: drivers/char/virtio_console.c - iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER M: Peter Jones <pjones@redhat.com> M: Konrad Rzeszutek Wilk <konrad@kernel.org> @@ -5971,6 +5975,13 @@ S: Maintained F: Documentation/filesystems/vfat.txt F: fs/fat/ +VIRTIO CONSOLE DRIVER +M: Amit Shah <amit.shah@redhat.com> +L: virtualization@lists.linux-foundation.org +S: Maintained +F: drivers/char/virtio_console.c +F: include/linux/virtio_console.h + VIRTIO HOST (VHOST) M: "Michael S. Tsirkin" <mst@redhat.com> L: kvm@vger.kernel.org @@ -1,8 +1,8 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 34 -EXTRAVERSION = -rc3 -NAME = Man-Eating Seals of Antiquity +EXTRAVERSION = -rc5 +NAME = Sheep on Meth # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 0f23009170a..6ab6b337a91 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -172,7 +172,7 @@ not_angel: adr r0, LC0 ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp}) THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} ) - THUMB( ldr sp, [r0, #28] ) + THUMB( ldr sp, [r0, #32] ) subs r0, r0, r1 @ calculate the delta offset @ if delta is zero, we are diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 7f36d00600b..feb988a7ec3 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -11,7 +11,11 @@ #define kmap_prot PAGE_KERNEL -#define flush_cache_kmaps() flush_cache_all() +#define flush_cache_kmaps() \ + do { \ + if (cache_is_vivt()) \ + flush_cache_all(); \ + } while (0) extern pte_t *pkmap_page_table; @@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page); extern void *kmap_high_get(struct page *page); extern void kunmap_high(struct page *page); +extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte); +extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte); + +/* + * The following functions are already defined by <linux/highmem.h> + * when CONFIG_HIGHMEM is not set. 
+ */ +#ifdef CONFIG_HIGHMEM extern void *kmap(struct page *page); extern void kunmap(struct page *page); extern void *kmap_atomic(struct page *page, enum km_type type); extern void kunmap_atomic(void *kvaddr, enum km_type type); extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); extern struct page *kmap_atomic_to_page(const void *ptr); +#endif #endif diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h index c019949a518..c4b2ea3fbe4 100644 --- a/arch/arm/include/asm/kmap_types.h +++ b/arch/arm/include/asm/kmap_types.h @@ -18,6 +18,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_L1_CACHE, KM_L2_CACHE, KM_TYPE_NR }; diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h index bf65e9f4525..47f023aa849 100644 --- a/arch/arm/include/asm/ucontext.h +++ b/arch/arm/include/asm/ucontext.h @@ -59,23 +59,22 @@ struct iwmmxt_sigframe { #endif /* CONFIG_IWMMXT */ #ifdef CONFIG_VFP -#if __LINUX_ARM_ARCH__ < 6 -/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra - * word after the registers, and a word of padding at the end for - * alignment. */ #define VFP_MAGIC 0x56465001 -#define VFP_STORAGE_SIZE 152 -#else -#define VFP_MAGIC 0x56465002 -#define VFP_STORAGE_SIZE 144 -#endif struct vfp_sigframe { unsigned long magic; unsigned long size; - union vfp_state storage; -}; + struct user_vfp ufp; + struct user_vfp_exc ufp_exc; +} __attribute__((__aligned__(8))); + +/* + * 8 bytes for magic and size, 264 bytes for ufp, 12 bytes for ufp_exc, + * 4 bytes of padding. + */ +#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe) + #endif /* CONFIG_VFP */ /* @@ -91,7 +90,7 @@ struct aux_sigframe { #ifdef CONFIG_IWMMXT struct iwmmxt_sigframe iwmmxt; #endif -#if 0 && defined CONFIG_VFP /* Not yet saved. */ +#ifdef CONFIG_VFP struct vfp_sigframe vfp; #endif /* Something that isn't a valid magic number for any coprocessor. */ diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h index df95e050f9d..05ac4b06876 100644 --- a/arch/arm/include/asm/user.h +++ b/arch/arm/include/asm/user.h @@ -83,11 +83,21 @@ struct user{ /* * User specific VFP registers. If only VFPv2 is present, registers 16 to 31 - * are ignored by the ptrace system call. + * are ignored by the ptrace system call and the signal handler. */ struct user_vfp { unsigned long long fpregs[32]; unsigned long fpscr; }; +/* + * VFP exception registers exposed to user space during signal delivery. + * Fields not relevant to the current VFP architecture are ignored.
+ */ +struct user_vfp_exc { + unsigned long fpexc; + unsigned long fpinst; + unsigned long fpinst2; +}; + #endif /* _ARM_USER_H */ diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index e7714f367eb..907d5a620bc 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -18,6 +18,7 @@ #include <asm/cacheflush.h> #include <asm/ucontext.h> #include <asm/unistd.h> +#include <asm/vfp.h> #include "ptrace.h" #include "signal.h" @@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) #endif +#ifdef CONFIG_VFP + +static int preserve_vfp_context(struct vfp_sigframe __user *frame) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *h = &thread->vfpstate.hard; + const unsigned long magic = VFP_MAGIC; + const unsigned long size = VFP_STORAGE_SIZE; + int err = 0; + + vfp_sync_hwstate(thread); + __put_user_error(magic, &frame->magic, err); + __put_user_error(size, &frame->size, err); + + /* + * Copy the floating point registers. There can be unused + * registers; see asm/hwcap.h for details. + */ + err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs, + sizeof(h->fpregs)); + /* + * Copy the status and control register. + */ + __put_user_error(h->fpscr, &frame->ufp.fpscr, err); + + /* + * Copy the exception registers. + */ + __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err); + __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); + __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); + + return err ? -EFAULT : 0; +} + +static int restore_vfp_context(struct vfp_sigframe __user *frame) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *h = &thread->vfpstate.hard; + unsigned long magic; + unsigned long size; + unsigned long fpexc; + int err = 0; + + __get_user_error(magic, &frame->magic, err); + __get_user_error(size, &frame->size, err); + + if (err) + return -EFAULT; + if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) + return -EINVAL; + + /* + * Copy the floating point registers. There can be unused + * registers; see asm/hwcap.h for details. + */ + err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs, + sizeof(h->fpregs)); + /* + * Copy the status and control register. + */ + __get_user_error(h->fpscr, &frame->ufp.fpscr, err); + + /* + * Sanitise and restore the exception registers. + */ + __get_user_error(fpexc, &frame->ufp_exc.fpexc, err); + /* Ensure the VFP is enabled. */ + fpexc |= FPEXC_EN; + /* Ensure FPINST2 is invalid and the exception flag is cleared. */ + fpexc &= ~(FPEXC_EX | FPEXC_FP2V); + h->fpexc = fpexc; + + __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); + __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); + + if (!err) + vfp_flush_hwstate(thread); + + return err ? -EFAULT : 0; +} + +#endif + /* * Do a signal return; undo the signal stack. These are aligned to 64-bit.
*/ @@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) err |= restore_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP -// if (err == 0) -// err |= vfp_restore_state(&sf->aux.vfp); + if (err == 0) + err |= restore_vfp_context(&aux->vfp); #endif return err; @@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) err |= preserve_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP -// if (err == 0) -// err |= vfp_save_state(&sf->aux.vfp); + if (err == 0) + err |= preserve_vfp_context(&aux->vfp); #endif __put_user_error(0, &aux->end_magic, err); diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile index 027dd570dcc..d4004557532 100644 --- a/arch/arm/mach-at91/Makefile +++ b/arch/arm/mach-at91/Makefile @@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_d obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o -obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o - obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o +obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o +obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S index 987fab3d846..9c5b48e68a7 100644 --- a/arch/arm/mach-at91/pm_slowclock.S +++ b/arch/arm/mach-at91/pm_slowclock.S @@ -175,8 +175,6 @@ ENTRY(at91_slow_clock) orr r3, r3, #(1 << 29) /* bit 29 always set */ str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] - wait_pllalock - /* Save PLLB setting and disable it */ ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] str r3, .saved_pllbr @@ -184,8 +182,6 @@ ENTRY(at91_slow_clock) mov r3, #AT91_PMC_PLLCOUNT str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] - wait_pllblock - /* Turn off the main oscillator */ ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)] bic r3, r3, #AT91_PMC_MOSCEN @@ -205,13 +201,25 @@ ENTRY(at91_slow_clock) ldr r3, .saved_pllbr str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] + tst r3, #(AT91_PMC_MUL & 0xff0000) + bne 1f + tst r3, #(AT91_PMC_MUL & ~0xff0000) + beq 2f +1: wait_pllblock +2: /* Restore PLLA setting */ ldr r3, .saved_pllar str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] + tst r3, #(AT91_PMC_MUL & 0xff0000) + bne 3f + tst r3, #(AT91_PMC_MUL & ~0xff0000) + beq 4f +3: wait_pllalock +4: #ifdef SLOWDOWN_MASTER_CLOCK /* diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c index 2ccf670ce1a..29c0a911df2 100644 --- a/arch/arm/mach-bcmring/dma.c +++ b/arch/arm/mach-bcmring/dma.c @@ -2221,11 +2221,15 @@ EXPORT_SYMBOL(dma_map_create_descriptor_ring); int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ int dirtied /* non-zero if any of the pages were modified */ ) { + + int rc = 0; int regionIdx; int segmentIdx; DMA_Region_t *region; DMA_Segment_t *segment; + down(&memMap->lock); + for 
(regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { region = &memMap->region[regionIdx]; @@ -2239,7 +2243,8 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ printk(KERN_ERR "%s: vmalloc'd pages are not yet supported\n", __func__); - return -EINVAL; + rc = -EINVAL; + goto out; } case DMA_MEM_TYPE_KMALLOC: @@ -2276,7 +2281,8 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ printk(KERN_ERR "%s: Unsupported memory type: %d\n", __func__, region->memType); - return -EINVAL; + rc = -EINVAL; + goto out; } } @@ -2314,9 +2320,10 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ memMap->numRegionsUsed = 0; memMap->inUse = 0; +out: up(&memMap->lock); - return 0; + return rc; } EXPORT_SYMBOL(dma_unmap); diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 27772e18e45..0d6ee583f65 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -758,7 +758,6 @@ static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = { [IRQ_MMCINT] = 7, [IRQ_DM365_MMCINT1] = 7, [IRQ_DM365_PWMINT3] = 7, - [IRQ_DDRINT] = 4, [IRQ_AEMIFINT] = 2, [IRQ_DM365_SDIOINT1] = 2, [IRQ_TINT0_TINT12] = 7, diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c index 02d939853b8..53137387aee 100644 --- a/arch/arm/mach-davinci/dma.c +++ b/arch/arm/mach-davinci/dma.c @@ -1267,7 +1267,8 @@ int edma_start(unsigned channel) /* EDMA channel with event association */ pr_debug("EDMA: ER%d %08x\n", j, edma_shadow0_read_array(ctlr, SH_ER, j)); - /* Clear any pending error */ + /* Clear any pending event or error */ + edma_write_array(ctlr, EDMA_ECR, j, mask); edma_write_array(ctlr, EDMA_EMCR, j, mask); /* Clear any SER */ edma_shadow0_write_array(ctlr, SH_SECR, j, mask); diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h index cc9be7fee62..03acfd39042 100644 --- a/arch/arm/mach-davinci/include/mach/da8xx.h +++ b/arch/arm/mach-davinci/include/mach/da8xx.h @@ -3,7 +3,7 @@ * * Author: Mark A. Greer <mgreer@mvista.com> * - * 2007, 2009 (c) MontaVista Software, Inc. This file is licensed under + * 2007, 2009-2010 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. @@ -13,7 +13,9 @@ #include <video/da8xx-fb.h> +#include <linux/platform_device.h> #include <linux/davinci_emac.h> + #include <mach/serial.h> #include <mach/edma.h> #include <mach/i2c.h> @@ -144,6 +146,10 @@ extern const short da850_mmcsd0_pins[]; extern const short da850_nand_pins[]; extern const short da850_nor_pins[]; +#ifdef CONFIG_DAVINCI_MUX int da8xx_pinmux_setup(const short pins[]); +#else +static inline int da8xx_pinmux_setup(const short pins[]) { return 0; } +#endif #endif /* __ASM_ARCH_DAVINCI_DA8XX_H */ diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c index 42d985beece..9e0b106b4f5 100644 --- a/arch/arm/mach-davinci/time.c +++ b/arch/arm/mach-davinci/time.c @@ -253,8 +253,6 @@ static void __init timer_init(void) irq = USING_COMPARE(t) ? 
dtip[i].cmp_irq : irq; setup_irq(irq, &t->irqaction); } - - timer32_config(&timers[i]); } } @@ -331,6 +329,7 @@ static void __init davinci_timer_init(void) unsigned int clocksource_id; static char err[] __initdata = KERN_ERR "%s: can't register clocksource!\n"; + int i; clockevent_id = soc_info->timer_info->clockevent_id; clocksource_id = soc_info->timer_info->clocksource_id; @@ -389,6 +388,9 @@ static void __init davinci_timer_init(void) clockevent_davinci.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_davinci); + + for (i = 0; i < ARRAY_SIZE(timers); i++) + timer32_config(&timers[i]); } struct sys_timer davinci_timer = { diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c index cc377ae8c42..cf547ad7ebd 100644 --- a/arch/arm/mach-ep93xx/gpio.c +++ b/arch/arm/mach-ep93xx/gpio.c @@ -25,7 +25,7 @@ #include <mach/hardware.h> /************************************************************************* - * GPIO handling for EP93xx + * Interrupt handling for EP93xx on-chip GPIOs *************************************************************************/ static unsigned char gpio_int_unmasked[3]; static unsigned char gpio_int_enabled[3]; @@ -40,7 +40,7 @@ static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 }; static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 }; static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 }; -void ep93xx_gpio_update_int_params(unsigned port) +static void ep93xx_gpio_update_int_params(unsigned port) { BUG_ON(port > 2); @@ -56,7 +56,7 @@ void ep93xx_gpio_update_int_params(unsigned port) EP93XX_GPIO_REG(int_en_register_offset[port])); } -void ep93xx_gpio_int_mask(unsigned line) +static inline void ep93xx_gpio_int_mask(unsigned line) { gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7)); } diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig index 3872af1cf2c..170f68e46dd 100644 --- a/arch/arm/mach-mx3/Kconfig +++ b/arch/arm/mach-mx3/Kconfig @@ -62,6 +62,15 @@ config MACH_MX31_3DS Include support for MX31PDK (3DS) platform. This includes specific configurations for the board and its peripherals. +config MACH_MX31_3DS_MXC_NAND_USE_BBT + bool "Make the MXC NAND driver use the in flash Bad Block Table" + depends on MACH_MX31_3DS + depends on MTD_NAND_MXC + help + Enable this if you want the MXC NAND driver to use the in-flash + Bad Block Table to know which blocks are bad instead of scanning the + entire flash looking for bad block markers. + config MACH_MX31MOBOARD bool "Support mx31moboard platforms (EPFL Mobots group)" select ARCH_MX31 @@ -95,6 +104,7 @@ config MACH_PCM043 config MACH_ARMADILLO5X0 bool "Support Atmark Armadillo-500 Development Base Board" select ARCH_MX31 + select MXC_ULPI if USB_ULPI help Include support for Atmark Armadillo-500 platform. This includes specific configurations for the board and its peripherals.
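Note that the MACH_MX31_3DS_MXC_NAND_USE_BBT option above is consumed in C code through its CONFIG_-prefixed macro, since Kconfig symbols always gain a CONFIG_ prefix. A sketch mirroring the platform data set up later in this patch:

	static struct mxc_nand_platform_data nand_pdata = {
		.width	= 1,
		.hw_ecc	= 1,
	#ifdef CONFIG_MACH_MX31_3DS_MXC_NAND_USE_BBT
		.flash_bbt = 1,	/* trust the in-flash bad block table */
	#endif
	};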
diff --git a/arch/arm/mach-mx3/clock-imx31.c b/arch/arm/mach-mx3/clock-imx31.c index 80dba9966b5..9a9eb6de612 100644 --- a/arch/arm/mach-mx3/clock-imx31.c +++ b/arch/arm/mach-mx3/clock-imx31.c @@ -468,6 +468,7 @@ static struct clk ahb_clk = { } DEFINE_CLOCK(perclk_clk, 0, NULL, 0, NULL, NULL, &ipg_clk); +DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL); DEFINE_CLOCK(sdhc1_clk, 0, MXC_CCM_CGR0, 0, NULL, NULL, &perclk_clk); DEFINE_CLOCK(sdhc2_clk, 1, MXC_CCM_CGR0, 2, NULL, NULL, &perclk_clk); @@ -490,7 +491,7 @@ DEFINE_CLOCK(mpeg4_clk, 0, MXC_CCM_CGR1, 0, NULL, NULL, &ahb_clk); DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1, 2, mstick1_get_rate, NULL, &usb_pll_clk); DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1, 4, mstick2_get_rate, NULL, &usb_pll_clk); DEFINE_CLOCK1(csi_clk, 0, MXC_CCM_CGR1, 6, csi, NULL, &serial_pll_clk); -DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ipg_clk); +DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ckil_clk); DEFINE_CLOCK(wdog_clk, 0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk); DEFINE_CLOCK(pwm_clk, 0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk); DEFINE_CLOCK(usb_clk2, 0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk); @@ -514,7 +515,6 @@ DEFINE_CLOCK(usb_clk1, 0, NULL, 0, usb_get_rate, NULL, &usb_pll_clk) DEFINE_CLOCK(nfc_clk, 0, NULL, 0, nfc_get_rate, NULL, &ahb_clk); DEFINE_CLOCK(scc_clk, 0, NULL, 0, NULL, NULL, &ipg_clk); DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk); -DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL); #define _REGISTER_CLOCK(d, n, c) \ { \ @@ -572,7 +572,6 @@ static struct clk_lookup lookups[] = { _REGISTER_CLOCK(NULL, "iim", iim_clk) _REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk) _REGISTER_CLOCK(NULL, "mbx", mbx_clk) - _REGISTER_CLOCK("mxc_rtc", NULL, ckil_clk) }; int __init mx31_clocks_init(unsigned long fref) diff --git a/arch/arm/mach-mx3/devices.c b/arch/arm/mach-mx3/devices.c index 6adb586515e..f8911154a9f 100644 --- a/arch/arm/mach-mx3/devices.c +++ b/arch/arm/mach-mx3/devices.c @@ -575,11 +575,26 @@ struct platform_device imx_ssi_device1 = { .resource = imx_ssi_resources1, }; -static int mx3_devices_init(void) +static struct resource imx_wdt_resources[] = { + { + .flags = IORESOURCE_MEM, + }, +}; + +struct platform_device imx_wdt_device0 = { + .name = "imx-wdt", + .id = 0, + .num_resources = ARRAY_SIZE(imx_wdt_resources), + .resource = imx_wdt_resources, +}; + +static int __init mx3_devices_init(void) { if (cpu_is_mx31()) { mxc_nand_resources[0].start = MX31_NFC_BASE_ADDR; mxc_nand_resources[0].end = MX31_NFC_BASE_ADDR + 0xfff; + imx_wdt_resources[0].start = MX31_WDOG_BASE_ADDR; + imx_wdt_resources[0].end = MX31_WDOG_BASE_ADDR + 0x3fff; mxc_register_device(&mxc_rnga_device, NULL); } if (cpu_is_mx35()) { @@ -597,6 +612,8 @@ static int mx3_devices_init(void) imx_ssi_resources0[1].end = MX35_INT_SSI1; imx_ssi_resources1[1].start = MX35_INT_SSI2; imx_ssi_resources1[1].end = MX35_INT_SSI2; + imx_wdt_resources[0].start = MX35_WDOG_BASE_ADDR; + imx_wdt_resources[0].end = MX35_WDOG_BASE_ADDR + 0x3fff; } return 0; diff --git a/arch/arm/mach-mx3/devices.h b/arch/arm/mach-mx3/devices.h index 42cf175eac6..4f77eb50127 100644 --- a/arch/arm/mach-mx3/devices.h +++ b/arch/arm/mach-mx3/devices.h @@ -25,4 +25,5 @@ extern struct platform_device mxc_spi_device1; extern struct platform_device mxc_spi_device2; extern struct platform_device imx_ssi_device0; extern struct platform_device imx_ssi_device1; - +extern struct platform_device imx_ssi_device1; +extern struct platform_device imx_wdt_device0; diff 
--git a/arch/arm/mach-mx3/mach-armadillo5x0.c b/arch/arm/mach-mx3/mach-armadillo5x0.c index 3d72b0b8970..5f72ec91af2 100644 --- a/arch/arm/mach-mx3/mach-armadillo5x0.c +++ b/arch/arm/mach-mx3/mach-armadillo5x0.c @@ -36,6 +36,9 @@ #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/i2c.h> +#include <linux/usb/otg.h> +#include <linux/usb/ulpi.h> +#include <linux/delay.h> #include <mach/hardware.h> #include <asm/mach-types.h> @@ -52,6 +55,8 @@ #include <mach/ipu.h> #include <mach/mx3fb.h> #include <mach/mxc_nand.h> +#include <mach/mxc_ehci.h> +#include <mach/ulpi.h> #include "devices.h" #include "crm_regs.h" @@ -103,8 +108,158 @@ static int armadillo5x0_pins[] = { /* I2C2 */ MX31_PIN_CSPI2_MOSI__SCL, MX31_PIN_CSPI2_MISO__SDA, + /* OTG */ + MX31_PIN_USBOTG_DATA0__USBOTG_DATA0, + MX31_PIN_USBOTG_DATA1__USBOTG_DATA1, + MX31_PIN_USBOTG_DATA2__USBOTG_DATA2, + MX31_PIN_USBOTG_DATA3__USBOTG_DATA3, + MX31_PIN_USBOTG_DATA4__USBOTG_DATA4, + MX31_PIN_USBOTG_DATA5__USBOTG_DATA5, + MX31_PIN_USBOTG_DATA6__USBOTG_DATA6, + MX31_PIN_USBOTG_DATA7__USBOTG_DATA7, + MX31_PIN_USBOTG_CLK__USBOTG_CLK, + MX31_PIN_USBOTG_DIR__USBOTG_DIR, + MX31_PIN_USBOTG_NXT__USBOTG_NXT, + MX31_PIN_USBOTG_STP__USBOTG_STP, + /* USB host 2 */ + IOMUX_MODE(MX31_PIN_USBH2_CLK, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_USBH2_DIR, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_USBH2_NXT, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_USBH2_STP, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_USBH2_DATA0, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_USBH2_DATA1, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_STXD3, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_SRXD3, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_SCK3, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_SFS3, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_STXD6, IOMUX_CONFIG_FUNC), + IOMUX_MODE(MX31_PIN_SRXD6, IOMUX_CONFIG_FUNC), }; +/* USB */ +#if defined(CONFIG_USB_ULPI) + +#define OTG_RESET IOMUX_TO_GPIO(MX31_PIN_STXD4) +#define USBH2_RESET IOMUX_TO_GPIO(MX31_PIN_SCK6) +#define USBH2_CS IOMUX_TO_GPIO(MX31_PIN_GPIO1_3) + +#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \ + PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU) + +static int usbotg_init(struct platform_device *pdev) +{ + int err; + + mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA0, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA1, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA2, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA3, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA4, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA5, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA6, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA7, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_CLK, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_DIR, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_NXT, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBOTG_STP, USB_PAD_CFG); + + /* Chip already enabled by hardware */ + /* OTG phy reset*/ + err = gpio_request(OTG_RESET, "USB-OTG-RESET"); + if (err) { + pr_err("Failed to request the usb otg reset gpio\n"); + return err; + } + + err = gpio_direction_output(OTG_RESET, 1/*HIGH*/); + if (err) { + pr_err("Failed to reset the usb otg phy\n"); + goto otg_free_reset; + } + + gpio_set_value(OTG_RESET, 0/*LOW*/); + mdelay(5); + gpio_set_value(OTG_RESET, 1/*HIGH*/); + + return 0; + +otg_free_reset: + gpio_free(OTG_RESET); + return err; +} + +static int usbh2_init(struct platform_device *pdev) +{ + int err; + + mxc_iomux_set_pad(MX31_PIN_USBH2_CLK, USB_PAD_CFG); + 
mxc_iomux_set_pad(MX31_PIN_USBH2_DIR, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBH2_NXT, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBH2_STP, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBH2_DATA0, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_USBH2_DATA1, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_SRXD6, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_STXD6, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_SFS3, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_SCK3, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_SRXD3, USB_PAD_CFG); + mxc_iomux_set_pad(MX31_PIN_STXD3, USB_PAD_CFG); + + mxc_iomux_set_gpr(MUX_PGP_UH2, true); + + + /* Enable the chip */ + err = gpio_request(USBH2_CS, "USB-H2-CS"); + if (err) { + pr_err("Failed to request the usb host 2 CS gpio\n"); + return err; + } + + err = gpio_direction_output(USBH2_CS, 0/*Enabled*/); + if (err) { + pr_err("Failed to drive the usb host 2 CS gpio\n"); + goto h2_free_cs; + } + + /* H2 phy reset*/ + err = gpio_request(USBH2_RESET, "USB-H2-RESET"); + if (err) { + pr_err("Failed to request the usb host 2 reset gpio\n"); + goto h2_free_cs; + } + + err = gpio_direction_output(USBH2_RESET, 1/*HIGH*/); + if (err) { + pr_err("Failed to reset the usb host 2 phy\n"); + goto h2_free_reset; + } + + gpio_set_value(USBH2_RESET, 0/*LOW*/); + mdelay(5); + gpio_set_value(USBH2_RESET, 1/*HIGH*/); + + return 0; + +h2_free_reset: + gpio_free(USBH2_RESET); +h2_free_cs: + gpio_free(USBH2_CS); + return err; +} + +static struct mxc_usbh_platform_data usbotg_pdata = { + .init = usbotg_init, + .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT, + .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI, +}; + +static struct mxc_usbh_platform_data usbh2_pdata = { + .init = usbh2_init, + .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT, + .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI, +}; +#endif /* CONFIG_USB_ULPI */ + /* RTC over I2C*/ #define ARMADILLO5X0_RTC_GPIO IOMUX_TO_GPIO(MX31_PIN_SRXD4) @@ -393,6 +548,17 @@ static void __init armadillo5x0_init(void) if (armadillo5x0_i2c_rtc.irq == 0) pr_warning("armadillo5x0_init: failed to get RTC IRQ\n"); i2c_register_board_info(1, &armadillo5x0_i2c_rtc, 1); + + /* USB */ +#if defined(CONFIG_USB_ULPI) + usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, + USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); + usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, + USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); + + mxc_register_device(&mxc_otg_host, &usbotg_pdata); + mxc_register_device(&mxc_usbh2, &usbh2_pdata); +#endif } static void __init armadillo5x0_timer_init(void) diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c index b88c18ad769..f54af1e29ca 100644 --- a/arch/arm/mach-mx3/mach-mx31_3ds.c +++ b/arch/arm/mach-mx3/mach-mx31_3ds.c @@ -23,6 +23,9 @@ #include <linux/gpio.h> #include <linux/smsc911x.h> #include <linux/platform_device.h> +#include <linux/mfd/mc13783.h> +#include <linux/spi/spi.h> +#include <linux/regulator/machine.h> #include <mach/hardware.h> #include <asm/mach-types.h> @@ -31,26 +34,96 @@ #include <asm/memory.h> #include <asm/mach/map.h> #include <mach/common.h> -#include <mach/board-mx31pdk.h> +#include <mach/board-mx31_3ds.h> #include <mach/imx-uart.h> #include <mach/iomux-mx3.h> +#include <mach/mxc_nand.h> +#include <mach/spi.h> #include "devices.h" /*! - * @file mx31pdk.c + * @file mx31_3ds.c * * @brief This file contains the board-specific initialization routines. 
* * @ingroup System */ -static int mx31pdk_pins[] = { +static int mx31_3ds_pins[] = { /* UART1 */ MX31_PIN_CTS1__CTS1, MX31_PIN_RTS1__RTS1, MX31_PIN_TXD1__TXD1, MX31_PIN_RXD1__RXD1, IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO), + /* SPI 1 */ + MX31_PIN_CSPI2_SCLK__SCLK, + MX31_PIN_CSPI2_MOSI__MOSI, + MX31_PIN_CSPI2_MISO__MISO, + MX31_PIN_CSPI2_SPI_RDY__SPI_RDY, + MX31_PIN_CSPI2_SS0__SS0, + MX31_PIN_CSPI2_SS2__SS2, /*CS for MC13783 */ + /* MC13783 IRQ */ + IOMUX_MODE(MX31_PIN_GPIO1_3, IOMUX_CONFIG_GPIO), +}; + +/* Regulators */ +static struct regulator_init_data pwgtx_init = { + .constraints = { + .boot_on = 1, + .always_on = 1, + }, +}; + +static struct mc13783_regulator_init_data mx31_3ds_regulators[] = { + { + .id = MC13783_REGU_PWGT1SPI, /* Power Gate for ARM core. */ + .init_data = &pwgtx_init, + }, { + .id = MC13783_REGU_PWGT2SPI, /* Power Gate for L2 Cache. */ + .init_data = &pwgtx_init, + }, +}; + +/* MC13783 */ +static struct mc13783_platform_data mc13783_pdata __initdata = { + .regulators = mx31_3ds_regulators, + .num_regulators = ARRAY_SIZE(mx31_3ds_regulators), + .flags = MC13783_USE_REGULATOR, +}; + +/* SPI */ +static int spi1_internal_chipselect[] = { + MXC_SPI_CS(0), + MXC_SPI_CS(2), +}; + +static struct spi_imx_master spi1_pdata = { + .chipselect = spi1_internal_chipselect, + .num_chipselect = ARRAY_SIZE(spi1_internal_chipselect), +}; + +static struct spi_board_info mx31_3ds_spi_devs[] __initdata = { + { + .modalias = "mc13783", + .max_speed_hz = 1000000, + .bus_num = 1, + .chip_select = 1, /* SS2 */ + .platform_data = &mc13783_pdata, + .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3), + .mode = SPI_CS_HIGH, + }, +}; + +/* + * NAND Flash + */ +static struct mxc_nand_platform_data imx31_3ds_nand_flash_pdata = { + .width = 1, + .hw_ecc = 1, +#ifdef CONFIG_MACH_MX31_3DS_MXC_NAND_USE_BBT + .flash_bbt = 1, +#endif }; static struct imxuart_platform_data uart_pdata = { @@ -95,7 +168,7 @@ static struct platform_device smsc911x_device = { * LEDs, switches, interrupts for Ethernet. */ -static void mx31pdk_expio_irq_handler(uint32_t irq, struct irq_desc *desc) +static void mx31_3ds_expio_irq_handler(uint32_t irq, struct irq_desc *desc) { uint32_t imr_val; uint32_t int_valid; @@ -163,7 +236,7 @@ static struct irq_chip expio_irq_chip = { .unmask = expio_unmask_irq, }; -static int __init mx31pdk_init_expio(void) +static int __init mx31_3ds_init_expio(void) { int i; int ret; @@ -176,7 +249,7 @@ static int __init mx31pdk_init_expio(void) return -ENODEV; } - pr_info("i.MX31PDK Debug board detected, rev = 0x%04X\n", + pr_info("i.MX31 3DS Debug board detected, rev = 0x%04X\n", __raw_readw(CPLD_CODE_VER_REG)); /* @@ -201,7 +274,7 @@ static int __init mx31pdk_init_expio(void) set_irq_flags(i, IRQF_VALID); } set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_LOW); - set_irq_chained_handler(EXPIO_PARENT_INT, mx31pdk_expio_irq_handler); + set_irq_chained_handler(EXPIO_PARENT_INT, mx31_3ds_expio_irq_handler); return 0; } @@ -209,7 +282,7 @@ static int __init mx31pdk_init_expio(void) /* * This structure defines the MX31 memory map. */ -static struct map_desc mx31pdk_io_desc[] __initdata = { +static struct map_desc mx31_3ds_io_desc[] __initdata = { { .virtual = MX31_CS5_BASE_ADDR_VIRT, .pfn = __phys_to_pfn(MX31_CS5_BASE_ADDR), @@ -221,10 +294,10 @@ static struct map_desc mx31pdk_io_desc[] __initdata = { /* * Set up static virtual mappings.
*/ -static void __init mx31pdk_map_io(void) +static void __init mx31_3ds_map_io(void) { mx31_map_io(); - iotable_init(mx31pdk_io_desc, ARRAY_SIZE(mx31pdk_io_desc)); + iotable_init(mx31_3ds_io_desc, ARRAY_SIZE(mx31_3ds_io_desc)); } /*! @@ -232,35 +305,40 @@ static void __init mx31pdk_map_io(void) */ static void __init mxc_board_init(void) { - mxc_iomux_setup_multiple_pins(mx31pdk_pins, ARRAY_SIZE(mx31pdk_pins), - "mx31pdk"); + mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins), + "mx31_3ds"); mxc_register_device(&mxc_uart_device0, &uart_pdata); + mxc_register_device(&mxc_nand_device, &imx31_3ds_nand_flash_pdata); + + mxc_register_device(&mxc_spi_device1, &spi1_pdata); + spi_register_board_info(mx31_3ds_spi_devs, + ARRAY_SIZE(mx31_3ds_spi_devs)); - if (!mx31pdk_init_expio()) + if (!mx31_3ds_init_expio()) platform_device_register(&smsc911x_device); } -static void __init mx31pdk_timer_init(void) +static void __init mx31_3ds_timer_init(void) { mx31_clocks_init(26000000); } -static struct sys_timer mx31pdk_timer = { - .init = mx31pdk_timer_init, +static struct sys_timer mx31_3ds_timer = { + .init = mx31_3ds_timer_init, }; /* * The following uses standard kernel macros defined in arch.h in order to - * initialize __mach_desc_MX31PDK data structure. + * initialize __mach_desc_MX31_3DS data structure. */ MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)") /* Maintainer: Freescale Semiconductor, Inc. */ .phys_io = MX31_AIPS1_BASE_ADDR, .io_pg_offst = (MX31_AIPS1_BASE_ADDR_VIRT >> 18) & 0xfffc, .boot_params = MX3x_PHYS_OFFSET + 0x100, - .map_io = mx31pdk_map_io, + .map_io = mx31_3ds_map_io, .init_irq = mx31_init_irq, .init_machine = mxc_board_init, - .timer = &mx31pdk_timer, + .timer = &mx31_3ds_timer, MACHINE_END diff --git a/arch/arm/mach-mx3/mach-pcm037.c b/arch/arm/mach-mx3/mach-pcm037.c index 034ec819006..2df1ec55a97 100644 --- a/arch/arm/mach-mx3/mach-pcm037.c +++ b/arch/arm/mach-mx3/mach-pcm037.c @@ -35,7 +35,6 @@ #include <linux/can/platform/sja1000.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> -#include <linux/fsl_devices.h> #include <linux/gfp.h> #include <media/soc_camera.h> diff --git a/arch/arm/mach-mx3/mx31lite-db.c b/arch/arm/mach-mx3/mx31lite-db.c index ccd874225c3..093c595ca58 100644 --- a/arch/arm/mach-mx3/mx31lite-db.c +++ b/arch/arm/mach-mx3/mx31lite-db.c @@ -28,7 +28,6 @@ #include <linux/types.h> #include <linux/init.h> #include <linux/gpio.h> -#include <linux/platform_device.h> #include <linux/leds.h> #include <linux/platform_device.h> @@ -206,5 +205,6 @@ void __init mx31lite_db_init(void) mxc_register_device(&mxcsdhc_device0, &mmc_pdata); mxc_register_device(&mxc_spi_device0, &spi0_pdata); platform_device_register(&litekit_led_device); + mxc_register_device(&imx_wdt_device0, NULL); } diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c index be90c03101c..8f85f73b83a 100644 --- a/arch/arm/mach-mx5/clock-mx51.c +++ b/arch/arm/mach-mx5/clock-mx51.c @@ -757,7 +757,7 @@ DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET, /* GPT */ DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET, - NULL, NULL, &ipg_perclk, NULL); + NULL, NULL, &ipg_clk, NULL); DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET, NULL, NULL, &ipg_clk, NULL); diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c index 41c769f08c4..2d37785e385 100644 --- a/arch/arm/mach-mx5/cpu.c +++ b/arch/arm/mach-mx5/cpu.c @@ -14,9 +14,62 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> 
+#include <linux/module.h> #include <mach/hardware.h> #include <asm/io.h> +static int cpu_silicon_rev = -1; + +#define SI_REV 0x48 + +static void query_silicon_parameter(void) +{ + void __iomem *rom = ioremap(MX51_IROM_BASE_ADDR, MX51_IROM_SIZE); + u32 rev; + + if (!rom) { + cpu_silicon_rev = -EINVAL; + return; + } + + rev = readl(rom + SI_REV); + switch (rev) { + case 0x1: + cpu_silicon_rev = MX51_CHIP_REV_1_0; + break; + case 0x2: + cpu_silicon_rev = MX51_CHIP_REV_1_1; + break; + case 0x10: + cpu_silicon_rev = MX51_CHIP_REV_2_0; + break; + case 0x20: + cpu_silicon_rev = MX51_CHIP_REV_3_0; + break; + default: + cpu_silicon_rev = 0; + } + + iounmap(rom); +} + +/* + * Returns: + * the silicon revision of the cpu + * -EINVAL - not a mx51 + */ +int mx51_revision(void) +{ + if (!cpu_is_mx51()) + return -EINVAL; + + if (cpu_silicon_rev == -1) + query_silicon_parameter(); + + return cpu_silicon_rev; +} +EXPORT_SYMBOL(mx51_revision); + static int __init post_cpu_init(void) { unsigned int reg; diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c index c21e18be7af..b7677ef80cc 100644 --- a/arch/arm/mach-mx5/mm.c +++ b/arch/arm/mach-mx5/mm.c @@ -35,11 +35,6 @@ static struct map_desc mxc_io_desc[] __initdata = { .length = MX51_DEBUG_SIZE, .type = MT_DEVICE }, { - .virtual = MX51_TZIC_BASE_ADDR_VIRT, - .pfn = __phys_to_pfn(MX51_TZIC_BASE_ADDR), - .length = MX51_TZIC_SIZE, - .type = MT_DEVICE - }, { .virtual = MX51_AIPS1_BASE_ADDR_VIRT, .pfn = __phys_to_pfn(MX51_AIPS1_BASE_ADDR), .length = MX51_AIPS1_SIZE, @@ -54,11 +49,6 @@ static struct map_desc mxc_io_desc[] __initdata = { .pfn = __phys_to_pfn(MX51_AIPS2_BASE_ADDR), .length = MX51_AIPS2_SIZE, .type = MT_DEVICE - }, { - .virtual = MX51_NFC_AXI_BASE_ADDR_VIRT, - .pfn = __phys_to_pfn(MX51_NFC_AXI_BASE_ADDR), - .length = MX51_NFC_AXI_SIZE, - .type = MT_DEVICE }, }; @@ -69,14 +59,6 @@ static struct map_desc mxc_io_desc[] __initdata = { */ void __init mx51_map_io(void) { - u32 tzic_addr; - - if (mx51_revision() < MX51_CHIP_REV_2_0) - tzic_addr = 0x8FFFC000; - else - tzic_addr = 0xE0003000; - mxc_io_desc[2].pfn = __phys_to_pfn(tzic_addr); - mxc_set_cpu_type(MXC_CPU_MX51); mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR)); mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG_BASE_ADDR)); @@ -85,5 +67,17 @@ void __init mx51_map_io(void) void __init mx51_init_irq(void) { - tzic_init_irq(MX51_IO_ADDRESS(MX51_TZIC_BASE_ADDR)); + unsigned long tzic_addr; + void __iomem *tzic_virt; + + if (mx51_revision() < MX51_CHIP_REV_2_0) + tzic_addr = MX51_TZIC_BASE_ADDR_TO1; + else + tzic_addr = MX51_TZIC_BASE_ADDR; + + tzic_virt = ioremap(tzic_addr, SZ_16K); + if (!tzic_virt) + panic("unable to map TZIC interrupt controller\n"); + + tzic_init_irq(tzic_virt); } diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 8bca4dea6df..f55fa1044f7 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, kfrom = kmap_atomic(from, KM_USER0); kto = kmap_atomic(to, KM_USER1); copy_page(kto, kfrom); -#ifdef CONFIG_HIGHMEM - /* - * kmap_atomic() doesn't set the page virtual address, and - * kunmap_atomic() takes care of cache flushing already. 
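The new mx51_revision() above is an instance of the probe-once/cache pattern: the first caller pays for an ioremap() and a register read, and every later caller gets the cached result. A compressed sketch of the same pattern, with EXAMPLE_ROM_BASE and EXAMPLE_REV_OFFSET as hypothetical stand-ins for the real addresses:

#include <linux/io.h>

static int example_rev = -1;                     /* -1: not probed yet */

static int example_revision(void)
{
        void __iomem *rom;

        if (example_rev != -1)
                return example_rev;

        rom = ioremap(EXAMPLE_ROM_BASE, 0x1000); /* hypothetical base */
        if (!rom)
                return -EINVAL;

        example_rev = readl(rom + EXAMPLE_REV_OFFSET);
        iounmap(rom);
        return example_rev;
}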
- */ - if (page_address(to) != NULL) -#endif - __cpuc_flush_dcache_area(kto, PAGE_SIZE); + __cpuc_flush_dcache_area(kto, PAGE_SIZE); kunmap_atomic(kto, KM_USER1); kunmap_atomic(kfrom, KM_USER0); } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1351edc0b26..13fa536d82e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, vaddr += offset; op(vaddr, len, dir); kunmap_high(page); + } else if (cache_is_vipt()) { + pte_t saved_pte; + vaddr = kmap_high_l1_vipt(page, &saved_pte); + op(vaddr + offset, len, dir); + kunmap_high_l1_vipt(page, saved_pte); } } else { vaddr = page_address(page) + offset; diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index e34f095e209..c6844cb9b50 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -13,6 +13,7 @@ #include <asm/cacheflush.h> #include <asm/cachetype.h> +#include <asm/highmem.h> #include <asm/smp_plat.h> #include <asm/system.h> #include <asm/tlbflush.h> @@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, void __flush_dcache_page(struct address_space *mapping, struct page *page) { - void *addr = page_address(page); - /* * Writeback any data associated with the kernel mapping of this * page. This ensures that data in the physical page is mutually * coherent with the kernels mapping. */ -#ifdef CONFIG_HIGHMEM - /* - * kmap_atomic() doesn't set the page virtual address, and - * kunmap_atomic() takes care of cache flushing already. - */ - if (addr) -#endif - __cpuc_flush_dcache_area(addr, PAGE_SIZE); + if (!PageHighMem(page)) { + __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); + } else { + void *addr = kmap_high_get(page); + if (addr) { + __cpuc_flush_dcache_area(addr, PAGE_SIZE); + kunmap_high(page); + } else if (cache_is_vipt()) { + pte_t saved_pte; + addr = kmap_high_l1_vipt(page, &saved_pte); + __cpuc_flush_dcache_area(addr, PAGE_SIZE); + kunmap_high_l1_vipt(page, saved_pte); + } + } /* * If this is a page cache page, and we have an aliasing VIPT cache, diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 2be1ec7c1b4..77b030f5ec0 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type) unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); if (kvaddr >= (void *)FIXADDR_START) { - __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); + if (cache_is_vivt()) + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); @@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr) pte = TOP_PTE(vaddr); return pte_page(*pte); } + +#ifdef CONFIG_CPU_CACHE_VIPT + +#include <linux/percpu.h> + +/* + * The VIVT cache of a highmem page is always flushed before the page + * is unmapped. Hence unmapped highmem pages need no cache maintenance + * in that case. + * + * However unmapped pages may still be cached with a VIPT cache, and + * it is not possible to perform cache maintenance on them using physical + * addresses unfortunately. So we have no choice but to set up a temporary + * virtual mapping for that purpose. + * + * Yet this VIPT cache maintenance may be triggered from DMA support + * functions which are possibly called from interrupt context. 
As we don't + * want to keep interrupt disabled all the time when such maintenance is + * taking place, we therefore allow for some reentrancy by preserving and + * restoring the previous fixmap entry before the interrupted context is + * resumed. If the reentrancy depth is 0 then there is no need to restore + * the previous fixmap, and leaving the current one in place allow it to + * be reused the next time without a TLB flush (common with DMA). + */ + +static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth); + +void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte) +{ + unsigned int idx, cpu = smp_processor_id(); + int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); + unsigned long vaddr, flags; + pte_t pte, *ptep; + + idx = KM_L1_CACHE + KM_TYPE_NR * cpu; + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + ptep = TOP_PTE(vaddr); + pte = mk_pte(page, kmap_prot); + + if (!in_interrupt()) + preempt_disable(); + + raw_local_irq_save(flags); + (*depth)++; + if (pte_val(*ptep) == pte_val(pte)) { + *saved_pte = pte; + } else { + *saved_pte = *ptep; + set_pte_ext(ptep, pte, 0); + local_flush_tlb_kernel_page(vaddr); + } + raw_local_irq_restore(flags); + + return (void *)vaddr; +} + +void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte) +{ + unsigned int idx, cpu = smp_processor_id(); + int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); + unsigned long vaddr, flags; + pte_t pte, *ptep; + + idx = KM_L1_CACHE + KM_TYPE_NR * cpu; + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + ptep = TOP_PTE(vaddr); + pte = mk_pte(page, kmap_prot); + + BUG_ON(pte_val(*ptep) != pte_val(pte)); + BUG_ON(*depth <= 0); + + raw_local_irq_save(flags); + (*depth)--; + if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) { + set_pte_ext(ptep, saved_pte, 0); + local_flush_tlb_kernel_page(vaddr); + } + raw_local_irq_restore(flags); + + if (!in_interrupt()) + preempt_enable(); +} + +#endif /* CONFIG_CPU_CACHE_VIPT */ diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9d4da6ac28e..241c24a1c18 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -420,6 +420,10 @@ static void __init build_mem_type_table(void) user_pgprot |= L_PTE_SHARED; kern_pgprot |= L_PTE_SHARED; vecs_pgprot |= L_PTE_SHARED; + mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; + mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; #endif @@ -1050,10 +1054,12 @@ void setup_mm_for_reboot(char mode) pgd_t *pgd; int i; - if (current->mm && current->mm->pgd) - pgd = current->mm->pgd; - else - pgd = init_mm.pgd; + /* + * We need to access to user-mode page tables here. For kernel threads + * we don't have any user-mode mappings so we use the context that we + * "borrowed". + */ + pgd = current->active_mm->pgd; base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) diff --git a/arch/arm/plat-mxc/include/mach/board-mx31pdk.h b/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h index 2bbd6ed17f5..da92933a233 100644 --- a/arch/arm/plat-mxc/include/mach/board-mx31pdk.h +++ b/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h @@ -8,8 +8,8 @@ * published by the Free Software Foundation. 
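The kmap_high_l1_vipt()/kunmap_high_l1_vipt() pair above boil down to a depth-counted save/restore of a single per-CPU slot. A plain userspace illustration of that discipline, with assumed names and none of the MMU or TLB details:

#include <stdio.h>

static int slot;        /* stands in for the per-CPU fixmap entry */
static int depth;       /* stands in for kmap_high_l1_vipt_depth */

static void slot_map(int val, int *saved)
{
        depth++;
        *saved = slot;                  /* remember the previous occupant */
        slot = val;
}

static void slot_unmap(int saved)
{
        depth--;
        /* restore only when someone below us is still using the slot;
         * at depth 0 the mapping is left in place for cheap reuse */
        if (depth != 0 && slot != saved)
                slot = saved;
}

int main(void)
{
        int outer, inner;

        slot_map(1, &outer);            /* task context maps page 1 */
        slot_map(2, &inner);            /* "interrupt" reuses the slot */
        slot_unmap(inner);              /* restores page 1 for the task */
        printf("after irq: %d\n", slot);  /* prints 1 */
        slot_unmap(outer);              /* depth 0: leave mapping in place */
        printf("after task: %d\n", slot); /* still 1 */
        return 0;
}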
*/ -#ifndef __ASM_ARCH_MXC_BOARD_MX31PDK_H__ -#define __ASM_ARCH_MXC_BOARD_MX31PDK_H__ +#ifndef __ASM_ARCH_MXC_BOARD_MX31_3DS_H__ +#define __ASM_ARCH_MXC_BOARD_MX31_3DS_H__ /* Definitions for components on the Debug board */ @@ -56,4 +56,4 @@ #define MXC_MAX_EXP_IO_LINES 16 -#endif /* __ASM_ARCH_MXC_BOARD_MX31PDK_H__ */ +#endif /* __ASM_ARCH_MXC_BOARD_MX31_3DS_H__ */ diff --git a/arch/arm/plat-mxc/include/mach/mx51.h b/arch/arm/plat-mxc/include/mach/mx51.h index 771532b6b4a..5aad344d565 100644 --- a/arch/arm/plat-mxc/include/mach/mx51.h +++ b/arch/arm/plat-mxc/include/mach/mx51.h @@ -14,7 +14,7 @@ * FB100000 70000000 1M SPBA 0 * FB000000 73F00000 1M AIPS 1 * FB200000 83F00000 1M AIPS 2 - * FA100000 8FFFC000 16K TZIC (interrupt controller) + * 8FFFC000 16K TZIC (interrupt controller) * 90000000 256M CSD0 SDRAM/DDR * A0000000 256M CSD1 SDRAM/DDR * B0000000 128M CS0 Flash @@ -23,11 +23,17 @@ * C8000000 64M CS3 Flash * CC000000 32M CS4 SRAM * CE000000 32M CS5 SRAM - * F9000000 CFFF0000 64K NFC (NAND Flash AXI) + * CFFF0000 64K NFC (NAND Flash AXI) * */ /* + * IROM + */ +#define MX51_IROM_BASE_ADDR 0x0 +#define MX51_IROM_SIZE SZ_64K + +/* * IRAM */ #define MX51_IRAM_BASE_ADDR 0x1FFE0000 /* internal ram */ @@ -40,7 +46,6 @@ * NFC */ #define MX51_NFC_AXI_BASE_ADDR 0xCFFF0000 /* NAND flash AXI */ -#define MX51_NFC_AXI_BASE_ADDR_VIRT 0xF9000000 #define MX51_NFC_AXI_SIZE SZ_64K /* @@ -49,9 +54,8 @@ #define MX51_GPU_BASE_ADDR 0x20000000 #define MX51_GPU2D_BASE_ADDR 0xD0000000 -#define MX51_TZIC_BASE_ADDR 0x8FFFC000 -#define MX51_TZIC_BASE_ADDR_VIRT 0xFA100000 -#define MX51_TZIC_SIZE SZ_16K +#define MX51_TZIC_BASE_ADDR_TO1 0x8FFFC000 +#define MX51_TZIC_BASE_ADDR 0xE0000000 #define MX51_DEBUG_BASE_ADDR 0x60000000 #define MX51_DEBUG_BASE_ADDR_VIRT 0xFA200000 @@ -232,12 +236,10 @@ #define MX51_IO_ADDRESS(x) \ (void __iomem *) \ (MX51_IS_MODULE(x, IRAM) ? MX51_IRAM_IO_ADDRESS(x) : \ - MX51_IS_MODULE(x, TZIC) ? MX51_TZIC_IO_ADDRESS(x) : \ MX51_IS_MODULE(x, DEBUG) ? MX51_DEBUG_IO_ADDRESS(x) : \ MX51_IS_MODULE(x, SPBA0) ? MX51_SPBA0_IO_ADDRESS(x) : \ MX51_IS_MODULE(x, AIPS1) ? MX51_AIPS1_IO_ADDRESS(x) : \ - MX51_IS_MODULE(x, AIPS2) ? MX51_AIPS2_IO_ADDRESS(x) : \ - MX51_IS_MODULE(x, NFC_AXI) ? MX51_NFC_AXI_IO_ADDRESS(x) : \ + MX51_IS_MODULE(x, AIPS2) ? 
MX51_AIPS2_IO_ADDRESS(x) : \ 0xDEADBEEF) /* @@ -246,9 +248,6 @@ #define MX51_IRAM_IO_ADDRESS(x) \ (((x) - MX51_IRAM_BASE_ADDR) + MX51_IRAM_BASE_ADDR_VIRT) -#define MX51_TZIC_IO_ADDRESS(x) \ - (((x) - MX51_TZIC_BASE_ADDR) + MX51_TZIC_BASE_ADDR_VIRT) - #define MX51_DEBUG_IO_ADDRESS(x) \ (((x) - MX51_DEBUG_BASE_ADDR) + MX51_DEBUG_BASE_ADDR_VIRT) @@ -261,9 +260,6 @@ #define MX51_AIPS2_IO_ADDRESS(x) \ (((x) - MX51_AIPS2_BASE_ADDR) + MX51_AIPS2_BASE_ADDR_VIRT) -#define MX51_NFC_AXI_IO_ADDRESS(x) \ - (((x) - MX51_NFC_AXI_BASE_ADDR) + MX51_NFC_AXI_BASE_ADDR_VIRT) - #define MX51_IS_MEM_DEVICE_NONSHARED(x) 0 /* @@ -443,12 +439,7 @@ #if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS) -extern unsigned int system_rev; - -static inline unsigned int mx51_revision(void) -{ - return system_rev; -} +extern int mx51_revision(void); #endif #endif /* __ASM_ARCH_MXC_MX51_H__ */ diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h index 52e476a150c..b6d3d0fddc4 100644 --- a/arch/arm/plat-mxc/include/mach/uncompress.h +++ b/arch/arm/plat-mxc/include/mach/uncompress.h @@ -66,6 +66,7 @@ static inline void flush(void) #define MX2X_UART1_BASE_ADDR 0x1000a000 #define MX3X_UART1_BASE_ADDR 0x43F90000 #define MX3X_UART2_BASE_ADDR 0x43F94000 +#define MX51_UART1_BASE_ADDR 0x73fbc000 static __inline__ void __arch_decomp_setup(unsigned long arch_id) { @@ -101,6 +102,9 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id) case MACH_TYPE_MAGX_ZN5: uart_base = MX3X_UART2_BASE_ADDR; break; + case MACH_TYPE_MX51_BABBAGE: + uart_base = MX51_UART1_BASE_ADDR; + break; default: break; } diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index a420cb94932..315a540c7ce 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -428,26 +428,6 @@ static void vfp_pm_init(void) static inline void vfp_pm_init(void) { } #endif /* CONFIG_PM */ -/* - * Synchronise the hardware VFP state of a thread other than current with the - * saved one. This function is used by the ptrace mechanism. - */ -#ifdef CONFIG_SMP -void vfp_sync_hwstate(struct thread_info *thread) -{ -} - -void vfp_flush_hwstate(struct thread_info *thread) -{ - /* - * On SMP systems, the VFP state is automatically saved at every - * context switch. We mark the thread VFP state as belonging to a - * non-existent CPU so that the saved one will be reloaded when - * needed. - */ - thread->vfpstate.hard.cpu = NR_CPUS; -} -#else void vfp_sync_hwstate(struct thread_info *thread) { unsigned int cpu = get_cpu(); @@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread) last_VFP_context[cpu] = NULL; } +#ifdef CONFIG_SMP + /* + * For SMP we still have to take care of the case where the thread + * migrates to another CPU and then back to the original CPU on which + * the last VFP user is still the same thread. Mark the thread VFP + * state as belonging to a non-existent CPU so that the saved one will + * be reloaded in the above case. 
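The invalidation described in that comment works because the lazy-restore fast path requires two matches: the per-CPU "last user" pointer and the state's own CPU id. A simplified sketch of the ownership test, with hypothetical helper names (the real check lives in the VFP entry code):

#define EXAMPLE_NR_CPUS 4

struct example_vfp_state {
        unsigned int cpu;               /* CPU owning the live registers */
        /* ... saved register file ... */
};

static struct example_vfp_state *example_last_ctx[EXAMPLE_NR_CPUS];

/* Fast path: skip the reload only if this thread's state is still live here. */
static int example_state_is_live(struct example_vfp_state *st, unsigned int cpu)
{
        return example_last_ctx[cpu] == st && st->cpu == cpu;
}

/* What vfp_flush_hwstate() does on SMP: make the test above fail everywhere. */
static void example_invalidate(struct example_vfp_state *st)
{
        st->cpu = EXAMPLE_NR_CPUS;      /* matches no real CPU */
}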
+ */ + thread->vfpstate.hard.cpu = NR_CPUS; +#endif put_cpu(); } -#endif #include <linux/smp.h> diff --git a/arch/frv/include/asm/segment.h b/arch/frv/include/asm/segment.h index e3616a6f941..a2320a4a004 100644 --- a/arch/frv/include/asm/segment.h +++ b/arch/frv/include/asm/segment.h @@ -21,12 +21,12 @@ typedef struct { #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) -#define KERNEL_DS MAKE_MM_SEG(0xdfffffffUL) - #ifdef CONFIG_MMU #define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) +#define KERNEL_DS MAKE_MM_SEG(0xdfffffffUL) #else -#define USER_DS KERNEL_DS +#define USER_DS MAKE_MM_SEG(memory_end) +#define KERNEL_DS MAKE_MM_SEG(0xe0000000UL) #endif #define get_ds() (KERNEL_DS) diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h index 53650c958f4..0b67ec5b441 100644 --- a/arch/frv/include/asm/uaccess.h +++ b/arch/frv/include/asm/uaccess.h @@ -27,8 +27,6 @@ #define VERIFY_READ 0 #define VERIFY_WRITE 1 -#define __addr_ok(addr) ((unsigned long)(addr) < get_addr_limit()) - /* * check that a range of addresses falls within the current address limit */ diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h index 88b7af20a99..d9d2ed64743 100644 --- a/arch/m68k/include/asm/atomic_mm.h +++ b/arch/m68k/include/asm/atomic_mm.h @@ -148,14 +148,18 @@ static inline int atomic_xchg(atomic_t *v, int new) static inline int atomic_sub_and_test(int i, atomic_t *v) { char c; - __asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i)); + __asm__ __volatile__("subl %2,%1; seq %0" + : "=d" (c), "+m" (*v) + : "id" (i)); return c != 0; } static inline int atomic_add_negative(int i, atomic_t *v) { char c; - __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i)); + __asm__ __volatile__("addl %2,%1; smi %0" + : "=d" (c), "+m" (*v) + : "id" (i)); return c != 0; } diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h index 1320eaa4cc2..a29dd74a17c 100644 --- a/arch/m68k/include/asm/sigcontext.h +++ b/arch/m68k/include/asm/sigcontext.h @@ -17,13 +17,11 @@ struct sigcontext { #ifndef __uClinux__ # ifdef __mcoldfire__ unsigned long sc_fpregs[2][2]; /* room for two fp registers */ - unsigned long sc_fpcntl[3]; - unsigned char sc_fpstate[16+6*8]; # else unsigned long sc_fpregs[2*3]; /* room for two fp registers */ +# endif unsigned long sc_fpcntl[3]; unsigned char sc_fpstate[216]; -# endif #endif }; diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h index 8dbb6e7a03a..ad3fd61b2fe 100644 --- a/arch/microblaze/include/asm/futex.h +++ b/arch/microblaze/include/asm/futex.h @@ -55,7 +55,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) __futex_atomic_op("or %1,%0,%4;", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ANDN: - __futex_atomic_op("and %1,%0,%4;", ret, oldval, uaddr, oparg); + __futex_atomic_op("andn %1,%0,%4;", ret, oldval, uaddr, oparg); break; case FUTEX_OP_XOR: __futex_atomic_op("xor %1,%0,%4;", ret, oldval, uaddr, oparg); diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index 32d621a56ae..e45a6eea92e 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h @@ -108,6 +108,11 @@ static inline void writel(unsigned int v, volatile void __iomem *addr) #define iowrite16(v, addr) __raw_writew((u16)(v), (u16 *)(addr)) #define iowrite32(v, addr) __raw_writel((u32)(v), (u32 *)(addr)) +#define ioread16be(addr) __raw_readw((u16 *)(addr)) +#define ioread32be(addr) __raw_readl((u32 *)(addr)) +#define 
iowrite16be(v, addr) __raw_writew((u16)(v), (u16 *)(addr)) +#define iowrite32be(v, addr) __raw_writel((u32)(v), (u32 *)(addr)) + /* These are the definitions for the x86 IO instructions * inb/inw/inl/outb/outw/outl, the "string" versions * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c index 388b31ca65a..515feb40455 100644 --- a/arch/microblaze/kernel/ftrace.c +++ b/arch/microblaze/kernel/ftrace.c @@ -151,13 +151,10 @@ int ftrace_make_nop(struct module *mod, return ret; } -static int ret_addr; /* initialized as 0 by default */ - /* I believe that first is called ftrace_make_nop before this function */ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { int ret; - ret_addr = addr; /* saving where the barrier jump is */ pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); ret = ftrace_modify_code(rec->ip, imm); @@ -194,12 +191,9 @@ int ftrace_update_ftrace_func(ftrace_func_t func) ret = ftrace_modify_code(ip, upper); ret += ftrace_modify_code(ip + 4, lower); - /* We just need to remove the rtsd r15, 8 by NOP */ - BUG_ON(!ret_addr); - if (ret_addr) - ret += ftrace_modify_code(ret_addr, MICROBLAZE_NOP); - else - ret = 1; /* fault */ + /* We just need to replace the rtsd r15, 8 with NOP */ + ret += ftrace_modify_code((unsigned long)&ftrace_caller, + MICROBLAZE_NOP); /* All changes are done - lets do caches consistent */ flush_icache(); diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index 6d6349a145f..a4a7770c614 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c @@ -75,7 +75,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) { int rval; unsigned long val = 0; - unsigned long copied; switch (request) { /* Read/write the word at location ADDR in the registers. */ diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c index 379536e3abd..be7e92ea01f 100644 --- a/arch/mips/alchemy/devboards/db1200/setup.c +++ b/arch/mips/alchemy/devboards/db1200/setup.c @@ -60,43 +60,6 @@ void __init board_setup(void) wmb(); } -/* use the hexleds to count the number of times the cpu has entered - * wait, the dots to indicate whether the CPU is currently idle or - * active (dots off = sleeping, dots on = working) for cases where - * the number doesn't change for a long(er) period of time. 
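One subtle fix above deserves a note: the microblaze FUTEX_OP_ANDN case must clear the bits set in oparg, i.e. store oldval & ~oparg. The andn instruction computes exactly rA & ~rB, whereas the previous and computed oldval & oparg and corrupted the futex word. In C terms:

#include <stdint.h>

/* FUTEX_OP_ANDN: the new value is the old value with oparg's bits cleared. */
static int32_t futex_andn(int32_t oldval, int32_t oparg)
{
        return oldval & ~oparg;         /* microblaze: andn rd, ra, rb */
}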
- */ -static void db1200_wait(void) -{ - __asm__(" .set push \n" - " .set mips3 \n" - " .set noreorder \n" - " cache 0x14, 0(%0) \n" - " cache 0x14, 32(%0) \n" - " cache 0x14, 64(%0) \n" - /* dots off: we're about to call wait */ - " lui $26, 0xb980 \n" - " ori $27, $0, 3 \n" - " sb $27, 0x18($26) \n" - " sync \n" - " nop \n" - " wait \n" - " nop \n" - " nop \n" - " nop \n" - " nop \n" - " nop \n" - /* dots on: there's work to do, increment cntr */ - " lui $26, 0xb980 \n" - " sb $0, 0x18($26) \n" - " lui $26, 0xb9c0 \n" - " lb $27, 0($26) \n" - " addiu $27, $27, 1 \n" - " sb $27, 0($26) \n" - " sync \n" - " .set pop \n" - : : "r" (db1200_wait)); -} - static int __init db1200_arch_init(void) { /* GPIO7 is low-level triggered CPLD cascade */ @@ -110,9 +73,6 @@ static int __init db1200_arch_init(void) irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN; irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN; - if (cpu_wait) - cpu_wait = db1200_wait; - return 0; } arch_initcall(db1200_arch_init); diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 246df7aca2e..2fafc78e5ce 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -168,7 +168,7 @@ static struct plat_vlynq_data vlynq_high_data = { .on = vlynq_on, .off = vlynq_off, }, - .reset_bit = 26, + .reset_bit = 16, .gpio_bit = 19, }; @@ -600,6 +600,7 @@ static int __init ar7_register_devices(void) } if (ar7_has_high_cpmac()) { + res = fixed_phy_add(PHY_POLL, cpmac_high.id, &fixed_phy_status); if (!res) { cpmac_get_mac(1, cpmac_high_data.dev_addr); diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index ea17941168c..8dba8cfb752 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -18,6 +18,7 @@ #include <asm/addrspace.h> #include <bcm63xx_board.h> #include <bcm63xx_cpu.h> +#include <bcm63xx_dev_uart.h> #include <bcm63xx_regs.h> #include <bcm63xx_io.h> #include <bcm63xx_dev_pci.h> @@ -40,6 +41,7 @@ static struct board_info __initdata board_96338gw = { .name = "96338GW", .expected_cpu_id = 0x6338, + .has_uart0 = 1, .has_enet0 = 1, .enet0 = { .force_speed_100 = 1, @@ -82,6 +84,7 @@ static struct board_info __initdata board_96338w = { .name = "96338W", .expected_cpu_id = 0x6338, + .has_uart0 = 1, .has_enet0 = 1, .enet0 = { .force_speed_100 = 1, @@ -126,6 +129,8 @@ static struct board_info __initdata board_96338w = { static struct board_info __initdata board_96345gw2 = { .name = "96345GW2", .expected_cpu_id = 0x6345, + + .has_uart0 = 1, }; #endif @@ -137,6 +142,7 @@ static struct board_info __initdata board_96348r = { .name = "96348R", .expected_cpu_id = 0x6348, + .has_uart0 = 1, .has_enet0 = 1, .has_pci = 1, @@ -180,6 +186,7 @@ static struct board_info __initdata board_96348gw_10 = { .name = "96348GW-10", .expected_cpu_id = 0x6348, + .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, @@ -239,6 +246,7 @@ static struct board_info __initdata board_96348gw_11 = { .name = "96348GW-11", .expected_cpu_id = 0x6348, + .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, @@ -292,6 +300,7 @@ static struct board_info __initdata board_96348gw = { .name = "96348GW", .expected_cpu_id = 0x6348, + .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, @@ -349,9 +358,10 @@ static struct board_info __initdata board_FAST2404 = { .name = "F@ST2404", .expected_cpu_id = 0x6348, - .has_enet0 = 1, - .has_enet1 = 1, - .has_pci = 1, + .has_uart0 = 1, + .has_enet0 = 1, + .has_enet1 = 1, + .has_pci = 1, 
.enet0 = { .has_phy = 1, @@ -368,10 +378,30 @@ static struct board_info __initdata board_FAST2404 = { .has_ehci0 = 1, }; +static struct board_info __initdata board_rta1025w_16 = { + .name = "RTA1025W_16", + .expected_cpu_id = 0x6348, + + .has_enet0 = 1, + .has_enet1 = 1, + .has_pci = 1, + + .enet0 = { + .has_phy = 1, + .use_internal_phy = 1, + }, + .enet1 = { + .force_speed_100 = 1, + .force_duplex_full = 1, + }, +}; + + static struct board_info __initdata board_DV201AMR = { .name = "DV201AMR", .expected_cpu_id = 0x6348, + .has_uart0 = 1, .has_pci = 1, .has_ohci0 = 1, @@ -391,6 +421,7 @@ static struct board_info __initdata board_96348gw_a = { .name = "96348GW-A", .expected_cpu_id = 0x6348, + .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, @@ -416,6 +447,7 @@ static struct board_info __initdata board_96358vw = { .name = "96358VW", .expected_cpu_id = 0x6358, + .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, @@ -467,6 +499,7 @@ static struct board_info __initdata board_96358vw2 = { .name = "96358VW2", .expected_cpu_id = 0x6358, + .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, @@ -514,6 +547,7 @@ static struct board_info __initdata board_AGPFS0 = { .name = "AGPF-S0", .expected_cpu_id = 0x6358, + .has_uart0 = 1, .has_enet0 = 1, .has_enet1 = 1, .has_pci = 1, @@ -531,6 +565,27 @@ static struct board_info __initdata board_AGPFS0 = { .has_ohci0 = 1, .has_ehci0 = 1, }; + +static struct board_info __initdata board_DWVS0 = { + .name = "DWV-S0", + .expected_cpu_id = 0x6358, + + .has_enet0 = 1, + .has_enet1 = 1, + .has_pci = 1, + + .enet0 = { + .has_phy = 1, + .use_internal_phy = 1, + }, + + .enet1 = { + .force_speed_100 = 1, + .force_duplex_full = 1, + }, + + .has_ohci0 = 1, +}; #endif /* @@ -552,16 +607,88 @@ static const struct board_info __initdata *bcm963xx_boards[] = { &board_FAST2404, &board_DV201AMR, &board_96348gw_a, + &board_rta1025w_16, #endif #ifdef CONFIG_BCM63XX_CPU_6358 &board_96358vw, &board_96358vw2, &board_AGPFS0, + &board_DWVS0, #endif }; /* + * Register a sane SPROMv2 to make the on-board + * bcm4318 WLAN work + */ +#ifdef CONFIG_SSB_PCIHOST +static struct ssb_sprom bcm63xx_sprom = { + .revision = 0x02, + .board_rev = 0x17, + .country_code = 0x0, + .ant_available_bg = 0x3, + .pa0b0 = 0x15ae, + .pa0b1 = 0xfa85, + .pa0b2 = 0xfe8d, + .pa1b0 = 0xffff, + .pa1b1 = 0xffff, + .pa1b2 = 0xffff, + .gpio0 = 0xff, + .gpio1 = 0xff, + .gpio2 = 0xff, + .gpio3 = 0xff, + .maxpwr_bg = 0x004c, + .itssi_bg = 0x00, + .boardflags_lo = 0x2848, + .boardflags_hi = 0x0000, +}; +#endif + +/* + * return board name for /proc/cpuinfo + */ +const char *board_get_name(void) +{ + return board.name; +} + +/* + * register & return a new board mac address + */ +static int board_get_mac_address(u8 *mac) +{ + u8 *p; + int count; + + if (mac_addr_used >= nvram.mac_addr_count) { + printk(KERN_ERR PFX "not enough mac address\n"); + return -ENODEV; + } + + memcpy(mac, nvram.mac_addr_base, ETH_ALEN); + p = mac + ETH_ALEN - 1; + count = mac_addr_used; + + while (count--) { + do { + (*p)++; + if (*p != 0) + break; + p--; + } while (p != mac); + } + + if (p == mac) { + printk(KERN_ERR PFX "unable to fetch mac address\n"); + return -ENODEV; + } + + mac_addr_used++; + return 0; +} + +/* * early init callback, read nvram data from flash and checksum it */ void __init board_prom_init(void) @@ -659,6 +786,17 @@ void __init board_prom_init(void) } bcm_gpio_writel(val, GPIO_MODE_REG); + + /* Generate MAC address for WLAN and + * register our SPROM */ +#ifdef CONFIG_SSB_PCIHOST + if 
(!board_get_mac_address(bcm63xx_sprom.il0mac)) { + memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); + memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); + if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0) + printk(KERN_ERR "failed to register fallback SPROM\n"); + } +#endif } /* @@ -676,49 +814,6 @@ void __init board_setup(void) panic("unexpected CPU for bcm963xx board"); } -/* - * return board name for /proc/cpuinfo - */ -const char *board_get_name(void) -{ - return board.name; -} - -/* - * register & return a new board mac address - */ -static int board_get_mac_address(u8 *mac) -{ - u8 *p; - int count; - - if (mac_addr_used >= nvram.mac_addr_count) { - printk(KERN_ERR PFX "not enough mac address\n"); - return -ENODEV; - } - - memcpy(mac, nvram.mac_addr_base, ETH_ALEN); - p = mac + ETH_ALEN - 1; - count = mac_addr_used; - - while (count--) { - do { - (*p)++; - if (*p != 0) - break; - p--; - } while (p != mac); - } - - if (p == mac) { - printk(KERN_ERR PFX "unable to fetch mac address\n"); - return -ENODEV; - } - - mac_addr_used++; - return 0; -} - static struct mtd_partition mtd_partitions[] = { { .name = "cfe", @@ -750,33 +845,6 @@ static struct platform_device mtd_dev = { }, }; -/* - * Register a sane SPROMv2 to make the on-board - * bcm4318 WLAN work - */ -#ifdef CONFIG_SSB_PCIHOST -static struct ssb_sprom bcm63xx_sprom = { - .revision = 0x02, - .board_rev = 0x17, - .country_code = 0x0, - .ant_available_bg = 0x3, - .pa0b0 = 0x15ae, - .pa0b1 = 0xfa85, - .pa0b2 = 0xfe8d, - .pa1b0 = 0xffff, - .pa1b1 = 0xffff, - .pa1b2 = 0xffff, - .gpio0 = 0xff, - .gpio1 = 0xff, - .gpio2 = 0xff, - .gpio3 = 0xff, - .maxpwr_bg = 0x004c, - .itssi_bg = 0x00, - .boardflags_lo = 0x2848, - .boardflags_hi = 0x0000, -}; -#endif - static struct gpio_led_platform_data bcm63xx_led_data; static struct platform_device bcm63xx_gpio_leds = { @@ -792,6 +860,12 @@ int __init board_register_devices(void) { u32 val; + if (board.has_uart0) + bcm63xx_uart_register(0); + + if (board.has_uart1) + bcm63xx_uart_register(1); + if (board.has_pccard) bcm63xx_pcmcia_register(); @@ -806,17 +880,6 @@ int __init board_register_devices(void) if (board.has_dsp) bcm63xx_dsp_register(&board.dsp); - /* Generate MAC address for WLAN and - * register our SPROM */ -#ifdef CONFIG_SSB_PCIHOST - if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { - memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); - memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); - if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0) - printk(KERN_ERR "failed to register fallback SPROM\n"); - } -#endif - /* read base address of boot chip select (0) */ if (BCMCPU_IS_6345()) val = 0x1fc00000; diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c index 70378bb5e3f..cbb7caf86d7 100644 --- a/arch/mips/bcm63xx/cpu.c +++ b/arch/mips/bcm63xx/cpu.c @@ -36,6 +36,7 @@ static const unsigned long bcm96338_regs_base[] = { [RSET_TIMER] = BCM_6338_TIMER_BASE, [RSET_WDT] = BCM_6338_WDT_BASE, [RSET_UART0] = BCM_6338_UART0_BASE, + [RSET_UART1] = BCM_6338_UART1_BASE, [RSET_GPIO] = BCM_6338_GPIO_BASE, [RSET_SPI] = BCM_6338_SPI_BASE, [RSET_OHCI0] = BCM_6338_OHCI0_BASE, @@ -72,6 +73,7 @@ static const unsigned long bcm96345_regs_base[] = { [RSET_TIMER] = BCM_6345_TIMER_BASE, [RSET_WDT] = BCM_6345_WDT_BASE, [RSET_UART0] = BCM_6345_UART0_BASE, + [RSET_UART1] = BCM_6345_UART1_BASE, [RSET_GPIO] = BCM_6345_GPIO_BASE, [RSET_SPI] = BCM_6345_SPI_BASE, [RSET_UDC0] = BCM_6345_UDC0_BASE, @@ -109,6 +111,7 @@ static const unsigned long bcm96348_regs_base[] = 
{ [RSET_TIMER] = BCM_6348_TIMER_BASE, [RSET_WDT] = BCM_6348_WDT_BASE, [RSET_UART0] = BCM_6348_UART0_BASE, + [RSET_UART1] = BCM_6348_UART1_BASE, [RSET_GPIO] = BCM_6348_GPIO_BASE, [RSET_SPI] = BCM_6348_SPI_BASE, [RSET_OHCI0] = BCM_6348_OHCI0_BASE, @@ -150,6 +153,7 @@ static const unsigned long bcm96358_regs_base[] = { [RSET_TIMER] = BCM_6358_TIMER_BASE, [RSET_WDT] = BCM_6358_WDT_BASE, [RSET_UART0] = BCM_6358_UART0_BASE, + [RSET_UART1] = BCM_6358_UART1_BASE, [RSET_GPIO] = BCM_6358_GPIO_BASE, [RSET_SPI] = BCM_6358_SPI_BASE, [RSET_OHCI0] = BCM_6358_OHCI0_BASE, @@ -170,6 +174,7 @@ static const unsigned long bcm96358_regs_base[] = { static const int bcm96358_irqs[] = { [IRQ_TIMER] = BCM_6358_TIMER_IRQ, [IRQ_UART0] = BCM_6358_UART0_IRQ, + [IRQ_UART1] = BCM_6358_UART1_IRQ, [IRQ_DSL] = BCM_6358_DSL_IRQ, [IRQ_ENET0] = BCM_6358_ENET0_IRQ, [IRQ_ENET1] = BCM_6358_ENET1_IRQ, diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c index b0519461ad9..c2963da0253 100644 --- a/arch/mips/bcm63xx/dev-uart.c +++ b/arch/mips/bcm63xx/dev-uart.c @@ -11,31 +11,65 @@ #include <linux/platform_device.h> #include <bcm63xx_cpu.h> -static struct resource uart_resources[] = { +static struct resource uart0_resources[] = { { - .start = -1, /* filled at runtime */ - .end = -1, /* filled at runtime */ + /* start & end filled at runtime */ .flags = IORESOURCE_MEM, }, { - .start = -1, /* filled at runtime */ + /* start filled at runtime */ .flags = IORESOURCE_IRQ, }, }; -static struct platform_device bcm63xx_uart_device = { - .name = "bcm63xx_uart", - .id = 0, - .num_resources = ARRAY_SIZE(uart_resources), - .resource = uart_resources, +static struct resource uart1_resources[] = { + { + /* start & end filled at runtime */ + .flags = IORESOURCE_MEM, + }, + { + /* start filled at runtime */ + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bcm63xx_uart_devices[] = { + { + .name = "bcm63xx_uart", + .id = 0, + .num_resources = ARRAY_SIZE(uart0_resources), + .resource = uart0_resources, + }, + + { + .name = "bcm63xx_uart", + .id = 1, + .num_resources = ARRAY_SIZE(uart1_resources), + .resource = uart1_resources, + } }; -int __init bcm63xx_uart_register(void) +int __init bcm63xx_uart_register(unsigned int id) { - uart_resources[0].start = bcm63xx_regset_address(RSET_UART0); - uart_resources[0].end = uart_resources[0].start; - uart_resources[0].end += RSET_UART_SIZE - 1; - uart_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0); - return platform_device_register(&bcm63xx_uart_device); + if (id >= ARRAY_SIZE(bcm63xx_uart_devices)) + return -ENODEV; + + if (id == 1 && !BCMCPU_IS_6358()) + return -ENODEV; + + if (id == 0) { + uart0_resources[0].start = bcm63xx_regset_address(RSET_UART0); + uart0_resources[0].end = uart0_resources[0].start + + RSET_UART_SIZE - 1; + uart0_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0); + } + + if (id == 1) { + uart1_resources[0].start = bcm63xx_regset_address(RSET_UART1); + uart1_resources[0].end = uart1_resources[0].start + + RSET_UART_SIZE - 1; + uart1_resources[1].start = bcm63xx_get_irq_number(IRQ_UART1); + } + + return platform_device_register(&bcm63xx_uart_devices[id]); } -arch_initcall(bcm63xx_uart_register); diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c index 87ca3904633..315bc7f79ce 100644 --- a/arch/mips/bcm63xx/gpio.c +++ b/arch/mips/bcm63xx/gpio.c @@ -125,10 +125,10 @@ static struct gpio_chip bcm63xx_gpio_chip = { int __init bcm63xx_gpio_init(void) { + gpio_out_low = bcm_gpio_readl(GPIO_DATA_LO_REG); + gpio_out_high = 
bcm_gpio_readl(GPIO_DATA_HI_REG); bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count(); pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio); return gpiochip_add(&bcm63xx_gpio_chip); } - -arch_initcall(bcm63xx_gpio_init); diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index b321d3b1687..9a06fa9f9f0 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -45,9 +45,6 @@ extern struct plat_smp_ops octeon_smp_ops; extern void pci_console_init(const char *arg); #endif -#ifdef CONFIG_CAVIUM_RESERVE32 -extern uint64_t octeon_reserve32_memory; -#endif static unsigned long long MAX_MEMORY = 512ull << 20; struct octeon_boot_descriptor *octeon_boot_desc_ptr; @@ -186,54 +183,6 @@ void octeon_check_cpu_bist(void) write_octeon_c0_dcacheerr(0); } -#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB -/** - * Called on every core to setup the wired tlb entry needed - * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set. - * - */ -static void octeon_hal_setup_per_cpu_reserved32(void *unused) -{ - /* - * The config has selected to wire the reserve32 memory for all - * userspace applications. We need to put a wired TLB entry in for each - * 512MB of reserve32 memory. We only handle double 256MB pages here, - * so reserve32 must be multiple of 512MB. - */ - uint32_t size = CONFIG_CAVIUM_RESERVE32; - uint32_t entrylo0 = - 0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6); - uint32_t entrylo1 = entrylo0 + (256 << 14); - uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20)); - while (size >= 512) { -#if 0 - pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n", - smp_processor_id(), entryhi); -#endif - add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M); - entrylo0 += 512 << 14; - entrylo1 += 512 << 14; - entryhi += 512 << 20; - size -= 512; - } -} -#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */ - -/** - * Called to release the named block which was used to made sure - * that nobody used the memory for something else during - * init. Now we'll free it so userspace apps can use this - * memory region with bootmem_alloc. - * - * This function is called only once from prom_free_prom_memory(). - */ -void octeon_hal_setup_reserved32(void) -{ -#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB - on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 0, 1); -#endif -} - /** * Reboot Octeon * @@ -294,18 +243,6 @@ static void octeon_halt(void) octeon_kill_core(NULL); } -#if 0 -/** - * Platform time init specifics. - * Returns - */ -void __init plat_time_init(void) -{ - /* Nothing special here, but we are required to have one */ -} - -#endif - /** * Handle all the error condition interrupts that might occur. * @@ -502,25 +439,13 @@ void __init prom_init(void) * memory when it is getting memory from the * bootloader. Later, after the memory allocations are * complete, the reserve32 will be freed. - */ -#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB - if (CONFIG_CAVIUM_RESERVE32 & 0x1ff) - pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. " - "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB " - "is set\n"); - else - addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20, - 0, 0, 512 << 20, - "CAVIUM_RESERVE32", 0); -#else - /* + * * Allocate memory for RESERVED32 aligned on 2MB boundary. This * is in case we later use hugetlb entries with it. 
*/ addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20, 0, 0, 2 << 20, "CAVIUM_RESERVE32", 0); -#endif if (addr < 0) pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n"); else @@ -817,9 +742,4 @@ void prom_free_prom_memory(void) panic("Unable to request_irq(OCTEON_IRQ_RML)\n"); } #endif - - /* This call is here so that it is performed after any TLB - initializations. It needs to be after these in case the - CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set */ - octeon_hal_setup_reserved32(); } diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 51e980290ce..6d99b9d8887 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -279,14 +279,6 @@ static void octeon_cpu_die(unsigned int cpu) uint32_t avail_coremask; struct cvmx_bootmem_named_block_desc *block_desc; -#ifdef CONFIG_CAVIUM_OCTEON_WATCHDOG - /* Disable the watchdog */ - cvmx_ciu_wdogx_t ciu_wdog; - ciu_wdog.u64 = cvmx_read_csr(CVMX_CIU_WDOGX(cpu)); - ciu_wdog.s.mode = 0; - cvmx_write_csr(CVMX_CIU_WDOGX(cpu), ciu_wdog.u64); -#endif - while (per_cpu(cpu_state, cpu) != CPU_DEAD) cpu_relax(); diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index c2f06e38c85..0583bb29150 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.26-rc8 -# Wed Jul 2 17:02:55 2008 +# Linux kernel version: 2.6.34-rc3 +# Sat Apr 3 16:32:11 2010 # CONFIG_MIPS=y @@ -9,20 +9,25 @@ CONFIG_MIPS=y # Machine selection # # CONFIG_MACH_ALCHEMY is not set +# CONFIG_AR7 is not set # CONFIG_BCM47XX is not set +# CONFIG_BCM63XX is not set # CONFIG_MIPS_COBALT is not set # CONFIG_MACH_DECSTATION is not set # CONFIG_MACH_JAZZ is not set # CONFIG_LASAT is not set -# CONFIG_LEMOTE_FULONG is not set +# CONFIG_MACH_LOONGSON is not set # CONFIG_MIPS_MALTA is not set # CONFIG_MIPS_SIM is not set -# CONFIG_MARKEINS is not set +# CONFIG_NEC_MARKEINS is not set # CONFIG_MACH_VR41XX is not set +# CONFIG_NXP_STB220 is not set +# CONFIG_NXP_STB225 is not set # CONFIG_PNX8550_JBS is not set # CONFIG_PNX8550_STB810 is not set # CONFIG_PMC_MSP is not set # CONFIG_PMC_YOSEMITE is not set +# CONFIG_POWERTV is not set # CONFIG_SGI_IP22 is not set # CONFIG_SGI_IP27 is not set # CONFIG_SGI_IP28 is not set @@ -36,10 +41,13 @@ CONFIG_MIPS=y # CONFIG_SIBYTE_SENTOSA is not set CONFIG_SIBYTE_BIGSUR=y # CONFIG_SNI_RM is not set -# CONFIG_TOSHIBA_JMR3927 is not set -# CONFIG_TOSHIBA_RBTX4927 is not set -# CONFIG_TOSHIBA_RBTX4938 is not set +# CONFIG_MACH_TX39XX is not set +# CONFIG_MACH_TX49XX is not set +# CONFIG_MIKROTIK_RB532 is not set # CONFIG_WR_PPMC is not set +# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set +# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set +# CONFIG_ALCHEMY_GPIO_INDIRECT is not set CONFIG_SIBYTE_BCM1x80=y CONFIG_SIBYTE_SB1xxx_SOC=y # CONFIG_CPU_SB1_PASS_1 is not set @@ -48,14 +56,13 @@ CONFIG_SIBYTE_SB1xxx_SOC=y # CONFIG_CPU_SB1_PASS_4 is not set # CONFIG_CPU_SB1_PASS_2_112x is not set # CONFIG_CPU_SB1_PASS_3 is not set -# CONFIG_SIMULATION is not set # CONFIG_SB1_CEX_ALWAYS_FATAL is not set # CONFIG_SB1_CERR_STALL is not set -CONFIG_SIBYTE_CFE=y # CONFIG_SIBYTE_CFE_CONSOLE is not set # CONFIG_SIBYTE_BUS_WATCHER is not set # CONFIG_SIBYTE_TBPROF is not set CONFIG_SIBYTE_HAS_ZBUS_PROFILING=y +CONFIG_LOONGSON_UART_BASE=y CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_ARCH_HAS_ILOG2_U32 is not set # CONFIG_ARCH_HAS_ILOG2_U64 is not set @@ 
-66,15 +73,13 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CMOS_UPDATE=y -CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y -# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_CEVT_BCM1480=y CONFIG_CSRC_BCM1480=y CONFIG_CFE=y CONFIG_DMA_COHERENT=y -CONFIG_EARLY_PRINTK=y CONFIG_SYS_HAS_EARLY_PRINTK=y -# CONFIG_HOTPLUG_CPU is not set # CONFIG_NO_IOPORT is not set CONFIG_CPU_BIG_ENDIAN=y # CONFIG_CPU_LITTLE_ENDIAN is not set @@ -88,7 +93,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5 # # CPU selection # -# CONFIG_CPU_LOONGSON2 is not set +# CONFIG_CPU_LOONGSON2E is not set +# CONFIG_CPU_LOONGSON2F is not set # CONFIG_CPU_MIPS32_R1 is not set # CONFIG_CPU_MIPS32_R2 is not set # CONFIG_CPU_MIPS64_R1 is not set @@ -101,6 +107,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5 # CONFIG_CPU_TX49XX is not set # CONFIG_CPU_R5000 is not set # CONFIG_CPU_R5432 is not set +# CONFIG_CPU_R5500 is not set # CONFIG_CPU_R6000 is not set # CONFIG_CPU_NEVADA is not set # CONFIG_CPU_R8000 is not set @@ -108,6 +115,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5 # CONFIG_CPU_RM7000 is not set # CONFIG_CPU_RM9000 is not set CONFIG_CPU_SB1=y +# CONFIG_CPU_CAVIUM_OCTEON is not set CONFIG_SYS_HAS_CPU_SB1=y CONFIG_WEAK_ORDERING=y CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y @@ -123,11 +131,13 @@ CONFIG_64BIT=y CONFIG_PAGE_SIZE_4KB=y # CONFIG_PAGE_SIZE_8KB is not set # CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_32KB is not set # CONFIG_PAGE_SIZE_64KB is not set # CONFIG_SIBYTE_DMA_PAGEOPS is not set CONFIG_MIPS_MT_DISABLED=y # CONFIG_MIPS_MT_SMP is not set # CONFIG_MIPS_MT_SMTC is not set +# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set CONFIG_CPU_HAS_SYNC=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y @@ -142,18 +152,17 @@ CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y -# CONFIG_SPARSEMEM_STATIC is not set -# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_RESOURCES_64BIT=y +CONFIG_PHYS_ADDR_T_64BIT=y CONFIG_ZONE_DMA_FLAG=0 CONFIG_VIRT_TO_BUS=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_SMP=y CONFIG_SYS_SUPPORTS_SMP=y CONFIG_NR_CPUS_DEFAULT_4=y CONFIG_NR_CPUS=4 -# CONFIG_MIPS_CMP is not set CONFIG_TICK_ONESHOT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y @@ -175,6 +184,7 @@ CONFIG_SECCOMP=y CONFIG_LOCKDEP_SUPPORT=y CONFIG_STACKTRACE_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -188,6 +198,7 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -195,23 +206,39 @@ CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_AUDIT=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=64 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_RCU_FAST_NO_HZ is not set +# CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 # CONFIG_CGROUPS is not set -CONFIG_GROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_USER_SCHED=y -# CONFIG_CGROUP_SCHED is not set -CONFIG_SYSFS_DEPRECATED=y -CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_SYSFS_DEPRECATED_V2 is not set CONFIG_RELAY=y -# CONFIG_NAMESPACES is not set 
+CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_LZO is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y CONFIG_EMBEDDED=y # CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS=y @@ -222,29 +249,36 @@ CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y # CONFIG_PCSPKR_PLATFORM is not set -CONFIG_COMPAT_BRK=y CONFIG_BASE_FULL=y CONFIG_FUTEX=y -CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y +CONFIG_PCI_QUIRKS=y +CONFIG_COMPAT_BRK=y CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y -# CONFIG_HAVE_KPROBES is not set -# CONFIG_HAVE_KRETPROBES is not set -# CONFIG_HAVE_DMA_ATTRS is not set -CONFIG_PROC_PAGE_MONITOR=y +CONFIG_HAVE_SYSCALL_WRAPPERS=y +CONFIG_USE_GENERIC_SMP_HELPERS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_SLOW_WORK is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -252,26 +286,52 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_KMOD=y CONFIG_STOP_MACHINE=y CONFIG_BLOCK=y -# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set CONFIG_BLOCK_COMPAT=y # # IO Schedulers # CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y -CONFIG_DEFAULT_AS=y # CONFIG_DEFAULT_DEADLINE is not set -# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="anticipatory" -CONFIG_CLASSIC_RCU=y +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +CONFIG_INLINE_SPIN_UNLOCK=y +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +CONFIG_INLINE_READ_UNLOCK=y +# CONFIG_INLINE_READ_UNLOCK_BH is not set +CONFIG_INLINE_READ_UNLOCK_IRQ=y +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +CONFIG_INLINE_WRITE_UNLOCK=y +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +CONFIG_MUTEX_SPIN_ON_OWNER=y +# CONFIG_FREEZER is not set # # Bus options (PCI, PCMCIA, EISA, ISA, TC) @@ -280,8 +340,9 @@ CONFIG_HW_HAS_PCI=y CONFIG_PCI=y CONFIG_PCI_DOMAINS=y # CONFIG_ARCH_SUPPORTS_MSI is not set -CONFIG_PCI_LEGACY=y CONFIG_PCI_DEBUG=y +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set CONFIG_MMU=y 
CONFIG_ZONE_DMA32=y # CONFIG_PCCARD is not set @@ -291,6 +352,8 @@ CONFIG_ZONE_DMA32=y # Executable file formats # CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set # CONFIG_BINFMT_MISC is not set CONFIG_MIPS32_COMPAT=y CONFIG_COMPAT=y @@ -304,23 +367,20 @@ CONFIG_BINFMT_ELF32=y # CONFIG_PM=y # CONFIG_PM_DEBUG is not set - -# -# Networking -# +# CONFIG_PM_RUNTIME is not set CONFIG_NET=y # # Networking options # CONFIG_PACKET=y -CONFIG_PACKET_MMAP=y CONFIG_UNIX=y CONFIG_XFRM=y CONFIG_XFRM_USER=m # CONFIG_XFRM_SUB_POLICY is not set CONFIG_XFRM_MIGRATE=y # CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=m CONFIG_NET_KEY=y CONFIG_NET_KEY_MIGRATE=y CONFIG_INET=y @@ -353,36 +413,6 @@ CONFIG_INET_TCP_DIAG=y CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" CONFIG_TCP_MD5SIG=y -CONFIG_IP_VS=m -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m CONFIG_IPV6=m CONFIG_IPV6_PRIVACY=y CONFIG_IPV6_ROUTER_PREF=y @@ -399,11 +429,13 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m CONFIG_INET6_XFRM_MODE_BEET=m CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y CONFIG_IPV6_NDISC_NODETYPE=y CONFIG_IPV6_TUNNEL=m CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y # CONFIG_IPV6_MROUTE is not set +CONFIG_NETLABEL=y CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set @@ -421,19 +453,53 @@ CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CT_NETLINK=m CONFIG_NETFILTER_XTABLES=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m # # IP: Netfilter Configuration # +CONFIG_NF_DEFRAG_IPV4=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NF_CONNTRACK_PROC_COMPAT=y CONFIG_IP_NF_IPTABLES=m @@ -459,22 +525,44 @@ CONFIG_IP_NF_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m -# CONFIG_IP_DCCP is not set +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP_CCID2_DEBUG is not set +CONFIG_IP_DCCP_CCID3=y +# CONFIG_IP_DCCP_CCID3_DEBUG is not set 
+CONFIG_IP_DCCP_CCID3_RTO=100 +CONFIG_IP_DCCP_TFRC_LIB=y + +# +# DCCP Kernel Hacking +# +# CONFIG_IP_DCCP_DEBUG is not set CONFIG_IP_SCTP=m # CONFIG_SCTP_DBG_MSG is not set # CONFIG_SCTP_DBG_OBJCNT is not set # CONFIG_SCTP_HMAC_NONE is not set -# CONFIG_SCTP_HMAC_SHA1 is not set -CONFIG_SCTP_HMAC_MD5=y +CONFIG_SCTP_HMAC_SHA1=y +# CONFIG_SCTP_HMAC_MD5 is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set -# CONFIG_BRIDGE is not set -# CONFIG_VLAN_8021Q is not set +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y # CONFIG_DECNET is not set +CONFIG_LLC=m # CONFIG_LLC2 is not set # CONFIG_IPX is not set # CONFIG_ATALK is not set @@ -482,26 +570,47 @@ CONFIG_SCTP_HMAC_MD5=y # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set -# CONFIG_HAMRADIO is not set +CONFIG_HAMRADIO=y + +# +# Packet Radio protocols +# +CONFIG_AX25=m +CONFIG_AX25_DAMA_SLAVE=y +CONFIG_NETROM=m +CONFIG_ROSE=m + +# +# AX.25 network device drivers +# +CONFIG_MKISS=m +CONFIG_6PACK=m +CONFIG_BPQETHER=m +CONFIG_BAYCOM_SER_FDX=m +CONFIG_BAYCOM_SER_HDX=m +CONFIG_YAM=m # CONFIG_CAN is not set # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +# CONFIG_LIB80211 is not set # -# Wireless +# CFG80211 needs to be enabled for MAC80211 # -# CONFIG_CFG80211 is not set -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -513,9 +622,12 @@ CONFIG_FIB_RULES=y # Generic Driver Options # CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set CONFIG_STANDALONE=y CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=m +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" # CONFIG_DEBUG_DRIVER is not set # CONFIG_DEBUG_DEVRES is not set # CONFIG_SYS_HYPERVISOR is not set @@ -530,33 +642,53 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# CONFIG_BLK_DEV_NBD=m # CONFIG_BLK_DEV_SX8 is not set # CONFIG_BLK_DEV_RAM is not set # CONFIG_CDROM_PKTCDVD is not set # CONFIG_ATA_OVER_ETH is not set +# CONFIG_BLK_DEV_HD is not set CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set # CONFIG_PHANTOM is not set -# CONFIG_EEPROM_93CX6 is not set CONFIG_SGI_IOC4=m # CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_ISL29003 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_DS1682 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=y +CONFIG_EEPROM_MAX6875=y +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_CB710_CORE is not set CONFIG_HAVE_IDE=y CONFIG_IDE=y -CONFIG_IDE_MAX_HWIFS=4 -CONFIG_BLK_DEV_IDE=y # # Please see Documentation/ide/ide.txt for help/info on IDE drives # +CONFIG_IDE_XFER_MODE=y +CONFIG_IDE_TIMINGS=y +CONFIG_IDE_ATAPI=y # CONFIG_BLK_DEV_IDE_SATA is not set -CONFIG_BLK_DEV_IDEDISK=y -# CONFIG_IDEDISK_MULTI_MODE is not set +CONFIG_IDE_GD=y +CONFIG_IDE_GD_ATA=y +# CONFIG_IDE_GD_ATAPI is not set 
CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y CONFIG_BLK_DEV_IDETAPE=y -CONFIG_BLK_DEV_IDEFLOPPY=y -# CONFIG_BLK_DEV_IDESCSI is not set # CONFIG_IDE_TASK_IOCTL is not set CONFIG_IDE_PROC_FS=y @@ -581,14 +713,13 @@ CONFIG_BLK_DEV_IDEDMA_PCI=y # CONFIG_BLK_DEV_AMD74XX is not set CONFIG_BLK_DEV_CMD64X=y # CONFIG_BLK_DEV_TRIFLEX is not set -# CONFIG_BLK_DEV_CY82C693 is not set # CONFIG_BLK_DEV_CS5520 is not set # CONFIG_BLK_DEV_CS5530 is not set -# CONFIG_BLK_DEV_HPT34X is not set # CONFIG_BLK_DEV_HPT366 is not set # CONFIG_BLK_DEV_JMICRON is not set # CONFIG_BLK_DEV_SC1200 is not set # CONFIG_BLK_DEV_PIIX is not set +# CONFIG_BLK_DEV_IT8172 is not set CONFIG_BLK_DEV_IT8213=m # CONFIG_BLK_DEV_IT821X is not set # CONFIG_BLK_DEV_NS87415 is not set @@ -600,14 +731,12 @@ CONFIG_BLK_DEV_IT8213=m # CONFIG_BLK_DEV_TRM290 is not set # CONFIG_BLK_DEV_VIA82CXXX is not set CONFIG_BLK_DEV_TC86C001=m -# CONFIG_BLK_DEV_IDE_SWARM is not set CONFIG_BLK_DEV_IDEDMA=y -# CONFIG_BLK_DEV_HD_ONLY is not set -# CONFIG_BLK_DEV_HD is not set # # SCSI device support # +CONFIG_SCSI_MOD=y # CONFIG_RAID_ATTRS is not set CONFIG_SCSI=y CONFIG_SCSI_DMA=y @@ -625,10 +754,6 @@ CONFIG_BLK_DEV_SR=m CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m - -# -# Some SCSI devices (e.g. CD jukebox) support multiple LUNs -# # CONFIG_SCSI_MULTI_LUN is not set # CONFIG_SCSI_CONSTANTS is not set # CONFIG_SCSI_LOGGING is not set @@ -645,27 +770,36 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_SRP_ATTRS is not set CONFIG_SCSI_LOWLEVEL=y # CONFIG_ISCSI_TCP is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set # CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set # CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set # CONFIG_SCSI_ACARD is not set # CONFIG_SCSI_AACRAID is not set # CONFIG_SCSI_AIC7XXX is not set # CONFIG_SCSI_AIC7XXX_OLD is not set # CONFIG_SCSI_AIC79XX is not set # CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set # CONFIG_SCSI_DPT_I2O is not set # CONFIG_SCSI_ADVANSYS is not set # CONFIG_SCSI_ARCMSR is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set # CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT2SAS is not set # CONFIG_SCSI_HPTIOP is not set +# CONFIG_LIBFC is not set +# CONFIG_LIBFCOE is not set +# CONFIG_FCOE is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_FUTURE_DOMAIN is not set # CONFIG_SCSI_IPS is not set # CONFIG_SCSI_INITIO is not set # CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_MVSAS is not set # CONFIG_SCSI_STEX is not set # CONFIG_SCSI_SYM53C8XX_2 is not set # CONFIG_SCSI_IPR is not set @@ -676,9 +810,15 @@ CONFIG_SCSI_LOWLEVEL=y # CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_DC390T is not set # CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set # CONFIG_SCSI_SRP is not set +# CONFIG_SCSI_BFA_FC is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set CONFIG_ATA=y # CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y CONFIG_SATA_PMP=y # CONFIG_SATA_AHCI is not set CONFIG_SATA_SIL24=y @@ -700,6 +840,7 @@ CONFIG_ATA_SFF=y # CONFIG_PATA_ALI is not set # CONFIG_PATA_AMD is not set # CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATP867X is not set # CONFIG_PATA_ATIIXP is not set # CONFIG_PATA_CMD640_PCI is not set # CONFIG_PATA_CMD64X is not set @@ -715,6 +856,7 @@ CONFIG_ATA_SFF=y # CONFIG_PATA_IT821X is not set # CONFIG_PATA_IT8213 is not set # CONFIG_PATA_JMICRON 
is not set +# CONFIG_PATA_LEGACY is not set # CONFIG_PATA_TRIFLEX is not set # CONFIG_PATA_MARVELL is not set # CONFIG_PATA_MPIIX is not set @@ -725,14 +867,16 @@ CONFIG_ATA_SFF=y # CONFIG_PATA_NS87415 is not set # CONFIG_PATA_OPTI is not set # CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set # CONFIG_PATA_PDC_OLD is not set # CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set # CONFIG_PATA_RZ1000 is not set # CONFIG_PATA_SC1200 is not set # CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_PDC2027X is not set CONFIG_PATA_SIL680=y # CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set # CONFIG_PATA_VIA is not set # CONFIG_PATA_WINBOND is not set # CONFIG_PATA_PLATFORM is not set @@ -745,13 +889,16 @@ CONFIG_PATA_SIL680=y # # -# Enable only one of the two stacks, unless you know what you are doing +# You can enable one or both FireWire driver stacks. +# + +# +# The newer stack is recommended. # # CONFIG_FIREWIRE is not set # CONFIG_IEEE1394 is not set # CONFIG_I2O is not set CONFIG_NETDEVICES=y -# CONFIG_NETDEVICES_MULTIQUEUE is not set # CONFIG_DUMMY is not set # CONFIG_BONDING is not set # CONFIG_MACVLAN is not set @@ -774,6 +921,9 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y @@ -783,23 +933,33 @@ CONFIG_MII=y # CONFIG_SUNGEM is not set # CONFIG_CASSINI is not set # CONFIG_NET_VENDOR_3COM is not set +# CONFIG_SMC91X is not set # CONFIG_DM9000 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set # CONFIG_NET_TULIP is not set # CONFIG_HP100 is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set # CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_NET_PCI is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_ATL2 is not set CONFIG_NETDEV_1000=y # CONFIG_ACENIC is not set # CONFIG_DL2K is not set # CONFIG_E1000 is not set # CONFIG_E1000E is not set -# CONFIG_E1000E_ENABLED is not set # CONFIG_IP1000 is not set # CONFIG_IGB is not set +# CONFIG_IGBVF is not set # CONFIG_NS83820 is not set # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set @@ -811,29 +971,42 @@ CONFIG_SB1250_MAC=y # CONFIG_VIA_VELOCITY is not set # CONFIG_TIGON3 is not set # CONFIG_BNX2 is not set +# CONFIG_CNIC is not set # CONFIG_QLA3XXX is not set # CONFIG_ATL1 is not set +# CONFIG_ATL1E is not set +# CONFIG_ATL1C is not set +# CONFIG_JME is not set CONFIG_NETDEV_10000=y +CONFIG_MDIO=m # CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3_DEPENDS=y CONFIG_CHELSIO_T3=m +# CONFIG_ENIC is not set # CONFIG_IXGBE is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set +# CONFIG_VXGE is not set # CONFIG_MYRI10GE is not set CONFIG_NETXEN_NIC=m # CONFIG_NIU is not set +# CONFIG_MLX4_EN is not set # CONFIG_MLX4_CORE is not set # CONFIG_TEHUTI is not set # CONFIG_BNX2X is not set +# CONFIG_QLCNIC is not set +# CONFIG_QLGE is not set # CONFIG_SFC is not set +# CONFIG_BE2NET is not set # CONFIG_TR is not set +CONFIG_WLAN=y +# CONFIG_ATMEL is not set +# CONFIG_PRISM54 is not set +# CONFIG_HOSTAP is not set # -# Wireless LAN +# Enable WiMAX 
(Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # CONFIG_WAN is not set # CONFIG_FDDI is not set # CONFIG_HIPPI is not set @@ -856,6 +1029,7 @@ CONFIG_SLIP_MODE_SLIP6=y # CONFIG_NETCONSOLE is not set # CONFIG_NETPOLL is not set # CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_VMXNET3 is not set # CONFIG_ISDN is not set # CONFIG_PHONE is not set @@ -873,6 +1047,7 @@ CONFIG_SERIO_SERPORT=y # CONFIG_SERIO_PCIPS2 is not set # CONFIG_SERIO_LIBPS2 is not set CONFIG_SERIO_RAW=m +# CONFIG_SERIO_ALTERA_PS2 is not set # CONFIG_GAMEPORT is not set # @@ -893,8 +1068,6 @@ CONFIG_SERIAL_NONSTANDARD=y # CONFIG_N_HDLC is not set # CONFIG_RISCOM8 is not set # CONFIG_SPECIALIX is not set -# CONFIG_SX is not set -# CONFIG_RIO is not set # CONFIG_STALDRV is not set # CONFIG_NOZOMI is not set @@ -911,7 +1084,9 @@ CONFIG_SERIAL_SB1250_DUART_CONSOLE=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_TIMBERDALE is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 # CONFIG_IPMI_HANDLER is not set @@ -923,89 +1098,99 @@ CONFIG_LEGACY_PTY_COUNT=256 CONFIG_DEVPORT=y CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_HELPER_AUTO=y # # I2C Hardware Bus support # + +# +# PC SMBus host controller drivers +# # CONFIG_I2C_ALI1535 is not set # CONFIG_I2C_ALI1563 is not set # CONFIG_I2C_ALI15X3 is not set # CONFIG_I2C_AMD756 is not set # CONFIG_I2C_AMD8111 is not set # CONFIG_I2C_I801 is not set -# CONFIG_I2C_I810 is not set +# CONFIG_I2C_ISCH is not set # CONFIG_I2C_PIIX4 is not set # CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_PROSAVAGE is not set -# CONFIG_I2C_SAVAGE4 is not set -CONFIG_I2C_SIBYTE=y -# CONFIG_I2C_SIMTEC is not set # CONFIG_I2C_SIS5595 is not set # CONFIG_I2C_SIS630 is not set # CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_STUB is not set # CONFIG_I2C_VIA is not set # CONFIG_I2C_VIAPRO is not set -# CONFIG_I2C_VOODOO3 is not set -# CONFIG_I2C_PCA_PLATFORM is not set # -# Miscellaneous I2C Chip support +# I2C system bus drivers (mostly embedded / system-on-chip) # -# CONFIG_DS1682 is not set -CONFIG_EEPROM_LEGACY=y -CONFIG_SENSORS_PCF8574=y -# CONFIG_PCF8575 is not set -CONFIG_SENSORS_PCF8591=y -CONFIG_EEPROM_MAX6875=y -# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +CONFIG_I2C_SIBYTE=y +# CONFIG_I2C_STUB is not set CONFIG_I2C_DEBUG_CORE=y CONFIG_I2C_DEBUG_ALGO=y CONFIG_I2C_DEBUG_BUS=y -CONFIG_I2C_DEBUG_CHIP=y # CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set # CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # # Multifunction device drivers # +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# 
-# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_LPC_SCH is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support # +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 # CONFIG_DRM is not set # CONFIG_VGASTATE is not set # CONFIG_VIDEO_OUTPUT_CONTROL is not set @@ -1016,10 +1201,6 @@ CONFIG_SSB_POSSIBLE=y # Display device support # # CONFIG_DISPLAY_SUPPORT is not set - -# -# Sound -# # CONFIG_SOUND is not set CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y @@ -1030,9 +1211,18 @@ CONFIG_USB_ARCH_HAS_EHCI=y # CONFIG_USB_OTG_BLACKLIST_HUB is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# Enable Host or Gadget support to see Inventra options +# + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +# CONFIG_UWB is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set @@ -1040,41 +1230,66 @@ CONFIG_USB_ARCH_HAS_EHCI=y # CONFIG_INFINIBAND is not set CONFIG_RTC_LIB=y # CONFIG_RTC_CLASS is not set +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set # +# TI VLYNQ +# +# CONFIG_STAGING is not set + +# # File systems # CONFIG_EXT2_FS=m CONFIG_EXT2_FS_XATTR=y -# CONFIG_EXT2_FS_POSIX_ACL is not set -# CONFIG_EXT2_FS_SECURITY is not set -# CONFIG_EXT2_FS_XIP is not set -CONFIG_EXT3_FS=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT2_FS_XIP=y +CONFIG_EXT3_FS=m +CONFIG_EXT3_DEFAULTS_TO_ORDERED=y CONFIG_EXT3_FS_XATTR=y -# CONFIG_EXT3_FS_POSIX_ACL is not set -# CONFIG_EXT3_FS_SECURITY is not set -# CONFIG_EXT4DEV_FS is not set -CONFIG_JBD=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_XATTR=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_FS_XIP=y +CONFIG_JBD=m +CONFIG_JBD2=y CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set -# CONFIG_FS_POSIX_ACL is not set +CONFIG_FS_POSIX_ACL=y # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y CONFIG_DNOTIFY=y CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QUOTA_TREE=m # CONFIG_QFMT_V1 is not set CONFIG_QFMT_V2=m CONFIG_QUOTACTL=y CONFIG_AUTOFS_FS=m CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set # # CD-ROM/DVD Filesystems @@ -1103,15 +1318,13 @@ CONFIG_NTFS_RW=y CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y # CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set CONFIG_CONFIGFS_FS=m - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_ECRYPT_FS is not set @@ -1120,9 +1333,12 @@ CONFIG_CONFIGFS_FS=m # CONFIG_BEFS_FS is not 
set # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set +# CONFIG_LOGFS is not set # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_ROMFS_FS is not set @@ -1133,16 +1349,17 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3=y # CONFIG_NFS_V3_ACL is not set # CONFIG_NFS_V4 is not set -# CONFIG_NFSD is not set CONFIG_ROOT_NFS=y +# CONFIG_NFSD is not set CONFIG_LOCKD=y CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y -# CONFIG_SUNRPC_BIND34 is not set -# CONFIG_RPCSEC_GSS_KRB5 is not set -# CONFIG_RPCSEC_GSS_SPKM3 is not set +CONFIG_SUNRPC_GSS=m +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_SPKM3=m # CONFIG_SMB_FS is not set +# CONFIG_CEPH_FS is not set # CONFIG_CIFS is not set # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set @@ -1205,12 +1422,18 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=2048 CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set # CONFIG_DEBUG_FS is not set # CONFIG_HEADERS_CHECK is not set CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1219,23 +1442,53 @@ CONFIG_SCHED_DEBUG=y # CONFIG_DEBUG_RT_MUTEXES is not set # CONFIG_RT_MUTEX_TESTER is not set # CONFIG_DEBUG_SPINLOCK is not set -CONFIG_DEBUG_MUTEXES=y +# CONFIG_DEBUG_MUTEXES is not set # CONFIG_DEBUG_LOCK_ALLOC is not set # CONFIG_PROVE_LOCKING is not set # CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +CONFIG_DEBUG_SPINLOCK_SLEEP=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_WRITECOUNT is not set -# CONFIG_DEBUG_LIST is not set +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_LIST=y # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_DETECTOR=y # CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_EARLY_PRINTK=y # CONFIG_CMDLINE_BOOL is not set # CONFIG_DEBUG_STACK_USAGE is not set # CONFIG_SB1XXX_CORELIS is not set @@ -1246,20 +1499,50 @@ CONFIG_DEBUG_MUTEXES=y # 
CONFIG_KEYS=y CONFIG_KEYS_DEBUG_PROC_KEYS=y -# CONFIG_SECURITY is not set -# CONFIG_SECURITY_FILE_CAPABILITIES is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_NETWORK_XFRM=y +# CONFIG_SECURITY_PATH is not set +CONFIG_LSM_MMAP_MIN_ADDR=65536 +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_SMACK is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" CONFIG_CRYPTO=y # # Crypto core or helper # +# CONFIG_CRYPTO_FIPS is not set CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y CONFIG_CRYPTO_GF128MUL=m CONFIG_CRYPTO_NULL=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y # CONFIG_CRYPTO_CRYPTD is not set CONFIG_CRYPTO_AUTHENC=m # CONFIG_CRYPTO_TEST is not set @@ -1276,7 +1559,7 @@ CONFIG_CRYPTO_SEQIV=m # CONFIG_CRYPTO_CBC=m CONFIG_CRYPTO_CTR=m -# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -1287,14 +1570,20 @@ CONFIG_CRYPTO_XTS=m # CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m # # Digest # -# CONFIG_CRYPTO_CRC32C is not set +CONFIG_CRYPTO_CRC32C=m +CONFIG_CRYPTO_GHASH=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA1=m CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m @@ -1325,25 +1614,36 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m # Compression # CONFIG_CRYPTO_DEFLATE=m -# CONFIG_CRYPTO_LZO is not set +CONFIG_CRYPTO_ZLIB=m +CONFIG_CRYPTO_LZO=m + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_HIFN_795X is not set +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y -# CONFIG_GENERIC_FIND_FIRST_BIT is not set +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m -# CONFIG_CRC16 is not set +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=m CONFIG_CRC_ITU_T=m CONFIG_CRC32=y -# CONFIG_CRC7 is not set +CONFIG_CRC7=m CONFIG_LIBCRC32C=m CONFIG_AUDIT_GENERIC=y -CONFIG_ZLIB_INFLATE=m +CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m -CONFIG_PLIST=y +CONFIG_LZO_COMPRESS=m +CONFIG_LZO_DECOMPRESS=m +CONFIG_DECOMPRESS_GZIP=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h index 1dd74fbdc09..9252d9b50e5 100644 --- a/arch/mips/include/asm/abi.h +++ b/arch/mips/include/asm/abi.h @@ -13,12 +13,14 @@ #include <asm/siginfo.h> struct mips_abi { - int (* const setup_frame)(struct k_sigaction * ka, + int (* const setup_frame)(void *sig_return, struct k_sigaction *ka, struct pt_regs *regs, int signr, sigset_t *set); - int (* const setup_rt_frame)(struct k_sigaction * ka, + const unsigned long signal_return_offset; + int (* const setup_rt_frame)(void *sig_return, struct 
k_sigaction *ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info); + const unsigned long rt_signal_return_offset; const unsigned long restart; }; diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index e53d7bed5cd..ea77a42c5f8 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -310,6 +310,7 @@ do { \ #endif /* CONFIG_64BIT */ +struct pt_regs; struct task_struct; extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs); @@ -367,4 +368,8 @@ extern const char *__elf_platform; #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) #endif +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); #endif /* _ASM_ELF_H */ diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index aecada6f611..3b409270556 100644 --- a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h @@ -41,7 +41,11 @@ struct mips_fpu_emulator_stats { DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); #define MIPS_FPU_EMU_INC_STATS(M) \ - cpu_local_wrap(__local_inc(&__get_cpu_var(fpuemustats).M)) +do { \ + preempt_disable(); \ + __local_inc(&__get_cpu_var(fpuemustats).M); \ + preempt_enable(); \ +} while (0) #else #define MIPS_FPU_EMU_INC_STATS(M) do { } while (0) diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h index b12c4aca2cc..96a2391ad85 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h @@ -85,6 +85,7 @@ enum bcm63xx_regs_set { RSET_TIMER, RSET_WDT, RSET_UART0, + RSET_UART1, RSET_GPIO, RSET_SPI, RSET_UDC0, @@ -123,6 +124,7 @@ enum bcm63xx_regs_set { #define BCM_6338_TIMER_BASE (0xfffe0200) #define BCM_6338_WDT_BASE (0xfffe021c) #define BCM_6338_UART0_BASE (0xfffe0300) +#define BCM_6338_UART1_BASE (0xdeadbeef) #define BCM_6338_GPIO_BASE (0xfffe0400) #define BCM_6338_SPI_BASE (0xfffe0c00) #define BCM_6338_UDC0_BASE (0xdeadbeef) @@ -153,6 +155,7 @@ enum bcm63xx_regs_set { #define BCM_6345_TIMER_BASE (0xfffe0200) #define BCM_6345_WDT_BASE (0xfffe021c) #define BCM_6345_UART0_BASE (0xfffe0300) +#define BCM_6345_UART1_BASE (0xdeadbeef) #define BCM_6345_GPIO_BASE (0xfffe0400) #define BCM_6345_SPI_BASE (0xdeadbeef) #define BCM_6345_UDC0_BASE (0xdeadbeef) @@ -182,6 +185,7 @@ enum bcm63xx_regs_set { #define BCM_6348_TIMER_BASE (0xfffe0200) #define BCM_6348_WDT_BASE (0xfffe021c) #define BCM_6348_UART0_BASE (0xfffe0300) +#define BCM_6348_UART1_BASE (0xdeadbeef) #define BCM_6348_GPIO_BASE (0xfffe0400) #define BCM_6348_SPI_BASE (0xfffe0c00) #define BCM_6348_UDC0_BASE (0xfffe1000) @@ -208,6 +212,7 @@ enum bcm63xx_regs_set { #define BCM_6358_TIMER_BASE (0xfffe0040) #define BCM_6358_WDT_BASE (0xfffe005c) #define BCM_6358_UART0_BASE (0xfffe0100) +#define BCM_6358_UART1_BASE (0xfffe0120) #define BCM_6358_GPIO_BASE (0xfffe0080) #define BCM_6358_SPI_BASE (0xdeadbeef) #define BCM_6358_UDC0_BASE (0xfffe0800) @@ -246,6 +251,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) return BCM_6338_WDT_BASE; case RSET_UART0: return BCM_6338_UART0_BASE; + case RSET_UART1: + return BCM_6338_UART1_BASE; case RSET_GPIO: return BCM_6338_GPIO_BASE; case RSET_SPI: @@ -292,6 +299,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) return BCM_6345_WDT_BASE; case RSET_UART0: return BCM_6345_UART0_BASE; + case RSET_UART1: + return BCM_6345_UART1_BASE; case 
RSET_GPIO: return BCM_6345_GPIO_BASE; case RSET_SPI: @@ -338,6 +347,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) return BCM_6348_WDT_BASE; case RSET_UART0: return BCM_6348_UART0_BASE; + case RSET_UART1: + return BCM_6348_UART1_BASE; case RSET_GPIO: return BCM_6348_GPIO_BASE; case RSET_SPI: @@ -384,6 +395,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) return BCM_6358_WDT_BASE; case RSET_UART0: return BCM_6358_UART0_BASE; + case RSET_UART1: + return BCM_6358_UART1_BASE; case RSET_GPIO: return BCM_6358_GPIO_BASE; case RSET_SPI: @@ -429,6 +442,7 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) enum bcm63xx_irq { IRQ_TIMER = 0, IRQ_UART0, + IRQ_UART1, IRQ_DSL, IRQ_ENET0, IRQ_ENET1, @@ -510,6 +524,7 @@ enum bcm63xx_irq { */ #define BCM_6358_TIMER_IRQ (IRQ_INTERNAL_BASE + 0) #define BCM_6358_UART0_IRQ (IRQ_INTERNAL_BASE + 2) +#define BCM_6358_UART1_IRQ (IRQ_INTERNAL_BASE + 3) #define BCM_6358_OHCI0_IRQ (IRQ_INTERNAL_BASE + 5) #define BCM_6358_ENET1_IRQ (IRQ_INTERNAL_BASE + 6) #define BCM_6358_ENET0_IRQ (IRQ_INTERNAL_BASE + 8) diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h new file mode 100644 index 00000000000..23c705baf17 --- /dev/null +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h @@ -0,0 +1,6 @@ +#ifndef BCM63XX_DEV_UART_H_ +#define BCM63XX_DEV_UART_H_ + +int bcm63xx_uart_register(unsigned int id); + +#endif /* BCM63XX_DEV_UART_H_ */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h index 76a0b7216af..43d4da0b1e9 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h @@ -10,6 +10,10 @@ static inline unsigned long bcm63xx_gpio_count(void) switch (bcm63xx_get_cpu_id()) { case BCM6358_CPU_ID: return 40; + case BCM6338_CPU_ID: + return 8; + case BCM6345_CPU_ID: + return 16; case BCM6348_CPU_ID: default: return 37; diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h index 6479090a410..474daaa5349 100644 --- a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h +++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h @@ -45,6 +45,8 @@ struct board_info { unsigned int has_ohci0:1; unsigned int has_ehci0:1; unsigned int has_dsp:1; + unsigned int has_uart0:1; + unsigned int has_uart1:1; /* ethernet config */ struct bcm63xx_enet_platform_data enet0; diff --git a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h index 71742bac940..f453c01d067 100644 --- a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h @@ -24,7 +24,7 @@ #define cpu_has_smartmips 0 #define cpu_has_vtag_icache 0 -#if !defined(BCMCPU_RUNTIME_DETECT) && (defined(CONFIG_BCMCPU_IS_6348) || defined(CONFIG_CPU_IS_6338) || defined(CONFIG_CPU_IS_BCM6345)) +#if !defined(BCMCPU_RUNTIME_DETECT) && (defined(CONFIG_BCM63XX_CPU_6348) || defined(CONFIG_BCM63XX_CPU_6345) || defined(CONFIG_BCM63XX_CPU_6338)) #define cpu_has_dc_aliases 0 #endif diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h index 7950ef4f032..743385d7b5f 100644 --- a/arch/mips/include/asm/mach-sibyte/war.h +++ b/arch/mips/include/asm/mach-sibyte/war.h @@ -16,7 +16,11 @@ #if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || 
\ defined(CONFIG_SB1_PASS_2_WORKAROUNDS) -#define BCM1250_M3_WAR 1 +#ifndef __ASSEMBLY__ +extern int sb1250_m3_workaround_needed(void); +#endif + +#define BCM1250_M3_WAR sb1250_m3_workaround_needed() #define SIBYTE_1956_WAR 1 #else diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index 4063edd7962..c436138945a 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h @@ -1,6 +1,9 @@ #ifndef __ASM_MMU_H #define __ASM_MMU_H -typedef unsigned long mm_context_t[NR_CPUS]; +typedef struct { + unsigned long asid[NR_CPUS]; + void *vdso; +} mm_context_t; #endif /* __ASM_MMU_H */ diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 145bb81ccaa..d9592733a7b 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -104,7 +104,7 @@ extern unsigned long smtc_asid_mask; #endif -#define cpu_context(cpu, mm) ((mm)->context[cpu]) +#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) #define asid_cache(cpu) (cpu_data[cpu].asid_cache) diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index ac32572430f..a16beafcea9 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -188,8 +188,10 @@ typedef struct { unsigned long pgprot; } pgprot_t; #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) -#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) +#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \ + PHYS_OFFSET) +#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET - \ + PHYS_OFFSET) #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 087a8884ef0..ab387910009 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -33,13 +33,19 @@ extern void (*cpu_wait)(void); extern unsigned int vced_count, vcei_count; +/* + * A special page (the vdso) is mapped into all processes at the very + * top of the virtual memory space. + */ +#define SPECIAL_PAGES_SIZE PAGE_SIZE + #ifdef CONFIG_32BIT /* * User space process size: 2GB. This is hardcoded into a few places, * so don't change it unless you know what you are doing. */ #define TASK_SIZE 0x7fff8000UL -#define STACK_TOP TASK_SIZE +#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE) /* * This decides where the kernel will search for a free chunk of vm @@ -59,7 +65,8 @@ extern unsigned int vced_count, vcei_count; #define TASK_SIZE32 0x7fff8000UL #define TASK_SIZE 0x10000000000UL #define STACK_TOP \ - (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE) + (((test_thread_flag(TIF_32BIT_ADDR) ? \ TASK_SIZE32 : TASK_SIZE) & PAGE_MASK) - SPECIAL_PAGES_SIZE) /* * This decides where the kernel will search for a free chunk of vm diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index 3b6da3330e3..c8419129e77 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -121,6 +121,25 @@ .endm #else .macro get_saved_sp /* Uniprocessor variation */ +#ifdef CONFIG_CPU_LOONGSON2F + /* + * Clear BTB (branch target buffer), forbid RAS (return address + * stack) to work around the out-of-order issue in Loongson2F + * via its diagnostic register.
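+ * Editor's note, a hedged reading of the sequence below rather than
+ * datasheet fact: the four chained jal/nop pairs refill the return
+ * address stack and branch target buffer with known local targets
+ * (k0 preserves ra, which jal clobbers), and writing the value 3 to
+ * CP0 diagnostic register $22 is assumed to be what forbids the RAS
+ * and clears the BTB.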
+ */ + move k0, ra + jal 1f + nop +1: jal 1f + nop +1: jal 1f + nop +1: jal 1f + nop +1: move ra, k0 + li k0, 3 + mtc0 k0, $22 +#endif /* CONFIG_CPU_LOONGSON2F */ #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) lui k1, %hi(kernelsp) #else diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index b99bd07e199..11a8b525254 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -84,6 +84,7 @@ Ip_u2s3u1(_lw); Ip_u1u2u3(_mfc0); Ip_u1u2u3(_mtc0); Ip_u2u1u3(_ori); +Ip_u3u1u2(_or); Ip_u2s3u1(_pref); Ip_0(_rfe); Ip_u2s3u1(_sc); @@ -102,6 +103,7 @@ Ip_0(_tlbwr); Ip_u3u1u2(_xor); Ip_u2u1u3(_xori); Ip_u2u1msbu3(_dins); +Ip_u1(_syscall); /* Handle labels. */ struct uasm_label { diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h new file mode 100644 index 00000000000..cca56aa40ff --- /dev/null +++ b/arch/mips/include/asm/vdso.h @@ -0,0 +1,29 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009 Cavium Networks + */ + +#ifndef __ASM_VDSO_H +#define __ASM_VDSO_H + +#include <linux/types.h> + + +#ifdef CONFIG_32BIT +struct mips_vdso { + u32 signal_trampoline[2]; + u32 rt_signal_trampoline[2]; +}; +#else /* !CONFIG_32BIT */ +struct mips_vdso { + u32 o32_signal_trampoline[2]; + u32 o32_rt_signal_trampoline[2]; + u32 rt_signal_trampoline[2]; + u32 n32_rt_signal_trampoline[2]; +}; +#endif /* CONFIG_32BIT */ + +#endif /* __ASM_VDSO_H */ diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index ef20957ca14..7a6ac501cbb 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ ptrace.o reset.o setup.o signal.o syscall.o \ - time.o topology.o traps.o unaligned.o watch.o + time.o topology.o traps.o unaligned.o watch.o vdso.o ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_ftrace.o = -pg diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/kernel/cpufreq/loongson2_clock.c index d7ca256e33e..cefc6e259ba 100644 --- a/arch/mips/kernel/cpufreq/loongson2_clock.c +++ b/arch/mips/kernel/cpufreq/loongson2_clock.c @@ -164,3 +164,7 @@ void loongson2_cpu_wait(void) spin_unlock_irqrestore(&loongson2_wait_lock, flags); } EXPORT_SYMBOL_GPL(loongson2_cpu_wait); + +MODULE_AUTHOR("Yanhua <yanh@lemote.com>"); +MODULE_DESCRIPTION("cpufreq driver for Loongson 2F"); +MODULE_LICENSE("GPL"); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 463b71b90a0..99960940d4a 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -63,8 +63,13 @@ void __noreturn cpu_idle(void) smtc_idle_loop_hook(); #endif - if (cpu_wait) + + if (cpu_wait) { + /* Don't trace irqs off for idle */ + stop_critical_timings(); (*cpu_wait)(); + start_critical_timings(); + } } #ifdef CONFIG_HOTPLUG_CPU if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index 6c8e8c4246f..10263b40598 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h @@ -26,11 +26,6 @@ */ extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size); -/* - * install trampoline code to get back from the sig handler - */ -extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall); - /* Check and clear 
pending FPU exceptions in saved CSR */ extern int fpcsr_pending(unsigned int __user *fpcsr); diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index d0c68b5d717..2099d5a4c4b 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -32,6 +32,7 @@ #include <asm/ucontext.h> #include <asm/cpu-features.h> #include <asm/war.h> +#include <asm/vdso.h> #include "signal-common.h" @@ -44,47 +45,20 @@ extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); -/* - * Horribly complicated - with the bloody RM9000 workarounds enabled - * the signal trampolines is moving to the end of the structure so we can - * increase the alignment without breaking software compatibility. - */ -#if ICACHE_REFILLS_WORKAROUND_WAR == 0 - struct sigframe { u32 sf_ass[4]; /* argument save space for o32 */ - u32 sf_code[2]; /* signal trampoline */ + u32 sf_pad[2]; /* Was: signal trampoline */ struct sigcontext sf_sc; sigset_t sf_mask; }; struct rt_sigframe { u32 rs_ass[4]; /* argument save space for o32 */ - u32 rs_code[2]; /* signal trampoline */ + u32 rs_pad[2]; /* Was: signal trampoline */ struct siginfo rs_info; struct ucontext rs_uc; }; -#else - -struct sigframe { - u32 sf_ass[4]; /* argument save space for o32 */ - u32 sf_pad[2]; - struct sigcontext sf_sc; /* hw context */ - sigset_t sf_mask; - u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */ -}; - -struct rt_sigframe { - u32 rs_ass[4]; /* argument save space for o32 */ - u32 rs_pad[2]; - struct siginfo rs_info; - struct ucontext rs_uc; - u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */ -}; - -#endif - /* * Helper routines */ @@ -266,32 +240,6 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); } -int install_sigtramp(unsigned int __user *tramp, unsigned int syscall) -{ - int err; - - /* - * Set up the return code ... - * - * li v0, __NR__foo_sigreturn - * syscall - */ - - err = __put_user(0x24020000 + syscall, tramp + 0); - err |= __put_user(0x0000000c , tramp + 1); - if (ICACHE_REFILLS_WORKAROUND_WAR) { - err |= __put_user(0, tramp + 2); - err |= __put_user(0, tramp + 3); - err |= __put_user(0, tramp + 4); - err |= __put_user(0, tramp + 5); - err |= __put_user(0, tramp + 6); - err |= __put_user(0, tramp + 7); - } - flush_cache_sigtramp((unsigned long) tramp); - - return err; -} - /* * Atomically swap in the new signal mask, and wait for a signal. 
*/ @@ -484,8 +432,8 @@ badframe: } #ifdef CONFIG_TRAD_SIGNALS -static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, - int signr, sigset_t *set) +static int setup_frame(void *sig_return, struct k_sigaction *ka, + struct pt_regs *regs, int signr, sigset_t *set) { struct sigframe __user *frame; int err = 0; @@ -494,8 +442,6 @@ static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) goto give_sigsegv; - err |= install_sigtramp(frame->sf_code, __NR_sigreturn); - err |= setup_sigcontext(regs, &frame->sf_sc); err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); if (err) @@ -515,7 +461,7 @@ static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, regs->regs[ 5] = 0; regs->regs[ 6] = (unsigned long) &frame->sf_sc; regs->regs[29] = (unsigned long) frame; - regs->regs[31] = (unsigned long) frame->sf_code; + regs->regs[31] = (unsigned long) sig_return; regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", @@ -529,8 +475,9 @@ give_sigsegv: } #endif -static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, - int signr, sigset_t *set, siginfo_t *info) +static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, + struct pt_regs *regs, int signr, sigset_t *set, + siginfo_t *info) { struct rt_sigframe __user *frame; int err = 0; @@ -539,8 +486,6 @@ static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) goto give_sigsegv; - err |= install_sigtramp(frame->rs_code, __NR_rt_sigreturn); - /* Create siginfo. */ err |= copy_siginfo_to_user(&frame->rs_info, info); @@ -573,7 +518,7 @@ static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, regs->regs[ 5] = (unsigned long) &frame->rs_info; regs->regs[ 6] = (unsigned long) &frame->rs_uc; regs->regs[29] = (unsigned long) frame; - regs->regs[31] = (unsigned long) frame->rs_code; + regs->regs[31] = (unsigned long) sig_return; regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", @@ -590,8 +535,11 @@ give_sigsegv: struct mips_abi mips_abi = { #ifdef CONFIG_TRAD_SIGNALS .setup_frame = setup_frame, + .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline), #endif .setup_rt_frame = setup_rt_frame, + .rt_signal_return_offset = + offsetof(struct mips_vdso, rt_signal_trampoline), .restart = __NR_restart_syscall }; @@ -599,6 +547,8 @@ static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) { int ret; + struct mips_abi *abi = current->thread.abi; + void *vdso = current->mm->context.vdso; switch(regs->regs[0]) { case ERESTART_RESTARTBLOCK: @@ -619,9 +569,11 @@ static int handle_signal(unsigned long sig, siginfo_t *info, regs->regs[0] = 0; /* Don't deal with this again. 
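 * (Editor's note: just below, the trampoline address passed as the new
 * first argument to setup_rt_frame()/setup_frame() is computed once as
 * the per-process vdso base plus the ABI's fixed offset into struct
 * mips_vdso; see the abi.h and vdso.h hunks above.)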
*/ if (sig_uses_siginfo(ka)) - ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info); + ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset, + ka, regs, sig, oldset, info); else - ret = current->thread.abi->setup_frame(ka, regs, sig, oldset); + ret = abi->setup_frame(vdso + abi->signal_return_offset, + ka, regs, sig, oldset); spin_lock_irq(¤t->sighand->siglock); sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 03abaf048f0..a0ed0e052b2 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -32,6 +32,7 @@ #include <asm/system.h> #include <asm/fpu.h> #include <asm/war.h> +#include <asm/vdso.h> #include "signal-common.h" @@ -47,8 +48,6 @@ extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user /* * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... */ -#define __NR_O32_sigreturn 4119 -#define __NR_O32_rt_sigreturn 4193 #define __NR_O32_restart_syscall 4253 /* 32-bit compatibility types */ @@ -77,47 +76,20 @@ struct ucontext32 { compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; -/* - * Horribly complicated - with the bloody RM9000 workarounds enabled - * the signal trampolines is moving to the end of the structure so we can - * increase the alignment without breaking software compatibility. - */ -#if ICACHE_REFILLS_WORKAROUND_WAR == 0 - struct sigframe32 { u32 sf_ass[4]; /* argument save space for o32 */ - u32 sf_code[2]; /* signal trampoline */ + u32 sf_pad[2]; /* Was: signal trampoline */ struct sigcontext32 sf_sc; compat_sigset_t sf_mask; }; struct rt_sigframe32 { u32 rs_ass[4]; /* argument save space for o32 */ - u32 rs_code[2]; /* signal trampoline */ + u32 rs_pad[2]; /* Was: signal trampoline */ compat_siginfo_t rs_info; struct ucontext32 rs_uc; }; -#else /* ICACHE_REFILLS_WORKAROUND_WAR */ - -struct sigframe32 { - u32 sf_ass[4]; /* argument save space for o32 */ - u32 sf_pad[2]; - struct sigcontext32 sf_sc; /* hw context */ - compat_sigset_t sf_mask; - u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */ -}; - -struct rt_sigframe32 { - u32 rs_ass[4]; /* argument save space for o32 */ - u32 rs_pad[2]; - compat_siginfo_t rs_info; - struct ucontext32 rs_uc; - u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */ -}; - -#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */ - /* * sigcontext handlers */ @@ -598,8 +570,8 @@ badframe: force_sig(SIGSEGV, current); } -static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, - int signr, sigset_t *set) +static int setup_frame_32(void *sig_return, struct k_sigaction *ka, + struct pt_regs *regs, int signr, sigset_t *set) { struct sigframe32 __user *frame; int err = 0; @@ -608,8 +580,6 @@ static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) goto give_sigsegv; - err |= install_sigtramp(frame->sf_code, __NR_O32_sigreturn); - err |= setup_sigcontext32(regs, &frame->sf_sc); err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); @@ -630,7 +600,7 @@ static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, regs->regs[ 5] = 0; regs->regs[ 6] = (unsigned long) &frame->sf_sc; regs->regs[29] = (unsigned long) frame; - regs->regs[31] = (unsigned long) frame->sf_code; + regs->regs[31] = (unsigned long) sig_return; regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", @@ -644,8 +614,9 
@@ give_sigsegv: return -EFAULT; } -static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, - int signr, sigset_t *set, siginfo_t *info) +static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka, + struct pt_regs *regs, int signr, sigset_t *set, + siginfo_t *info) { struct rt_sigframe32 __user *frame; int err = 0; @@ -655,8 +626,6 @@ static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) goto give_sigsegv; - err |= install_sigtramp(frame->rs_code, __NR_O32_rt_sigreturn); - /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ err |= copy_siginfo_to_user32(&frame->rs_info, info); @@ -690,7 +659,7 @@ static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, regs->regs[ 5] = (unsigned long) &frame->rs_info; regs->regs[ 6] = (unsigned long) &frame->rs_uc; regs->regs[29] = (unsigned long) frame; - regs->regs[31] = (unsigned long) frame->rs_code; + regs->regs[31] = (unsigned long) sig_return; regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", @@ -709,7 +678,11 @@ give_sigsegv: */ struct mips_abi mips_abi_32 = { .setup_frame = setup_frame_32, + .signal_return_offset = + offsetof(struct mips_vdso, o32_signal_trampoline), .setup_rt_frame = setup_rt_frame_32, + .rt_signal_return_offset = + offsetof(struct mips_vdso, o32_rt_signal_trampoline), .restart = __NR_O32_restart_syscall }; diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index bb277e82d42..2c5df818c65 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -39,13 +39,13 @@ #include <asm/fpu.h> #include <asm/cpu-features.h> #include <asm/war.h> +#include <asm/vdso.h> #include "signal-common.h" /* * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... */ -#define __NR_N32_rt_sigreturn 6211 #define __NR_N32_restart_syscall 6214 extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); @@ -67,27 +67,13 @@ struct ucontextn32 { compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; -#if ICACHE_REFILLS_WORKAROUND_WAR == 0 - -struct rt_sigframe_n32 { - u32 rs_ass[4]; /* argument save space for o32 */ - u32 rs_code[2]; /* signal trampoline */ - struct compat_siginfo rs_info; - struct ucontextn32 rs_uc; -}; - -#else /* ICACHE_REFILLS_WORKAROUND_WAR */ - struct rt_sigframe_n32 { u32 rs_ass[4]; /* argument save space for o32 */ - u32 rs_pad[2]; + u32 rs_pad[2]; /* Was: signal trampoline */ struct compat_siginfo rs_info; struct ucontextn32 rs_uc; - u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */ }; -#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */ - extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) @@ -173,7 +159,7 @@ badframe: force_sig(SIGSEGV, current); } -static int setup_rt_frame_n32(struct k_sigaction * ka, +static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) { struct rt_sigframe_n32 __user *frame; @@ -184,8 +170,6 @@ static int setup_rt_frame_n32(struct k_sigaction * ka, if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) goto give_sigsegv; - install_sigtramp(frame->rs_code, __NR_N32_rt_sigreturn); - /* Create siginfo. 
*/ err |= copy_siginfo_to_user32(&frame->rs_info, info); @@ -219,7 +203,7 @@ static int setup_rt_frame_n32(struct k_sigaction * ka, regs->regs[ 5] = (unsigned long) &frame->rs_info; regs->regs[ 6] = (unsigned long) &frame->rs_uc; regs->regs[29] = (unsigned long) frame; - regs->regs[31] = (unsigned long) frame->rs_code; + regs->regs[31] = (unsigned long) sig_return; regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", @@ -235,5 +219,7 @@ give_sigsegv: struct mips_abi mips_abi_n32 = { .setup_rt_frame = setup_rt_frame_n32, + .rt_signal_return_offset = + offsetof(struct mips_vdso, n32_rt_signal_trampoline), .restart = __NR_N32_restart_syscall }; diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 25e825aea32..a95dea5459c 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -182,7 +182,7 @@ static int vpemask[2][8] = { {0, 0, 0, 0, 0, 0, 0, 1} }; int tcnoprog[NR_CPUS]; -static atomic_t idle_hook_initialized = {0}; +static atomic_t idle_hook_initialized = ATOMIC_INIT(0); static int clock_hang_reported[NR_CPUS]; #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 9587abc67f3..dd81b0f8751 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -79,7 +79,11 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, int do_color_align; unsigned long task_size; - task_size = STACK_TOP; +#ifdef CONFIG_32BIT + task_size = TASK_SIZE; +#else /* Must be CONFIG_64BIT */ + task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE; +#endif if (len > task_size) return -ENOMEM; diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 4e00f9bc23e..1a4dd657ccb 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1599,7 +1599,7 @@ void __init trap_init(void) ebase = (unsigned long) __alloc_bootmem(size, 1 << fls(size), 0); } else { - ebase = CAC_BASE; + ebase = CKSEG0; if (cpu_has_mips_r2) ebase += (read_c0_ebase() & 0x3ffff000); } diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c new file mode 100644 index 00000000000..b773c1112b1 --- /dev/null +++ b/arch/mips/kernel/vdso.c @@ -0,0 +1,112 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009, 2010 Cavium Networks, Inc. + */ + + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/binfmts.h> +#include <linux/elf.h> +#include <linux/vmalloc.h> +#include <linux/unistd.h> + +#include <asm/vdso.h> +#include <asm/uasm.h> + +/* + * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
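+ * Editor's note: each trampoline emitted below is the same two-word
+ * sequence that the deleted install_sigtramp() used to poke onto the
+ * user stack, 0x24020000 | nr (li v0, nr) followed by 0x0000000c
+ * (syscall), only now it is generated once, via uasm, into the shared
+ * vdso page.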
+ */ +#define __NR_O32_sigreturn 4119 +#define __NR_O32_rt_sigreturn 4193 +#define __NR_N32_rt_sigreturn 6211 + +static struct page *vdso_page; + +static void __init install_trampoline(u32 *tramp, unsigned int sigreturn) +{ + uasm_i_addiu(&tramp, 2, 0, sigreturn); /* li v0, sigreturn */ + uasm_i_syscall(&tramp, 0); +} + +static int __init init_vdso(void) +{ + struct mips_vdso *vdso; + + vdso_page = alloc_page(GFP_KERNEL); + if (!vdso_page) + panic("Cannot allocate vdso"); + + vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL); + if (!vdso) + panic("Cannot map vdso"); + clear_page(vdso); + + install_trampoline(vdso->rt_signal_trampoline, __NR_rt_sigreturn); +#ifdef CONFIG_32BIT + install_trampoline(vdso->signal_trampoline, __NR_sigreturn); +#else + install_trampoline(vdso->n32_rt_signal_trampoline, + __NR_N32_rt_sigreturn); + install_trampoline(vdso->o32_signal_trampoline, __NR_O32_sigreturn); + install_trampoline(vdso->o32_rt_signal_trampoline, + __NR_O32_rt_sigreturn); +#endif + + vunmap(vdso); + + pr_notice("init_vdso successful\n"); + + return 0; +} +device_initcall(init_vdso); + +static unsigned long vdso_addr(unsigned long start) +{ + return STACK_TOP; +} + +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + int ret; + unsigned long addr; + struct mm_struct *mm = current->mm; + + down_write(&mm->mmap_sem); + + addr = vdso_addr(mm->start_stack); + + addr = get_unmapped_area(NULL, addr, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + + ret = install_special_mapping(mm, addr, PAGE_SIZE, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| + VM_ALWAYSDUMP, + &vdso_page); + + if (ret) + goto up_fail; + + mm->context.vdso = (void *)addr; + +up_fail: + up_write(&mm->mmap_sem); + return ret; +} + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) + return "[vdso]"; + return NULL; +} diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index 6b3b1de9dca..5995969e8c4 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c @@ -41,7 +41,7 @@ EXPORT_SYMBOL(__delay); void __udelay(unsigned long us) { - unsigned int lpj = current_cpu_data.udelay_val; + unsigned int lpj = raw_current_cpu_data.udelay_val; __delay((us * 0x000010c7ull * HZ * lpj) >> 32); } @@ -49,7 +49,7 @@ EXPORT_SYMBOL(__udelay); void __ndelay(unsigned long ns) { - unsigned int lpj = current_cpu_data.udelay_val; + unsigned int lpj = raw_current_cpu_data.udelay_val; __delay((ns * 0x00000005ull * HZ * lpj) >> 32); } diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h index 3f19d1c5d94..05909d58e2f 100644 --- a/arch/mips/lib/libgcc.h +++ b/arch/mips/lib/libgcc.h @@ -17,8 +17,7 @@ struct DWstruct { #error I feel sick.
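Editor's sketch: how the vdso page above actually reaches a process. install_special_mapping() performs the mapping, but arch_setup_additional_pages() itself is only invoked from the generic ELF loader; the fragment below paraphrases the existing fs/binfmt_elf.c call site (an assumption about the caller's exact shape, not part of this patch).

/*
 * fs/binfmt_elf.c, load_elf_binary(), paraphrased: once the binary is
 * mapped, the architecture hook may add extra mappings such as the
 * vdso page built by init_vdso() above.
 */
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
	if (retval < 0)
		goto out;	/* execve() fails */
#endif

With 4 KB pages (an assumption), the 32-bit arithmetic works out as STACK_TOP = (0x7fff8000UL & PAGE_MASK) - PAGE_SIZE = 0x7fff7000, so the single vdso page occupies 0x7fff7000-0x7fff8000, is reported as "[vdso]" by arch_vma_name(), and the stack begins immediately below it.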
#endif -typedef union -{ +typedef union { struct DWstruct s; long long ll; } DWunion; diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index be8627bc5b0..12af739048f 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -133,7 +133,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address, } unsigned long _page_cachable_default; -EXPORT_SYMBOL_GPL(_page_cachable_default); +EXPORT_SYMBOL(_page_cachable_default); static inline void setup_protection_map(void) { diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 0de0e4127d6..d1f68aadbc4 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -788,10 +788,15 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) * create the plain linear handler */ if (bcm1250_m3_war()) { - UASM_i_MFC0(&p, K0, C0_BADVADDR); - UASM_i_MFC0(&p, K1, C0_ENTRYHI); + unsigned int segbits = 44; + + uasm_i_dmfc0(&p, K0, C0_BADVADDR); + uasm_i_dmfc0(&p, K1, C0_ENTRYHI); uasm_i_xor(&p, K0, K0, K1); - UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); + uasm_i_dsrl32(&p, K1, K0, 62 - 32); + uasm_i_dsrl(&p, K0, K0, 12 + 1); + uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32); + uasm_i_or(&p, K0, K0, K1); uasm_il_bnez(&p, &r, K0, label_leave); /* No need for uasm_i_nop */ } @@ -1312,10 +1317,15 @@ static void __cpuinit build_r4000_tlb_load_handler(void) memset(relocs, 0, sizeof(relocs)); if (bcm1250_m3_war()) { - UASM_i_MFC0(&p, K0, C0_BADVADDR); - UASM_i_MFC0(&p, K1, C0_ENTRYHI); + unsigned int segbits = 44; + + uasm_i_dmfc0(&p, K0, C0_BADVADDR); + uasm_i_dmfc0(&p, K1, C0_ENTRYHI); uasm_i_xor(&p, K0, K0, K1); - UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); + uasm_i_dsrl32(&p, K1, K0, 62 - 32); + uasm_i_dsrl(&p, K0, K0, 12 + 1); + uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32); + uasm_i_or(&p, K0, K0, K1); uasm_il_bnez(&p, &r, K0, label_leave); /* No need for uasm_i_nop */ } diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 1581e985246..611d564fdcf 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -31,7 +31,8 @@ enum fields { BIMM = 0x040, JIMM = 0x080, FUNC = 0x100, - SET = 0x200 + SET = 0x200, + SCIMM = 0x400 }; #define OP_MASK 0x3f @@ -52,6 +53,8 @@ enum fields { #define FUNC_SH 0 #define SET_MASK 0x7 #define SET_SH 0 +#define SCIMM_MASK 0xfffff +#define SCIMM_SH 6 enum opcode { insn_invalid, @@ -61,10 +64,10 @@ enum opcode { insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, - insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd, + insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, - insn_dins + insn_dins, insn_syscall }; struct insn { @@ -117,6 +120,7 @@ static struct insn insn_table[] __cpuinitdata = { { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, @@ -136,6 +140,7 @@ static struct insn insn_table[] __cpuinitdata = { { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { 
insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, + { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, { insn_invalid, 0, 0 } }; @@ -208,6 +213,14 @@ static inline __cpuinit u32 build_jimm(u32 arg) return (arg >> 2) & JIMM_MASK; } +static inline __cpuinit u32 build_scimm(u32 arg) +{ + if (arg & ~SCIMM_MASK) + printk(KERN_WARNING "Micro-assembler field overflow\n"); + + return (arg & SCIMM_MASK) << SCIMM_SH; +} + static inline __cpuinit u32 build_func(u32 arg) { if (arg & ~FUNC_MASK) @@ -266,6 +279,8 @@ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...) op |= build_func(va_arg(ap, u32)); if (ip->fields & SET) op |= build_set(va_arg(ap, u32)); + if (ip->fields & SCIMM) + op |= build_scimm(va_arg(ap, u32)); va_end(ap); **buf = op; @@ -373,6 +388,7 @@ I_u2s3u1(_lw) I_u1u2u3(_mfc0) I_u1u2u3(_mtc0) I_u2u1u3(_ori) +I_u3u1u2(_or) I_u2s3u1(_pref) I_0(_rfe) I_u2s3u1(_sc) @@ -391,6 +407,7 @@ I_0(_tlbwr) I_u3u1u2(_xor) I_u2u1u3(_xori) I_u2u1msbu3(_dins); +I_u1(_syscall); /* Handle labels. */ void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c index 2bb4057bf6c..d657ee0bc13 100644 --- a/arch/mips/pci/ops-loongson2.c +++ b/arch/mips/pci/ops-loongson2.c @@ -180,15 +180,21 @@ struct pci_ops loongson_pci_ops = { }; #ifdef CONFIG_CS5536 +DEFINE_RAW_SPINLOCK(msr_lock); + void _rdmsr(u32 msr, u32 *hi, u32 *lo) { struct pci_bus bus = { .number = PCI_BUS_CS5536 }; u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); + unsigned long flags; + + raw_spin_lock_irqsave(&msr_lock, flags); loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); + raw_spin_unlock_irqrestore(&msr_lock, flags); } EXPORT_SYMBOL(_rdmsr); @@ -198,9 +204,13 @@ void _wrmsr(u32 msr, u32 hi, u32 lo) .number = PCI_BUS_CS5536 }; u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); + unsigned long flags; + + raw_spin_lock_irqsave(&msr_lock, flags); loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); + raw_spin_unlock_irqrestore(&msr_lock, flags); } EXPORT_SYMBOL(_wrmsr); #endif diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c index 0444da1e23c..92da3155ce0 100644 --- a/arch/mips/sibyte/sb1250/setup.c +++ b/arch/mips/sibyte/sb1250/setup.c @@ -87,6 +87,21 @@ static int __init setup_bcm1250(void) return ret; } +int sb1250_m3_workaround_needed(void) +{ + switch (soc_type) { + case K_SYS_SOC_TYPE_BCM1250: + case K_SYS_SOC_TYPE_BCM1250_ALT: + case K_SYS_SOC_TYPE_BCM1250_ALT2: + case K_SYS_SOC_TYPE_BCM1125: + case K_SYS_SOC_TYPE_BCM1125H: + return soc_pass < K_SYS_REVISION_BCM1250_C0; + + default: + return 0; + } +} + static int __init setup_bcm112x(void) { int ret = 0; diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 7ae71cc5697..bcd6884985a 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.33-rc2 -# Mon Jan 4 09:03:07 2010 +# Linux kernel version: 2.6.34-rc3 +# Fri Apr 9 09:57:10 2010 # CONFIG_SCHED_MC=y CONFIG_MMU=y @@ -17,6 +17,7 @@ CONFIG_GENERIC_TIME=y CONFIG_GENERIC_TIME_VSYSCALL=y CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y CONFIG_NO_IOMEM=y CONFIG_NO_DMA=y CONFIG_GENERIC_LOCKBREAK=y @@ 
-62,15 +63,11 @@ CONFIG_TREE_RCU=y # CONFIG_RCU_TRACE is not set CONFIG_RCU_FANOUT=64 # CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_RCU_FAST_NO_HZ is not set # CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=17 -CONFIG_GROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_USER_SCHED=y -# CONFIG_CGROUP_SCHED is not set CONFIG_CGROUPS=y # CONFIG_CGROUP_DEBUG is not set CONFIG_CGROUP_NS=y @@ -79,6 +76,7 @@ CONFIG_CGROUP_NS=y # CONFIG_CPUSETS is not set # CONFIG_CGROUP_CPUACCT is not set # CONFIG_RESOURCE_COUNTERS is not set +# CONFIG_CGROUP_SCHED is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y # CONFIG_RELAY is not set @@ -93,6 +91,7 @@ CONFIG_INITRAMFS_SOURCE="" CONFIG_RD_GZIP=y CONFIG_RD_BZIP2=y CONFIG_RD_LZMA=y +CONFIG_RD_LZO=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -126,6 +125,7 @@ CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set # CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y CONFIG_HAVE_OPROFILE=y CONFIG_KPROBES=y CONFIG_HAVE_SYSCALL_WRAPPERS=y @@ -134,6 +134,7 @@ CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y CONFIG_HAVE_ARCH_TRACEHOOK=y CONFIG_USE_GENERIC_SMP_HELPERS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y # @@ -246,6 +247,7 @@ CONFIG_64BIT=y CONFIG_SMP=y CONFIG_NR_CPUS=32 CONFIG_HOTPLUG_CPU=y +# CONFIG_SCHED_BOOK is not set CONFIG_COMPAT=y CONFIG_SYSVIPC_COMPAT=y CONFIG_AUDIT_ARCH=y @@ -345,13 +347,13 @@ CONFIG_PM_SLEEP=y CONFIG_HIBERNATION=y CONFIG_PM_STD_PARTITION="" # CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y CONFIG_NET=y # # Networking options # CONFIG_PACKET=y -# CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y CONFIG_XFRM=y # CONFIG_XFRM_USER is not set @@ -529,6 +531,7 @@ CONFIG_NET_SCH_FIFO=y # # CONFIG_NET_PKTGEN is not set # CONFIG_NET_TCPPROBE is not set +# CONFIG_NET_DROP_MONITOR is not set CONFIG_CAN=m CONFIG_CAN_RAW=m CONFIG_CAN_BCM=m @@ -605,6 +608,7 @@ CONFIG_MISC_DEVICES=y # # SCSI device support # +CONFIG_SCSI_MOD=y # CONFIG_RAID_ATTRS is not set CONFIG_SCSI=y # CONFIG_SCSI_DMA is not set @@ -863,6 +867,7 @@ CONFIG_MISC_FILESYSTEMS=y # CONFIG_BEFS_FS is not set # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set +# CONFIG_LOGFS is not set # CONFIG_CRAMFS is not set # CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set @@ -891,6 +896,7 @@ CONFIG_SUNRPC=y # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set +# CONFIG_CEPH_FS is not set # CONFIG_CIFS is not set # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set @@ -952,6 +958,7 @@ CONFIG_DEBUG_MUTEXES=y # CONFIG_LOCK_STAT is not set CONFIG_DEBUG_SPINLOCK_SLEEP=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set @@ -973,12 +980,17 @@ CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y # CONFIG_LATENCYTOP is not set CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_DEBUG_PAGEALLOC is not set +CONFIG_NOP_TRACER=y CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER is not set @@ -995,10 +1007,15 @@ CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_KMEMTRACE is not set # CONFIG_WORKQUEUE_TRACER is 
not set # CONFIG_BLK_DEV_IO_TRACE is not set +CONFIG_KPROBE_EVENT=y +# CONFIG_RING_BUFFER_BENCHMARK is not set # CONFIG_DYNAMIC_DEBUG is not set CONFIG_SAMPLES=y +# CONFIG_SAMPLE_TRACEPOINTS is not set +# CONFIG_SAMPLE_TRACE_EVENTS is not set # CONFIG_SAMPLE_KOBJECT is not set # CONFIG_SAMPLE_KPROBES is not set +# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set # # Security options @@ -1032,6 +1049,7 @@ CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y CONFIG_CRYPTO_GF128MUL=m # CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_PCRYPT is not set CONFIG_CRYPTO_WORKQUEUE=y # CONFIG_CRYPTO_CRYPTD is not set CONFIG_CRYPTO_AUTHENC=m @@ -1119,7 +1137,7 @@ CONFIG_CRYPTO_SHA512_S390=m # CONFIG_CRYPTO_DES_S390 is not set # CONFIG_CRYPTO_AES_S390 is not set CONFIG_S390_PRNG=m -# CONFIG_BINARY_PRINTF is not set +CONFIG_BINARY_PRINTF=y # # Library routines @@ -1136,14 +1154,16 @@ CONFIG_LIBCRC32C=m CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m CONFIG_LZO_COMPRESS=m -CONFIG_LZO_DECOMPRESS=m +CONFIG_LZO_DECOMPRESS=y CONFIG_DECOMPRESS_GZIP=y CONFIG_DECOMPRESS_BZIP2=y CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_LZO=y CONFIG_NLATTR=y CONFIG_HAVE_KVM=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=m +# CONFIG_VHOST_NET is not set CONFIG_VIRTIO=y CONFIG_VIRTIO_RING=y CONFIG_VIRTIO_BALLOON=m diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 9b5b9189c15..89a504c3f12 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -105,7 +105,7 @@ extern char empty_zero_page[PAGE_SIZE]; #ifndef __ASSEMBLY__ /* * The vmalloc area will always be on the topmost area of the kernel - * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc, + * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc, * which should be enough for any sane case. * By putting vmalloc at the top, we maximise the gap between physical * memory and vmalloc to catch misplaced memory accesses. 
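To make the comment's new numbers concrete: assuming the vmalloc start is simply VMALLOC_END - VMALLOC_SIZE, as the pair of constants in the hunk just below suggests, growing VMALLOC_SIZE from 1GB to 128GB on 64-bit s390 gives the following range (standalone arithmetic sketch, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long vmalloc_size = 128ULL << 30;   /* 128GB */
		unsigned long long vmalloc_end  = 0x3e000000000ULL;

		/* prints: vmalloc: [0x3c000000000, 0x3e000000000) */
		printf("vmalloc: [0x%llx, 0x%llx)\n",
		       vmalloc_end - vmalloc_size, vmalloc_end);
		return 0;
	}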
As a side @@ -120,8 +120,8 @@ extern unsigned long VMALLOC_START; #define VMALLOC_END 0x7e000000UL #define VMEM_MAP_END 0x80000000UL #else /* __s390x__ */ -#define VMALLOC_SIZE (1UL << 30) -#define VMALLOC_END 0x3e040000000UL +#define VMALLOC_SIZE (128UL << 30) +#define VMALLOC_END 0x3e000000000UL #define VMEM_MAP_END 0x40000000000UL #endif /* __s390x__ */ diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 31d618a443a..2d92c2cf92d 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -82,7 +82,8 @@ asm( " lm 6,15,24(15)\n" #endif " br 14\n" - " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); + " .size savesys_ipl_nss, .-savesys_ipl_nss\n" + " .previous\n"); static __initdata char upper_command_line[COMMAND_LINE_SIZE]; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 4348f9bc539..6af7045280a 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -964,7 +964,7 @@ cleanup_critical: clc 4(4,%r12),BASED(cleanup_table_io_work_loop) bl BASED(0f) clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4) - bl BASED(cleanup_io_return) + bl BASED(cleanup_io_work_loop) 0: br %r14 @@ -1039,6 +1039,12 @@ cleanup_sysc_leave_insn: cleanup_io_return: mvc __LC_RETURN_PSW(4),0(%r12) + mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_return) + la %r12,__LC_RETURN_PSW + br %r14 + +cleanup_io_work_loop: + mvc __LC_RETURN_PSW(4),0(%r12) mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop) la %r12,__LC_RETURN_PSW br %r14 diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 29fd0f1e6ec..52106d53271 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -946,7 +946,7 @@ cleanup_critical: clc 8(8,%r12),BASED(cleanup_table_io_work_loop) jl 0f clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8) - jl cleanup_io_return + jl cleanup_io_work_loop 0: br %r14 @@ -1021,6 +1021,12 @@ cleanup_sysc_leave_insn: cleanup_io_return: mvc __LC_RETURN_PSW(8),0(%r12) + mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_return) + la %r12,__LC_RETURN_PSW + br %r14 + +cleanup_io_work_loop: + mvc __LC_RETURN_PSW(8),0(%r12) mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) la %r12,__LC_RETURN_PSW br %r14 diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 14ef6f05e43..247b4c2d1e5 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -165,10 +165,11 @@ static void tl_to_cores(struct tl_info *info) default: clear_cores(); machine_has_topology = 0; - return; + goto out; } tle = next_tle(tle); } +out: spin_unlock_irq(&topology_lock); } diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 8ea3144b45b..90165e7ca04 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -71,12 +71,8 @@ static pte_t __ref *vmem_pte_alloc(void) pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); if (!pte) return NULL; - if (MACHINE_HAS_HPAGE) - clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO, - PTRS_PER_PTE * sizeof(pte_t)); - else - clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, - PTRS_PER_PTE * sizeof(pte_t)); + clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, + PTRS_PER_PTE * sizeof(pte_t)); return pte; } @@ -117,8 +113,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && (address + HPAGE_SIZE <= start + size) && (address >= HPAGE_SIZE)) { - pte_val(pte) |= _SEGMENT_ENTRY_LARGE | - _SEGMENT_ENTRY_CO; + pte_val(pte) |= _SEGMENT_ENTRY_LARGE; pmd_val(*pm_dir) = pte_val(pte); address += 
HPAGE_SIZE - PAGE_SIZE; continue; diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 6db51367405..9908d477ccd 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -37,6 +37,9 @@ config SPARC64 def_bool 64BIT select ARCH_SUPPORTS_MSI select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_GRAPH_FP_TEST + select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_KRETPROBES select HAVE_KPROBES select HAVE_LMB diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug index 9d3c889718a..1b4a831565f 100644 --- a/arch/sparc/Kconfig.debug +++ b/arch/sparc/Kconfig.debug @@ -19,13 +19,10 @@ config DEBUG_DCFLUSH bool "D-cache flush debugging" depends on SPARC64 && DEBUG_KERNEL -config STACK_DEBUG - bool "Stack Overflow Detection Support" - config MCOUNT bool depends on SPARC64 - depends on STACK_DEBUG || FUNCTION_TRACER + depends on FUNCTION_TRACER default y config FRAME_POINTER diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index 926397d345f..050ef35b9dc 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h @@ -17,7 +17,7 @@ typedef struct { unsigned int __nmi_count; unsigned long clock_tick; /* %tick's per second */ unsigned long __pad; - unsigned int __pad1; + unsigned int irq0_irqs; unsigned int __pad2; /* Dcache line 2, rarely used */ diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h index 8b49bf920df..bfa1ea45b4c 100644 --- a/arch/sparc/include/asm/irqflags_64.h +++ b/arch/sparc/include/asm/irqflags_64.h @@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void) */ static inline unsigned long __raw_local_irq_save(void) { - unsigned long flags = __raw_local_save_flags(); - - raw_local_irq_disable(); + unsigned long flags, tmp; + + /* Disable interrupts to PIL_NORMAL_MAX unless we already + * are using PIL_NMI, in which case PIL_NMI is retained. + * + * The only values we ever program into the %pil are 0, + * PIL_NORMAL_MAX and PIL_NMI. + * + * Since PIL_NMI is the largest %pil value and all bits are + * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX + * actually is. 
+ */ + __asm__ __volatile__( + "rdpr %%pil, %0\n\t" + "or %0, %2, %1\n\t" + "wrpr %1, 0x0, %%pil" + : "=r" (flags), "=r" (tmp) + : "i" (PIL_NORMAL_MAX) + : "memory" + ); return flags; } diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index c6316142db4..0c2dc1f24a9 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -13,6 +13,14 @@ extra-y += init_task.o CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS) extra-y += vmlinux.lds +ifdef CONFIG_FUNCTION_TRACER +# Do not profile debug and lowlevel utilities +CFLAGS_REMOVE_ftrace.o := -pg +CFLAGS_REMOVE_time_$(BITS).o := -pg +CFLAGS_REMOVE_perf_event.o := -pg +CFLAGS_REMOVE_pcr.o := -pg +endif + obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o obj-$(CONFIG_SPARC32) += etrap_32.o obj-$(CONFIG_SPARC32) += rtrap_32.o @@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o -CFLAGS_REMOVE_ftrace.o := -pg +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_EARLYFB) += btext.o obj-$(CONFIG_STACKTRACE) += stacktrace.o diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index 9103a56b39e..03ab022e51c 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c @@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000; static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) { - static u32 call; + u32 call; s32 off; off = ((s32)addr - (s32)ip); @@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data) return 0; } #endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +#ifdef CONFIG_DYNAMIC_FTRACE +extern void ftrace_graph_call(void); + +int ftrace_enable_ftrace_graph_caller(void) +{ + unsigned long ip = (unsigned long)(&ftrace_graph_call); + u32 old, new; + + old = *(u32 *) &ftrace_graph_call; + new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller); + return ftrace_modify_code(ip, old, new); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + unsigned long ip = (unsigned long)(&ftrace_graph_call); + u32 old, new; + + old = *(u32 *) &ftrace_graph_call; + new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub); + + return ftrace_modify_code(ip, old, new); +} + +#endif /* !CONFIG_DYNAMIC_FTRACE */ + +/* + * Hook the return address and push it in the stack of return addrs + * in current thread info. 
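A hedged C model of what the inline assembly above computes (PIL constants assumed from asm/pil.h, helper name hypothetical): the caller gets back the old %pil as its flags, and the new %pil becomes old | PIL_NORMAL_MAX, so a fully-set PIL_NMI (0xf) can never be lowered by the OR.

	#include <stdio.h>

	#define PIL_NORMAL_MAX	14	/* assumed value */
	#define PIL_NMI		15	/* all %pil bits set */

	/* Returns the saved flags; *new_pil models what wrpr writes back. */
	static unsigned long model_irq_save(unsigned long cur_pil,
					    unsigned long *new_pil)
	{
		unsigned long flags = cur_pil;		/* rdpr %pil, %0    */
		*new_pil = cur_pil | PIL_NORMAL_MAX;	/* or   %0, %2, %1  */
		return flags;				/* wrpr %1, 0, %pil */
	}

	int main(void)
	{
		unsigned long new_pil;

		model_irq_save(0, &new_pil);		/* normal entry      */
		printf("pil 0  -> %lu\n", new_pil);	/* disables to 14    */
		model_irq_save(PIL_NMI, &new_pil);	/* already at NMI    */
		printf("pil 15 -> %lu\n", new_pil);	/* 15 is retained    */
		return 0;
	}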
+ */ +unsigned long prepare_ftrace_return(unsigned long parent, + unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long) &return_to_handler; + struct ftrace_graph_ent trace; + + if (unlikely(atomic_read(&current->tracing_graph_pause))) + return parent + 8UL; + + if (ftrace_push_return_trace(parent, self_addr, &trace.depth, + frame_pointer) == -EBUSY) + return parent + 8UL; + + trace.func = self_addr; + + /* Only trace if the calling function expects to */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + return parent + 8UL; + } + + return return_hooker; +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index e1cbdb94d97..454ce3a2527 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -20,6 +20,7 @@ #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/ftrace.h> #include <linux/irq.h> #include <asm/ptrace.h> @@ -647,6 +648,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); if (unlikely(!bucket)) return 0; + + /* The only reference we store to the IRQ bucket is + * by physical address which kmemleak can't see, tell + * it that this object explicitly is not a leak and + * should be scanned. + */ + kmemleak_not_leak(bucket); + __flush_dcache_range((unsigned long) bucket, ((unsigned long) bucket + sizeof(struct ino_bucket))); @@ -721,7 +730,7 @@ static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp) { __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); } -void handler_irq(int irq, struct pt_regs *regs) +void __irq_entry handler_irq(int irq, struct pt_regs *regs) { unsigned long pstate, bucket_pa; struct pt_regs *old_regs; diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index f5a0fd490b5..0a2bd0f99fc 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c @@ -5,6 +5,7 @@ #include <linux/kgdb.h> #include <linux/kdebug.h> +#include <linux/ftrace.h> #include <asm/kdebug.h> #include <asm/ptrace.h> @@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) } #ifdef CONFIG_SMP -void smp_kgdb_capture_client(int irq, struct pt_regs *regs) +void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs) { unsigned long flags; diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index b287b62c7ea..75a3d1a2535 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -92,7 +92,6 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) { unsigned int sum, touched = 0; - int cpu = smp_processor_id(); clear_softint(1 << irq); @@ -106,7 +105,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) else pcr_ops->write(PCR_PIC_PRIV); - sum = kstat_irqs_cpu(0, cpu); + sum = local_cpu_data().irq0_irqs; if (__get_cpu_var(nmi_touch)) { __get_cpu_var(nmi_touch) = 0; touched = 1; diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index b775658a927..8a000583b5c 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c @@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm) struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); if (!rp) { - prom_printf("Cannot allocate IOMMU resource.\n"); - prom_halt(); + pr_info("%s: Cannot allocate IOMMU 
resource.\n", + pbm->name); + return; } rp->name = "IOMMU"; rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; rp->end = rp->start + (unsigned long) vdma[1] - 1UL; rp->flags = IORESOURCE_BUSY; - request_resource(&pbm->mem_space, rp); + if (request_resource(&pbm->mem_space, rp)) { + pr_info("%s: Unable to request IOMMU resource.\n", + pbm->name); + kfree(rp); + } } } diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 2d94e7a03af..c4a6a50b484 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -8,6 +8,7 @@ #include <linux/irq.h> #include <linux/perf_event.h> +#include <linux/ftrace.h> #include <asm/pil.h> #include <asm/pcr.h> @@ -34,7 +35,7 @@ unsigned int picl_shift; * Therefore in such situations we defer the work by signalling * a lower level cpu IRQ. */ -void deferred_pcr_work_irq(int irq, struct pt_regs *regs) +void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs) { struct pt_regs *old_regs; diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 4c533452810..b6a2b8f4704 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -22,6 +22,7 @@ #include <linux/profile.h> #include <linux/bootmem.h> #include <linux/vmalloc.h> +#include <linux/ftrace.h> #include <linux/cpu.h> #include <linux/slab.h> @@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(int cpu) &cpumask_of_cpu(cpu)); } -void smp_call_function_client(int irq, struct pt_regs *regs) +void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); generic_smp_call_function_interrupt(); } -void smp_call_function_single_client(int irq, struct pt_regs *regs) +void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); generic_smp_call_function_single_interrupt(); @@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) put_cpu(); } -void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) +void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) { struct mm_struct *mm; unsigned long flags; @@ -1149,7 +1150,7 @@ void smp_release(void) */ extern void prom_world(int); -void smp_penguin_jailcell(int irq, struct pt_regs *regs) +void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs) { clear_softint(1 << irq); @@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu) &cpumask_of_cpu(cpu)); } -void smp_receive_signal_client(int irq, struct pt_regs *regs) +void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); } diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 67e16510288..c7bbe6cf7b8 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -35,6 +35,7 @@ #include <linux/clocksource.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/ftrace.h> #include <asm/oplib.h> #include <asm/timer.h> @@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = { }; static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); -void timer_interrupt(int irq, struct pt_regs *regs) +void __irq_entry timer_interrupt(int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); unsigned long tick_mask = tick_ops->softint_mask; @@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs) irq_enter(); + local_cpu_data().irq0_irqs++; kstat_incr_irqs_this_cpu(0, irq_to_desc(0)); if (unlikely(!evt->event_handler)) { diff --git 
a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 837dfc2390d..9da57f03298 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2203,27 +2203,6 @@ void dump_stack(void) EXPORT_SYMBOL(dump_stack); -static inline int is_kernel_stack(struct task_struct *task, - struct reg_window *rw) -{ - unsigned long rw_addr = (unsigned long) rw; - unsigned long thread_base, thread_end; - - if (rw_addr < PAGE_OFFSET) { - if (task != &init_task) - return 0; - } - - thread_base = (unsigned long) task_stack_page(task); - thread_end = thread_base + sizeof(union thread_union); - if (rw_addr >= thread_base && - rw_addr < thread_end && - !(rw_addr & 0x7UL)) - return 1; - - return 0; -} - static inline struct reg_window *kernel_stack_up(struct reg_window *rw) { unsigned long fp = rw->ins[6]; @@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) show_regs(regs); add_taint(TAINT_DIE); if (regs->tstate & TSTATE_PRIV) { + struct thread_info *tp = current_thread_info(); struct reg_window *rw = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS); @@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_regs *regs) * find some badly aligned kernel stack. */ while (rw && - count++ < 30&& - is_kernel_stack(current, rw)) { + count++ < 30 && + kstack_valid(tp, (unsigned long) rw)) { printk("Caller[%016lx]: %pS\n", rw->ins[7], (void *) rw->ins[7]); diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 4e599259396..0c1e6783657 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -46,11 +46,16 @@ SECTIONS SCHED_TEXT LOCK_TEXT KPROBES_TEXT + IRQENTRY_TEXT *(.gnu.warning) } = 0 _etext = .; RO_DATA(PAGE_SIZE) + + /* Start of data section */ + _sdata = .; + .data1 : { *(.data1) } diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S index 24b8b12deed..3753e3c6e17 100644 --- a/arch/sparc/lib/mcount.S +++ b/arch/sparc/lib/mcount.S @@ -7,26 +7,11 @@ #include <linux/linkage.h> -#include <asm/ptrace.h> -#include <asm/thread_info.h> - /* * This is the main variant and is called by C code. GCC's -pg option * automatically instruments every C function with a call to this. */ -#ifdef CONFIG_STACK_DEBUG - -#define OVSTACKSIZE 4096 /* lets hope this is enough */ - - .data - .align 8 -panicstring: - .asciz "Stack overflow\n" - .align 8 -ovstack: - .skip OVSTACKSIZE -#endif .text .align 32 .globl _mcount @@ -35,84 +20,48 @@ ovstack: .type mcount,#function _mcount: mcount: -#ifdef CONFIG_STACK_DEBUG - /* - * Check whether %sp is dangerously low. - */ - ldub [%g6 + TI_FPDEPTH], %g1 - srl %g1, 1, %g3 - add %g3, 1, %g3 - sllx %g3, 8, %g3 ! each fpregs frame is 256b - add %g3, 192, %g3 - add %g6, %g3, %g3 ! where does task_struct+frame end? - sub %g3, STACK_BIAS, %g3 - cmp %sp, %g3 - bg,pt %xcc, 1f - nop - lduh [%g6 + TI_CPU], %g1 - sethi %hi(hardirq_stack), %g3 - or %g3, %lo(hardirq_stack), %g3 - sllx %g1, 3, %g1 - ldx [%g3 + %g1], %g7 - sub %g7, STACK_BIAS, %g7 - cmp %sp, %g7 - bleu,pt %xcc, 2f - sethi %hi(THREAD_SIZE), %g3 - add %g7, %g3, %g7 - cmp %sp, %g7 - blu,pn %xcc, 1f -2: sethi %hi(softirq_stack), %g3 - or %g3, %lo(softirq_stack), %g3 - ldx [%g3 + %g1], %g7 - sub %g7, STACK_BIAS, %g7 - cmp %sp, %g7 - bleu,pt %xcc, 3f - sethi %hi(THREAD_SIZE), %g3 - add %g7, %g3, %g7 - cmp %sp, %g7 - blu,pn %xcc, 1f - nop - /* If we are already on ovstack, don't hop onto it - * again, we are already trying to output the stack overflow - * message. - */ -3: sethi %hi(ovstack), %g7 ! 
cant move to panic stack fast enough - or %g7, %lo(ovstack), %g7 - add %g7, OVSTACKSIZE, %g3 - sub %g3, STACK_BIAS + 192, %g3 - sub %g7, STACK_BIAS, %g7 - cmp %sp, %g7 - blu,pn %xcc, 2f - cmp %sp, %g3 - bleu,pn %xcc, 1f - nop -2: mov %g3, %sp - sethi %hi(panicstring), %g3 - call prom_printf - or %g3, %lo(panicstring), %o0 - call prom_halt - nop -1: -#endif #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE - mov %o7, %o0 - .globl mcount_call -mcount_call: - call ftrace_stub - mov %o0, %o7 + /* Do nothing, the retl/nop below is all we need. */ #else - sethi %hi(ftrace_trace_function), %g1 + sethi %hi(function_trace_stop), %g1 + lduw [%g1 + %lo(function_trace_stop)], %g2 + brnz,pn %g2, 2f + sethi %hi(ftrace_trace_function), %g1 sethi %hi(ftrace_stub), %g2 ldx [%g1 + %lo(ftrace_trace_function)], %g1 or %g2, %lo(ftrace_stub), %g2 cmp %g1, %g2 be,pn %icc, 1f - mov %i7, %o1 - jmpl %g1, %g0 - mov %o7, %o0 + mov %i7, %g3 + save %sp, -128, %sp + mov %g3, %o1 + jmpl %g1, %o7 + mov %i7, %o0 + ret + restore /* not reached */ 1: +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + sethi %hi(ftrace_graph_return), %g1 + ldx [%g1 + %lo(ftrace_graph_return)], %g3 + cmp %g2, %g3 + bne,pn %xcc, 5f + sethi %hi(ftrace_graph_entry_stub), %g2 + sethi %hi(ftrace_graph_entry), %g1 + or %g2, %lo(ftrace_graph_entry_stub), %g2 + ldx [%g1 + %lo(ftrace_graph_entry)], %g1 + cmp %g1, %g2 + be,pt %xcc, 2f + nop +5: mov %i7, %g2 + mov %fp, %g3 + save %sp, -128, %sp + mov %g2, %l0 + ba,pt %xcc, ftrace_graph_caller + mov %g3, %l1 +#endif +2: #endif #endif retl @@ -131,14 +80,50 @@ ftrace_stub: .globl ftrace_caller .type ftrace_caller,#function ftrace_caller: - mov %i7, %o1 - mov %o7, %o0 + sethi %hi(function_trace_stop), %g1 + mov %i7, %g2 + lduw [%g1 + %lo(function_trace_stop)], %g1 + brnz,pn %g1, ftrace_stub + mov %fp, %g3 + save %sp, -128, %sp + mov %g2, %o1 + mov %g2, %l0 + mov %g3, %l1 .globl ftrace_call ftrace_call: call ftrace_stub - mov %o0, %o7 - retl + mov %i7, %o0 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_call +ftrace_graph_call: + call ftrace_stub nop +#endif + ret + restore +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .size ftrace_graph_call,.-ftrace_graph_call +#endif + .size ftrace_call,.-ftrace_call .size ftrace_caller,.-ftrace_caller #endif #endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) + mov %l0, %o0 + mov %i7, %o1 + call prepare_ftrace_return + mov %l1, %o2 + ret + restore %o0, -8, %i7 +END(ftrace_graph_caller) + +ENTRY(return_to_handler) + save %sp, -128, %sp + call ftrace_return_to_handler + mov %fp, %o0 + jmpl %o0 + 8, %g0 + restore +END(return_to_handler) +#endif diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0eacb1ffb42..9458685902b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1216,8 +1216,8 @@ config NUMA_EMU config NODES_SHIFT int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP - range 1 9 - default "9" if MAXSMP + range 1 10 + default "10" if MAXSMP default "6" if X86_64 default "4" if X86_NUMAQ default "3" diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 59b4556a5b9..e790bc1fbfa 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -626,7 +626,7 @@ ia32_sys_call_table: .quad stub32_sigreturn .quad stub32_clone /* 120 */ .quad sys_setdomainname - .quad sys_uname + .quad sys_newuname .quad sys_modify_ldt .quad compat_sys_adjtimex .quad sys32_mprotect /* 125 */ diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index ba19ad4c47d..86a0ff0aeac 100644 --- 
a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -21,6 +21,7 @@ #define _ASM_X86_AMD_IOMMU_TYPES_H #include <linux/types.h> +#include <linux/mutex.h> #include <linux/list.h> #include <linux/spinlock.h> @@ -140,6 +141,7 @@ /* constants to configure the command buffer */ #define CMD_BUFFER_SIZE 8192 +#define CMD_BUFFER_UNINITIALIZED 1 #define CMD_BUFFER_ENTRIES 512 #define MMIO_CMD_SIZE_SHIFT 56 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) @@ -237,6 +239,7 @@ struct protection_domain { struct list_head list; /* for list of all protection domains */ struct list_head dev_list; /* List of all devices in this domain */ spinlock_t lock; /* mostly used to lock the page table*/ + struct mutex api_lock; /* protect page tables in the iommu-api path */ u16 id; /* the domain id written to the device table */ int mode; /* paging mode (0-6 levels) */ u64 *pt_root; /* page table root pointer */ diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index ba0eed8aa1a..b60f2924c41 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h @@ -28,22 +28,39 @@ #ifndef __ASSEMBLY__ #include <asm/hw_irq.h> -#include <asm/kvm_para.h> /*G:030 * But first, how does our Guest contact the Host to ask for privileged * operations? There are two ways: the direct way is to make a "hypercall", * to make requests of the Host Itself. * - * We use the KVM hypercall mechanism, though completely different hypercall - * numbers. Seventeen hypercalls are available: the hypercall number is put in - * the %eax register, and the arguments (when required) are placed in %ebx, - * %ecx, %edx and %esi. If a return value makes sense, it's returned in %eax. + * Our hypercall mechanism uses the highest unused trap code (traps 32 and + * above are used by real hardware interrupts). Seventeen hypercalls are + * available: the hypercall number is put in the %eax register, and the + * arguments (when required) are placed in %ebx, %ecx, %edx and %esi. + * If a return value makes sense, it's returned in %eax. * * Grossly invalid calls result in Sudden Death at the hands of the vengeful * Host, rather than returning failure. This reflects Winston Churchill's * definition of a gentleman: "someone who is only rude intentionally". -:*/ + */ +static inline unsigned long +hcall(unsigned long call, + unsigned long arg1, unsigned long arg2, unsigned long arg3, + unsigned long arg4) +{ + /* "int" is the Intel instruction to trigger a trap. */ + asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) + /* The call in %eax (aka "a") might be overwritten */ + : "=a"(call) + /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */ + : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4) + /* "memory" means this might write somewhere in memory. + * This isn't true for all calls, but it's safe to tell + * gcc that it might happen so it doesn't get clever. */ + : "memory"); + return call; +} /* Can't use our min() macro here: needs to be a constant */ #define LGUEST_IRQS (NR_IRQS < 32 ? 
NR_IRQS: 32) diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index f3dadb571d9..f854d89b7ed 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -118,7 +118,7 @@ static bool check_device(struct device *dev) return false; /* No device or no PCI device */ - if (!dev || dev->bus != &pci_bus_type) + if (dev->bus != &pci_bus_type) return false; devid = get_device_id(dev); @@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) u32 tail, head; u8 *target; + WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); target = iommu->cmd_buf + tail; memcpy_toio(target, cmd, sizeof(*cmd)); @@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void) struct dma_ops_domain *dma_dom; u16 devid; - while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { + for_each_pci_dev(dev) { /* Do we handle this device? */ if (!check_device(&dev->dev)) @@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain) list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { struct device *dev = dev_data->dev; - do_detach(dev); + __detach_device(dev); atomic_set(&dev_data->bind, 0); } @@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void) return NULL; spin_lock_init(&domain->lock); + mutex_init(&domain->api_lock); domain->id = domain_id_alloc(); if (!domain->id) goto out_err; @@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom) free_pagetable(domain); - domain_id_free(domain->id); - - kfree(domain); + protection_domain_free(domain); dom->priv = NULL; } @@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom, iova &= PAGE_MASK; paddr &= PAGE_MASK; + mutex_lock(&domain->api_lock); + for (i = 0; i < npages; ++i) { ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); if (ret) @@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom, paddr += PAGE_SIZE; } + mutex_unlock(&domain->api_lock); + return 0; } @@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, iova &= PAGE_MASK; + mutex_lock(&domain->api_lock); + for (i = 0; i < npages; ++i) { iommu_unmap_page(domain, iova, PM_MAP_4k); iova += PAGE_SIZE; } iommu_flush_tlb_pde(domain); + + mutex_unlock(&domain->api_lock); } static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 42f5350b908..6360abf993d 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -138,9 +138,9 @@ int amd_iommus_present; bool amd_iommu_np_cache __read_mostly; /* - * Set to true if ACPI table parsing and hardware intialization went properly + * The ACPI table parsing functions set this variable on an error */ -static bool amd_iommu_initialized; +static int __initdata amd_iommu_init_err; /* * List of protection domains - used during resume @@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table) */ for (i = 0; i < table->length; ++i) checksum += p[i]; - if (checksum != 0) + if (checksum != 0) { /* ACPI table corrupt */ - return -ENODEV; + amd_iommu_init_err = -ENODEV; + return 0; + } p += IVRS_HEADER_LENGTH; @@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) if (cmd_buf == NULL) return NULL; - iommu->cmd_buf_size = CMD_BUFFER_SIZE; + iommu->cmd_buf_size = CMD_BUFFER_SIZE | 
CMD_BUFFER_UNINITIALIZED; return cmd_buf; } @@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) &entry, sizeof(entry)); amd_iommu_reset_cmd_buffer(iommu); + iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); } static void __init free_command_buffer(struct amd_iommu *iommu) { free_pages((unsigned long)iommu->cmd_buf, - get_order(iommu->cmd_buf_size)); + get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); } /* allocates the memory where the IOMMU will log its events to */ @@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table) h->mmio_phys); iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); - if (iommu == NULL) - return -ENOMEM; + if (iommu == NULL) { + amd_iommu_init_err = -ENOMEM; + return 0; + } + ret = init_iommu_one(iommu, h); - if (ret) - return ret; + if (ret) { + amd_iommu_init_err = ret; + return 0; + } break; default: break; @@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table) } WARN_ON(p != end); - amd_iommu_initialized = true; - return 0; } @@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void) if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) return -ENODEV; + ret = amd_iommu_init_err; + if (ret) + goto out; + dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); @@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void) if (acpi_table_parse("IVRS", init_iommu_all) != 0) goto free; - if (!amd_iommu_initialized) + if (amd_iommu_init_err) { + ret = amd_iommu_init_err; goto free; + } if (acpi_table_parse("IVRS", init_memory_definitions) != 0) goto free; + if (amd_iommu_init_err) { + ret = amd_iommu_init_err; + goto free; + } + ret = sysdev_class_register(&amd_iommu_sysdev_class); if (ret) goto free; @@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void) if (ret) goto free; + enable_iommus(); + if (iommu_pass_through) ret = amd_iommu_init_passthrough(); else @@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void) amd_iommu_init_notifier(); - enable_iommus(); - if (iommu_pass_through) goto out; @@ -1315,6 +1332,7 @@ out: return ret; free: + disable_iommus(); amd_iommu_uninit_devices(); diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 3704997e8b2..b5d8b0bcf23 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -393,6 +393,7 @@ void __init gart_iommu_hole_init(void) for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { int bus; int dev_base, dev_limit; + u32 ctl; bus = bus_dev_ranges[i].bus; dev_base = bus_dev_ranges[i].dev_base; @@ -406,7 +407,19 @@ void __init gart_iommu_hole_init(void) gart_iommu_aperture = 1; x86_init.iommu.iommu_init = gart_iommu_init; - aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7; + ctl = read_pci_config(bus, slot, 3, + AMD64_GARTAPERTURECTL); + + /* + * Before we do anything else disable the GART. It may + * still be enabled if we boot into a crash-kernel here. + * Reconfiguring the GART while it is enabled could have + * unknown side-effects. 
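The amd_iommu_init.c hunks above all follow one pattern worth spelling out: acpi_table_parse() callbacks have no way to hand an errno back to their caller, so the parsers now record failures in the file-scope amd_iommu_init_err, return 0, and let amd_iommu_init() check that variable after each parsing pass. A hedged standalone sketch of the pattern (illustrative names only, not the driver's code):

	#include <errno.h>
	#include <stdio.h>

	static int init_err;			/* stands in for amd_iommu_init_err */

	static int parse_callback(void)		/* stands in for an ACPI callback */
	{
		void *table = NULL;		/* pretend an allocation failed */
		if (!table) {
			init_err = -ENOMEM;	/* record the error ... */
			return 0;		/* ... but report success upward */
		}
		return 0;
	}

	int main(void)
	{
		parse_callback();
		if (init_err) {			/* the real caller checks here */
			fprintf(stderr, "init failed: %d\n", init_err);
			return 1;
		}
		return 0;
	}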
+ */ + ctl &= ~GARTEN; + write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); + + aper_order = (ctl >> 1) & 7; aper_size = (32 * 1024 * 1024) << aper_order; aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; aper_base <<= 25; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 00187f1fcfb..e5a4a1e0161 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1640,8 +1640,10 @@ int __init APIC_init_uniprocessor(void) } #endif +#ifndef CONFIG_SMP enable_IR_x2apic(); default_setup_apic_routing(); +#endif verify_local_APIC(); connect_bsp_APIC(); diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index a4849c10a77..ebd4c51d096 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -27,7 +27,6 @@ #include <asm/cpu.h> #include <asm/reboot.h> #include <asm/virtext.h> -#include <asm/x86_init.h> #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) @@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs) #ifdef CONFIG_HPET_TIMER hpet_disable(); #endif - -#ifdef CONFIG_X86_64 - x86_platform.iommu_shutdown(); -#endif - crash_save_cpu(regs, safe_smp_processor_id()); } diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 740b440fbd7..7bca3c6a02f 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -519,29 +519,45 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type, printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ", (unsigned long long) start, (unsigned long long) end); - e820_print_type(old_type); + if (checktype) + e820_print_type(old_type); printk(KERN_CONT "\n"); for (i = 0; i < e820.nr_map; i++) { struct e820entry *ei = &e820.map[i]; u64 final_start, final_end; + u64 ei_end; if (checktype && ei->type != old_type) continue; + + ei_end = ei->addr + ei->size; /* totally covered? */ - if (ei->addr >= start && - (ei->addr + ei->size) <= (start + size)) { + if (ei->addr >= start && ei_end <= end) { real_removed_size += ei->size; memset(ei, 0, sizeof(struct e820entry)); continue; } + + /* new range is totally covered? */ + if (ei->addr < start && ei_end > end) { + e820_add_region(end, ei_end - end, ei->type); + ei->size = start - ei->addr; + real_removed_size += size; + continue; + } + /* partially covered */ final_start = max(start, ei->addr); - final_end = min(start + size, ei->addr + ei->size); + final_end = min(end, ei_end); if (final_start >= final_end) continue; real_removed_size += final_end - final_start; + /* + * left range could be head or tail, so need to update + * size at first. + */ ei->size -= final_end - final_start; if (ei->addr < final_start) continue; diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index d10a7e7294f..23b4ecdffa9 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -400,9 +400,15 @@ static int hpet_next_event(unsigned long delta, * then we might have a real hardware problem. We can not do * much about it here, but at least alert the user/admin with * a prominent warning. + * An erratum on some chipsets (ICH9,..), results in comparator read + * immediately following a write returning old value. Workaround + * for this is to read this value second time, when first + * read returns old value. 
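The interesting new case in the e820_remove_range() rework above is a removal that falls strictly inside a single region: the region must split into a head and a tail, which the hunk handles by registering the tail with e820_add_region() and shrinking the original entry down to the head. A hedged standalone model of that split (simplified types, not the kernel's structures):

	#include <stdio.h>

	struct region { unsigned long long addr, size; };

	int main(void)
	{
		struct region ei = { 0, 16 << 20 };		/* [0, 16MB)   */
		unsigned long long start = 4 << 20;		/* punch out   */
		unsigned long long end = 8 << 20;		/* [4MB, 8MB)  */

		/* "new range is totally covered?" branch from the hunk */
		if (ei.addr < start && ei.addr + ei.size > end) {
			struct region tail = { end, ei.addr + ei.size - end };
			ei.size = start - ei.addr;	/* keep the head */
			printf("head: [0x%llx, 0x%llx)\n",
			       ei.addr, ei.addr + ei.size);
			printf("tail: [0x%llx, 0x%llx)\n",
			       tail.addr, tail.addr + tail.size);
		}
		return 0;
	}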
*/ - WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt, + if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) { + WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt, KERN_WARNING "hpet: compare register read back failed.\n"); + } return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; } @@ -1144,6 +1150,7 @@ int hpet_set_periodic_freq(unsigned long freq) do_div(clc, freq); clc >>= hpet_clockevent.shift; hpet_pie_delta = clc; + hpet_pie_limit = 0; } return 1; } diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index a2c1edd2d3a..e81030f71a8 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -664,7 +664,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf) { unsigned long size = get_mpc_size(mpf->physptr); - reserve_early(mpf->physptr, mpf->physptr+size, "MP-table mpc"); + reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc"); } static int __init smp_scan_config(unsigned long base, unsigned long length) @@ -693,7 +693,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) mpf, (u64)virt_to_phys(mpf)); mem = virt_to_phys(mpf); - reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf"); + reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf"); if (mpf->physptr) smp_reserve_memory(mpf); diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 68cd24f9dea..0f7f130caa6 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -565,6 +565,9 @@ static void enable_gart_translations(void) enable_gart_translation(dev, __pa(agp_gatt_table)); } + + /* Flush the GART-TLB to remove stale entries */ + k8_flush_garts(); } /* diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 9570541caf7..c4851eff57b 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -607,6 +607,16 @@ static int __init setup_elfcorehdr(char *arg) early_param("elfcorehdr", setup_elfcorehdr); #endif +static __init void reserve_ibft_region(void) +{ + unsigned long addr, size = 0; + + addr = find_ibft_region(&size); + + if (size) + reserve_early_overlap_ok(addr, addr + size, "ibft"); +} + #ifdef CONFIG_X86_RESERVE_LOW_64K static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) { @@ -909,6 +919,8 @@ void __init setup_arch(char **cmdline_p) */ find_smp_config(); + reserve_ibft_region(); + reserve_trampoline_memory(); #ifdef CONFIG_ACPI_SLEEP @@ -976,8 +988,6 @@ void __init setup_arch(char **cmdline_p) dma32_reserve_bootmem(); - reserve_ibft_region(); - #ifdef CONFIG_KVM_CLOCK kvmclock_init(); #endif diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 7e59dc1d3fc..2bdf628066b 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -115,7 +115,7 @@ static void async_hcall(unsigned long call, unsigned long arg1, local_irq_save(flags); if (lguest_data.hcall_status[next_call] != 0xFF) { /* Table full, so do normal hcall which will flush table. 
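Restating the comparator erratum above more plainly: on the affected chipsets (ICH9 among others), a read of HPET_Tn_CMP issued immediately after a write can return the stale pre-write value, so the hunk warns only when a second read still disagrees. A hedged toy model of that behaviour (hypothetical names, not the HPET driver):

	#include <stdio.h>

	static unsigned int cmp_actual, cmp_stale, just_written;

	static void cmp_write(unsigned int v)
	{
		cmp_stale = cmp_actual;		/* model the erratum window  */
		cmp_actual = v;
		just_written = 1;
	}

	static unsigned int cmp_read(void)
	{
		if (just_written) {		/* first read after a write  */
			just_written = 0;
			return cmp_stale;	/* ... returns the old value */
		}
		return cmp_actual;		/* later reads are correct   */
	}

	int main(void)
	{
		cmp_write(1234);
		/* the workaround: only two mismatching reads are an error */
		if (cmp_read() != 1234 && cmp_read() != 1234)
			printf("hpet: compare register read back failed.\n");
		else
			printf("second read sees the new value\n");
		return 0;
	}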
*/ - kvm_hypercall4(call, arg1, arg2, arg3, arg4); + hcall(call, arg1, arg2, arg3, arg4); } else { lguest_data.hcalls[next_call].arg0 = call; lguest_data.hcalls[next_call].arg1 = arg1; @@ -145,46 +145,45 @@ static void async_hcall(unsigned long call, unsigned long arg1, * So, when we're in lazy mode, we call async_hcall() to store the call for * future processing: */ -static void lazy_hcall1(unsigned long call, - unsigned long arg1) +static void lazy_hcall1(unsigned long call, unsigned long arg1) { if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) - kvm_hypercall1(call, arg1); + hcall(call, arg1, 0, 0, 0); else async_hcall(call, arg1, 0, 0, 0); } /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ static void lazy_hcall2(unsigned long call, - unsigned long arg1, - unsigned long arg2) + unsigned long arg1, + unsigned long arg2) { if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) - kvm_hypercall2(call, arg1, arg2); + hcall(call, arg1, arg2, 0, 0); else async_hcall(call, arg1, arg2, 0, 0); } static void lazy_hcall3(unsigned long call, - unsigned long arg1, - unsigned long arg2, - unsigned long arg3) + unsigned long arg1, + unsigned long arg2, + unsigned long arg3) { if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) - kvm_hypercall3(call, arg1, arg2, arg3); + hcall(call, arg1, arg2, arg3, 0); else async_hcall(call, arg1, arg2, arg3, 0); } #ifdef CONFIG_X86_PAE static void lazy_hcall4(unsigned long call, - unsigned long arg1, - unsigned long arg2, - unsigned long arg3, - unsigned long arg4) + unsigned long arg1, + unsigned long arg2, + unsigned long arg3, + unsigned long arg4) { if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) - kvm_hypercall4(call, arg1, arg2, arg3, arg4); + hcall(call, arg1, arg2, arg3, arg4); else async_hcall(call, arg1, arg2, arg3, arg4); } @@ -196,13 +195,13 @@ static void lazy_hcall4(unsigned long call, :*/ static void lguest_leave_lazy_mmu_mode(void) { - kvm_hypercall0(LHCALL_FLUSH_ASYNC); + hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); paravirt_leave_lazy_mmu(); } static void lguest_end_context_switch(struct task_struct *next) { - kvm_hypercall0(LHCALL_FLUSH_ASYNC); + hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); paravirt_end_context_switch(next); } @@ -286,7 +285,7 @@ static void lguest_write_idt_entry(gate_desc *dt, /* Keep the local copy up to date. */ native_write_idt_entry(dt, entrynum, g); /* Tell Host about this new entry. */ - kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); + hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0); } /* @@ -300,7 +299,7 @@ static void lguest_load_idt(const struct desc_ptr *desc) struct desc_struct *idt = (void *)desc->address; for (i = 0; i < (desc->size+1)/8; i++) - kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b); + hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0); } /* @@ -321,7 +320,7 @@ static void lguest_load_gdt(const struct desc_ptr *desc) struct desc_struct *gdt = (void *)desc->address; for (i = 0; i < (desc->size+1)/8; i++) - kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b); + hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0); } /* @@ -334,8 +333,8 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, { native_write_gdt_entry(dt, entrynum, desc, type); /* Tell Host about this new entry. 
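One detail of the lguest_load_idt()/lguest_load_gdt() loops above that is easy to miss: the size field of a descriptor-table pointer is a limit, that is, the table's byte count minus one, and each i386 descriptor entry is 8 bytes, hence the (desc->size + 1) / 8 iteration count. A quick standalone check:

	#include <assert.h>

	int main(void)
	{
		unsigned int entries = 32;
		unsigned int limit = entries * 8 - 1;	/* size field: 255 */

		assert((limit + 1) / 8 == entries);
		return 0;
	}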
*/ - kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum, - dt[entrynum].a, dt[entrynum].b); + hcall(LHCALL_LOAD_GDT_ENTRY, entrynum, + dt[entrynum].a, dt[entrynum].b, 0); } /* @@ -931,7 +930,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta, } /* Please wake us this far in the future. */ - kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta); + hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0); return 0; } @@ -942,7 +941,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode, case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: /* A 0 argument shuts the clock down. */ - kvm_hypercall0(LHCALL_SET_CLOCKEVENT); + hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0); break; case CLOCK_EVT_MODE_ONESHOT: /* This is what we expect. */ @@ -1100,7 +1099,7 @@ static void set_lguest_basic_apic_ops(void) /* STOP! Until an interrupt comes in. */ static void lguest_safe_halt(void) { - kvm_hypercall0(LHCALL_HALT); + hcall(LHCALL_HALT, 0, 0, 0, 0); } /* @@ -1112,8 +1111,8 @@ static void lguest_safe_halt(void) */ static void lguest_power_off(void) { - kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), - LGUEST_SHUTDOWN_POWEROFF); + hcall(LHCALL_SHUTDOWN, __pa("Power down"), + LGUEST_SHUTDOWN_POWEROFF, 0, 0); } /* @@ -1123,7 +1122,7 @@ static void lguest_power_off(void) */ static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) { - kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF); + hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0); /* The hcall won't return, but to keep gcc happy, we're "done". */ return NOTIFY_DONE; } @@ -1162,7 +1161,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) len = sizeof(scratch) - 1; scratch[len] = '\0'; memcpy(scratch, buf, len); - kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch)); + hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0); /* This routine returns the number of bytes actually written. */ return len; @@ -1174,7 +1173,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) */ static void lguest_restart(char *reason) { - kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); + hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0); } /*G:050 diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index 27eac0faee4..4f420c2f2d5 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S @@ -32,7 +32,7 @@ ENTRY(lguest_entry) */ movl $LHCALL_LGUEST_INIT, %eax movl $lguest_data - __PAGE_OFFSET, %ebx - .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ + int $LGUEST_TRAP_ENTRY /* Set up the initial stack so we can run C code. 
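The .byte 0x0f,0x01,0xc1 sequence removed from i386_head.S above is the raw encoding of VMCALL; its replacement, a plain software interrupt, encodes as 0xcd followed by the vector byte. A hedged illustration, assuming LGUEST_TRAP_ENTRY is 0x1f as in lguest's headers:

	#include <stdio.h>

	int main(void)
	{
		unsigned char vmcall[]   = { 0x0f, 0x01, 0xc1 };  /* old: KVM_HYPERCALL */
		unsigned char int_insn[] = { 0xcd, 0x1f };        /* new: int $0x1f     */

		printf("vmcall: %02x %02x %02x\n", vmcall[0], vmcall[1], vmcall[2]);
		printf("int   : %02x %02x\n", int_insn[0], int_insn[1]);
		return 0;
	}

Trapping to the Host through its own vector also removes the dependency on asm/kvm_para.h that the lguest_hcall.h hunk above drops.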
*/ movl $(init_thread_union+THREAD_SIZE),%esp diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index b641388d828..ad47daeafa4 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S @@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend) ret ENTRY(restore_image) + movl mmu_cr4_features, %ecx movl resume_pg_dir, %eax subl $__PAGE_OFFSET, %eax movl %eax, %cr3 + jecxz 1f # cr4 Pentium and higher, skip if zero + andl $~(X86_CR4_PGE), %ecx + movl %ecx, %cr4; # turn off PGE + movl %cr3, %eax; # flush TLB + movl %eax, %cr3 +1: movl restore_pblist, %edx .p2align 4,,7 @@ -54,16 +61,8 @@ done: movl $swapper_pg_dir, %eax subl $__PAGE_OFFSET, %eax movl %eax, %cr3 - /* Flush TLB, including "global" things (vmalloc) */ movl mmu_cr4_features, %ecx jecxz 1f # cr4 Pentium and higher, skip if zero - movl %ecx, %edx - andl $~(X86_CR4_PGE), %edx - movl %edx, %cr4; # turn off PGE -1: - movl %cr3, %eax; # flush TLB - movl %eax, %cr3 - jecxz 1f # cr4 Pentium and higher, skip if zero movl %ecx, %cr4; # turn PGE back on 1: diff --git a/block/Kconfig b/block/Kconfig index 62a5921321c..f9e89f4d94b 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -78,8 +78,9 @@ config BLK_DEV_INTEGRITY Protection. If in doubt, say N. config BLK_CGROUP - tristate + tristate "Block cgroup support" depends on CGROUPS + depends on CFQ_GROUP_IOSCHED default n ---help--- Generic block IO controller cgroup interface. This is the common diff --git a/block/blk-settings.c b/block/blk-settings.c index d9a9db5f0a2..f5ed5a1187b 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -8,6 +8,7 @@ #include <linux/blkdev.h> #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ #include <linux/gcd.h> +#include <linux/lcm.h> #include <linux/jiffies.h> #include <linux/gfp.h> @@ -462,16 +463,6 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) } EXPORT_SYMBOL(blk_queue_stack_limits); -static unsigned int lcm(unsigned int a, unsigned int b) -{ - if (a && b) - return (a * b) / gcd(a, b); - else if (b) - return b; - - return a; -} - /** * blk_stack_limits - adjust queue_limits for stacked devices * @t: the stacking driver limits (top device) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index c2b821fa324..306759bbdf1 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -107,6 +107,19 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) return queue_var_show(max_sectors_kb, (page)); } +static ssize_t queue_max_segments_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_max_segments(q), (page)); +} + +static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) +{ + if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) + return queue_var_show(queue_max_segment_size(q), (page)); + + return queue_var_show(PAGE_CACHE_SIZE, (page)); +} + static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) { return queue_var_show(queue_logical_block_size(q), page); @@ -281,6 +294,16 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = { .show = queue_max_hw_sectors_show, }; +static struct queue_sysfs_entry queue_max_segments_entry = { + .attr = {.name = "max_segments", .mode = S_IRUGO }, + .show = queue_max_segments_show, +}; + +static struct queue_sysfs_entry queue_max_segment_size_entry = { + .attr = {.name = "max_segment_size", .mode = S_IRUGO }, + .show = queue_max_segment_size_show, +}; + static struct queue_sysfs_entry queue_iosched_entry = { .attr = {.name = 
"scheduler", .mode = S_IRUGO | S_IWUSR }, .show = elv_iosched_show, @@ -356,6 +379,8 @@ static struct attribute *default_attrs[] = { &queue_ra_entry.attr, &queue_max_hw_sectors_entry.attr, &queue_max_sectors_entry.attr, + &queue_max_segments_entry.attr, + &queue_max_segment_size_entry.attr, &queue_iosched_entry.attr, &queue_hw_sector_size_entry.attr, &queue_logical_block_size_entry.attr, diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index fc98a48554f..838834be115 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -48,6 +48,7 @@ static const int cfq_hist_divisor = 4; #define CFQ_SERVICE_SHIFT 12 #define CFQQ_SEEK_THR (sector_t)(8 * 100) +#define CFQQ_CLOSE_THR (sector_t)(8 * 1024) #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) @@ -948,6 +949,11 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) unsigned int major, minor; cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key)); + if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) { + sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); + cfqg->blkg.dev = MKDEV(major, minor); + goto done; + } if (cfqg || !create) goto done; @@ -1518,7 +1524,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) { if (cfqq) { - cfq_log_cfqq(cfqd, cfqq, "set_active"); + cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d", + cfqd->serving_prio, cfqd->serving_type); cfqq->slice_start = 0; cfqq->dispatch_start = jiffies; cfqq->allocated_slice = 0; @@ -1661,9 +1668,9 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, } static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, - struct request *rq, bool for_preempt) + struct request *rq) { - return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR; + return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; } static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, @@ -1690,7 +1697,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, * will contain the closest sector. */ __cfqq = rb_entry(parent, struct cfq_queue, p_node); - if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false)) + if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) return __cfqq; if (blk_rq_pos(__cfqq->next_rq) < sector) @@ -1701,7 +1708,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, return NULL; __cfqq = rb_entry(node, struct cfq_queue, p_node); - if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false)) + if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) return __cfqq; return NULL; @@ -1722,6 +1729,8 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, { struct cfq_queue *cfqq; + if (cfq_class_idle(cur_cfqq)) + return NULL; if (!cfq_cfqq_sync(cur_cfqq)) return NULL; if (CFQQ_SEEKY(cur_cfqq)) @@ -1788,7 +1797,11 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) * Otherwise, we do only if they are the last ones * in their service tree. */ - return service_tree->count == 1 && cfq_cfqq_sync(cfqq); + if (service_tree->count == 1 && cfq_cfqq_sync(cfqq)) + return 1; + cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", + service_tree->count); + return 0; } static void cfq_arm_slice_timer(struct cfq_data *cfqd) @@ -1833,8 +1846,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) * time slice. */ if (sample_valid(cic->ttime_samples) && - (cfqq->slice_end - jiffies < cic->ttime_mean)) + (cfqq->slice_end - jiffies < cic->ttime_mean)) { + cfq_log_cfqq(cfqd, cfqq, "Not idling. 
think_time:%d", + cic->ttime_mean); return; + } cfq_mark_cfqq_wait_request(cfqq); @@ -2042,6 +2058,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg) slice = max(slice, 2 * cfqd->cfq_slice_idle); slice = max_t(unsigned, slice, CFQ_MIN_TT); + cfq_log(cfqd, "workload slice:%d", slice); cfqd->workload_expires = jiffies + slice; cfqd->noidle_tree_requires_idle = false; } @@ -2189,10 +2206,13 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd) struct cfq_queue *cfqq; int dispatched = 0; - while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) + /* Expire the timeslice of the current active queue first */ + cfq_slice_expired(cfqd, 0); + while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) { + __cfq_set_active_queue(cfqd, cfqq); dispatched += __cfq_forced_dispatch_cfqq(cfqq); + } - cfq_slice_expired(cfqd, 0); BUG_ON(cfqd->busy_queues); cfq_log(cfqd, "forced_dispatch=%d", dispatched); @@ -3104,7 +3124,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, * if this request is as-good as one we would expect from the * current cfqq, let it preempt */ - if (cfq_rq_close(cfqd, cfqq, rq, true)) + if (cfq_rq_close(cfqd, cfqq, rq)) return true; return false; @@ -3308,6 +3328,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) if (cfq_should_wait_busy(cfqd, cfqq)) { cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; cfq_mark_cfqq_wait_busy(cfqq); + cfq_log_cfqq(cfqd, cfqq, "will busy wait"); } /* diff --git a/block/elevator.c b/block/elevator.c index df75676f667..76e3702d538 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -154,7 +154,7 @@ static struct elevator_type *elevator_get(const char *name) spin_unlock(&elv_list_lock); - sprintf(elv, "%s-iosched", name); + snprintf(elv, sizeof(elv), "%s-iosched", name); request_module("%s", elv); spin_lock(&elv_list_lock); diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 837de669743..78c55508aff 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -117,19 +117,14 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) if (ACPI_FAILURE(status)) return_ACPI_STATUS(status); - /* Mark wake-enabled or HW enable, or both */ - - if (gpe_event_info->runtime_count) { - /* Clear the GPE (of stale events), then enable it */ - status = acpi_hw_clear_gpe(gpe_event_info); - if (ACPI_FAILURE(status)) - return_ACPI_STATUS(status); - - /* Enable the requested runtime GPE */ - status = acpi_hw_write_gpe_enable_reg(gpe_event_info); - } + /* Clear the GPE (of stale events), then enable it */ + status = acpi_hw_clear_gpe(gpe_event_info); + if (ACPI_FAILURE(status)) + return_ACPI_STATUS(status); - return_ACPI_STATUS(AE_OK); + /* Enable the requested GPE */ + status = acpi_hw_write_gpe_enable_reg(gpe_event_info); + return_ACPI_STATUS(status); } /******************************************************************************* diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index edf62bf5b26..2fbfe51fb14 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) acpi_ut_add_reference(obj_desc->field.region_obj); + /* allow full data read from EC address space */ + if (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_EC) { + if (obj_desc->common_field.bit_length > 8) { + unsigned width = + ACPI_ROUND_BITS_UP_TO_BYTES( + obj_desc->common_field.bit_length); + // 
access_bit_width is u8, don't overflow it + if (width > 8) + width = 8; + obj_desc->common_field.access_byte_width = + width; + obj_desc->common_field.access_bit_width = + 8 * width; + } + } + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", obj_desc->field.start_field_bit_offset, diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 5717bd30086..3026e3fa83e 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -568,13 +568,13 @@ static int acpi_battery_update(struct acpi_battery *battery) result = acpi_battery_get_status(battery); if (result) return result; -#ifdef CONFIG_ACPI_SYSFS_POWER if (!acpi_battery_present(battery)) { +#ifdef CONFIG_ACPI_SYSFS_POWER sysfs_remove_battery(battery); +#endif battery->update_time = 0; return 0; } -#endif if (!battery->update_time || old_present != acpi_battery_present(battery)) { result = acpi_battery_get_info(battery); @@ -880,7 +880,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event) #ifdef CONFIG_ACPI_SYSFS_POWER /* acpi_battery_update could remove power_supply object */ if (battery->bat.dev) - kobject_uevent(&battery->bat.dev->kobj, KOBJ_CHANGE); + power_supply_changed(&battery->bat); #endif } diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index a9c429c5d50..3fe29e992be 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -1026,13 +1026,10 @@ static int dock_remove(struct dock_station *ds) static acpi_status find_dock(acpi_handle handle, u32 lvl, void *context, void **rv) { - acpi_status status = AE_OK; - if (is_dock(handle)) - if (dock_add(handle) >= 0) - status = AE_CTRL_TERMINATE; + dock_add(handle); - return status; + return AE_OK; } static acpi_status diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 35ba2547f54..f2234db85da 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -629,12 +629,12 @@ static u32 acpi_ec_gpe_handler(void *data) static acpi_status acpi_ec_space_handler(u32 function, acpi_physical_address address, - u32 bits, u64 *value, + u32 bits, u64 *value64, void *handler_context, void *region_context) { struct acpi_ec *ec = handler_context; - int result = 0, i; - u8 temp = 0; + int result = 0, i, bytes = bits / 8; + u8 *value = (u8 *)value64; if ((address > 0xFF) || !value || !handler_context) return AE_BAD_PARAMETER; @@ -642,32 +642,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (function != ACPI_READ && function != ACPI_WRITE) return AE_BAD_PARAMETER; - if (bits != 8 && acpi_strict) - return AE_BAD_PARAMETER; - - if (EC_FLAGS_MSI) + if (EC_FLAGS_MSI || bits > 8) acpi_ec_burst_enable(ec); - if (function == ACPI_READ) { - result = acpi_ec_read(ec, address, &temp); - *value = temp; - } else { - temp = 0xff & (*value); - result = acpi_ec_write(ec, address, temp); - } - - for (i = 8; unlikely(bits - i > 0); i += 8) { - ++address; - if (function == ACPI_READ) { - result = acpi_ec_read(ec, address, &temp); - (*value) |= ((u64)temp) << i; - } else { - temp = 0xff & ((*value) >> i); - result = acpi_ec_write(ec, address, temp); - } - } + for (i = 0; i < bytes; ++i, ++address, ++value) + result = (function == ACPI_READ) ? 
+ acpi_ec_read(ec, address, value) : + acpi_ec_write(ec, address, *value); - if (EC_FLAGS_MSI) + if (EC_FLAGS_MSI || bits > 8) acpi_ec_burst_disable(ec); switch (result) { diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index b8725461d88..b0337d31460 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -61,8 +61,10 @@ int node_to_pxm(int node) void __acpi_map_pxm_to_node(int pxm, int node) { - pxm_to_node_map[pxm] = node; - node_to_pxm_map[node] = pxm; + if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm]) + pxm_to_node_map[pxm] = node; + if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node]) + node_to_pxm_map[node] = pxm; } int acpi_map_pxm_to_node(int pxm) diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 8e6d8665f0a..7594f65800c 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -758,7 +758,14 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, queue = hp ? kacpi_hotplug_wq : (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq); dpc->wait = hp ? 1 : 0; - INIT_WORK(&dpc->work, acpi_os_execute_deferred); + + if (queue == kacpi_hotplug_wq) + INIT_WORK(&dpc->work, acpi_os_execute_deferred); + else if (queue == kacpi_notify_wq) + INIT_WORK(&dpc->work, acpi_os_execute_deferred); + else + INIT_WORK(&dpc->work, acpi_os_execute_deferred); + ret = queue_work(queue, &dpc->work); if (!ret) { @@ -1151,16 +1158,10 @@ int acpi_check_resource_conflict(const struct resource *res) if (clash) { if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { - printk("%sACPI: %s resource %s [0x%llx-0x%llx]" - " conflicts with ACPI region %s" - " [0x%llx-0x%llx]\n", - acpi_enforce_resources == ENFORCE_RESOURCES_LAX - ? KERN_WARNING : KERN_ERR, - ioport ? "I/O" : "Memory", res->name, - (long long) res->start, (long long) res->end, - res_list_elem->name, - (long long) res_list_elem->start, - (long long) res_list_elem->end); + printk(KERN_WARNING "ACPI: resource %s %pR" + " conflicts with ACPI region %s %pR\n", + res->name, res, res_list_elem->name, + res_list_elem); if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) printk(KERN_NOTICE "ACPI: This conflict may" " cause random problems and system" diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 0261b116d05..0338f513a01 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1081,12 +1081,6 @@ static void acpi_device_set_id(struct acpi_device *device) if (ACPI_IS_ROOT_DEVICE(device)) { acpi_add_id(device, ACPI_SYSTEM_HID); break; - } else if (ACPI_IS_ROOT_DEVICE(device->parent)) { - /* \_SB_, the only root-level namespace device */ - acpi_add_id(device, ACPI_BUS_HID); - strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME); - strcpy(device->pnp.device_class, ACPI_BUS_CLASS); - break; } status = acpi_get_object_info(device->handle, &info); @@ -1121,6 +1115,12 @@ static void acpi_device_set_id(struct acpi_device *device) acpi_add_id(device, ACPI_DOCK_HID); else if (!acpi_ibm_smbus_match(device)) acpi_add_id(device, ACPI_SMBUS_IBM_HID); + else if (!acpi_device_hid(device) && + ACPI_IS_ROOT_DEVICE(device->parent)) { + acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */ + strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME); + strcpy(device->pnp.device_class, ACPI_BUS_CLASS); + } break; case ACPI_BUS_TYPE_POWER: diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 6a014379677..a0c93b32148 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -44,6 +44,7 @@ #include <linux/dmi.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> 
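
/*
 * A side note on the __acpi_os_execute() hunk above: the three textually
 * identical INIT_WORK() branches are not redundant. Under CONFIG_LOCKDEP,
 * INIT_WORK() declares a static lock_class_key at each call site, so
 * splitting the initialization per queue gives each workqueue its own
 * lockdep class. A condensed sketch of that macro shape (the name and
 * helper below are hypothetical, not quoted from workqueue.h):
 */
#define MY_INIT_WORK(_work, _func)                              \
        do {                                                    \
                static struct lock_class_key __key;             \
                __my_init_work((_work), (_func), &__key);       \
        } while (0)
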
+#include <linux/suspend.h> #define PREFIX "ACPI: " @@ -89,7 +90,6 @@ module_param(allow_duplicates, bool, 0644); static int register_count = 0; static int acpi_video_bus_add(struct acpi_device *device); static int acpi_video_bus_remove(struct acpi_device *device, int type); -static int acpi_video_resume(struct acpi_device *device); static void acpi_video_bus_notify(struct acpi_device *device, u32 event); static const struct acpi_device_id video_device_ids[] = { @@ -105,7 +105,6 @@ static struct acpi_driver acpi_video_bus = { .ops = { .add = acpi_video_bus_add, .remove = acpi_video_bus_remove, - .resume = acpi_video_resume, .notify = acpi_video_bus_notify, }, }; @@ -160,6 +159,7 @@ struct acpi_video_bus { struct proc_dir_entry *dir; struct input_dev *input; char phys[32]; /* for input device */ + struct notifier_block pm_nb; }; struct acpi_video_device_flags { @@ -1021,6 +1021,13 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) if (IS_ERR(device->backlight)) return; + /* + * Save current brightness level in case we have to restore it + * before acpi_video_device_lcd_set_level() is called next time. + */ + device->backlight->props.brightness = + acpi_video_get_brightness(device->backlight); + result = sysfs_create_link(&device->backlight->dev.kobj, &device->dev->dev.kobj, "device"); if (result) @@ -2123,7 +2130,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event) { struct acpi_video_bus *video = acpi_driver_data(device); struct input_dev *input; - int keycode; + int keycode = 0; if (!video) return; @@ -2159,17 +2166,19 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event) break; default: - keycode = KEY_UNKNOWN; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported event [0x%x]\n", event)); break; } acpi_notifier_call_chain(device, event, 0); - input_report_key(input, keycode, 1); - input_sync(input); - input_report_key(input, keycode, 0); - input_sync(input); + + if (keycode) { + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + } return; } @@ -2180,7 +2189,7 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) struct acpi_device *device = NULL; struct acpi_video_bus *bus; struct input_dev *input; - int keycode; + int keycode = 0; if (!video_device) return; @@ -2221,39 +2230,48 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) keycode = KEY_DISPLAY_OFF; break; default: - keycode = KEY_UNKNOWN; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported event [0x%x]\n", event)); break; } acpi_notifier_call_chain(device, event, 0); - input_report_key(input, keycode, 1); - input_sync(input); - input_report_key(input, keycode, 0); - input_sync(input); + + if (keycode) { + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + } return; } -static int instance; -static int acpi_video_resume(struct acpi_device *device) +static int acpi_video_resume(struct notifier_block *nb, + unsigned long val, void *ign) { struct acpi_video_bus *video; struct acpi_video_device *video_device; int i; - if (!device || !acpi_driver_data(device)) - return -EINVAL; + switch (val) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + case PM_RESTORE_PREPARE: + return NOTIFY_DONE; + } - video = acpi_driver_data(device); + video = container_of(nb, struct acpi_video_bus, pm_nb); + + dev_info(&video->device->dev, "Restoring backlight state\n"); for (i = 0; i < 
video->attached_count; i++) { video_device = video->attached_array[i].bind_info; if (video_device && video_device->backlight) acpi_video_set_brightness(video_device->backlight); } - return AE_OK; + + return NOTIFY_OK; } static acpi_status @@ -2277,6 +2295,8 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context, return AE_OK; } +static int instance; + static int acpi_video_bus_add(struct acpi_device *device) { struct acpi_video_bus *video; @@ -2358,7 +2378,6 @@ static int acpi_video_bus_add(struct acpi_device *device) set_bit(KEY_BRIGHTNESSDOWN, input->keybit); set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); set_bit(KEY_DISPLAY_OFF, input->keybit); - set_bit(KEY_UNKNOWN, input->keybit); error = input_register_device(input); if (error) @@ -2370,6 +2389,10 @@ static int acpi_video_bus_add(struct acpi_device *device) video->flags.rom ? "yes" : "no", video->flags.post ? "yes" : "no"); + video->pm_nb.notifier_call = acpi_video_resume; + video->pm_nb.priority = 0; + register_pm_notifier(&video->pm_nb); + return 0; err_free_input_dev: @@ -2396,6 +2419,8 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type) video = acpi_driver_data(device); + unregister_pm_notifier(&video->pm_nb); + acpi_video_bus_stop_devices(video); acpi_video_bus_put_devices(video); acpi_video_bus_remove_fs(device); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 3f6771e6323..49cffb6094a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1494,6 +1494,7 @@ static int ata_hpa_resize(struct ata_device *dev) { struct ata_eh_context *ehc = &dev->link->eh_context; int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; + bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA; u64 sectors = ata_id_n_sectors(dev->id); u64 native_sectors; int rc; @@ -1510,7 +1511,7 @@ static int ata_hpa_resize(struct ata_device *dev) /* If device aborted the command or HPA isn't going to * be unlocked, skip HPA resizing. */ - if (rc == -EACCES || !ata_ignore_hpa) { + if (rc == -EACCES || !unlock_hpa) { ata_dev_printk(dev, KERN_WARNING, "HPA support seems " "broken, skipping HPA handling\n"); dev->horkage |= ATA_HORKAGE_BROKEN_HPA; @@ -1525,7 +1526,7 @@ static int ata_hpa_resize(struct ata_device *dev) dev->n_native_sectors = native_sectors; /* nothing to do? */ - if (native_sectors <= sectors || !ata_ignore_hpa) { + if (native_sectors <= sectors || !unlock_hpa) { if (!print_info || native_sectors == sectors) return 0; @@ -4186,36 +4187,51 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, goto fail; /* verify n_sectors hasn't changed */ - if (dev->class == ATA_DEV_ATA && n_sectors && - dev->n_sectors != n_sectors) { - ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch " - "%llu != %llu\n", - (unsigned long long)n_sectors, - (unsigned long long)dev->n_sectors); - /* - * Something could have caused HPA to be unlocked - * involuntarily. If n_native_sectors hasn't changed - * and the new size matches it, keep the device. 
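
/*
 * A minimal sketch of the PM-notifier pattern that replaces the ACPI
 * .resume callback in the acpi_video hunks above; the names my_pm_event
 * and my_dev are illustrative, not from the driver:
 */
#include <linux/suspend.h>
#include <linux/notifier.h>

static int my_pm_event(struct notifier_block *nb, unsigned long val, void *ign)
{
        switch (val) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
        case PM_RESTORE_PREPARE:
                return NOTIFY_DONE;     /* nothing to do on the way down */
        }
        /* PM_POST_* events arrive here after resume; recover the device
         * with container_of(nb, struct my_dev, pm_nb) and restore state */
        return NOTIFY_OK;
}

/* probe:  dev->pm_nb.notifier_call = my_pm_event;
 *         register_pm_notifier(&dev->pm_nb);
 * remove: unregister_pm_notifier(&dev->pm_nb); */
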
- */ - if (dev->n_native_sectors == n_native_sectors && - dev->n_sectors > n_sectors && - dev->n_sectors == n_native_sectors) { - ata_dev_printk(dev, KERN_WARNING, - "new n_sectors matches native, probably " - "late HPA unlock, continuing\n"); - /* keep using the old n_sectors */ - dev->n_sectors = n_sectors; - } else { - /* restore original n_[native]_sectors and fail */ - dev->n_native_sectors = n_native_sectors; - dev->n_sectors = n_sectors; - rc = -ENODEV; - goto fail; - } + if (dev->class != ATA_DEV_ATA || !n_sectors || + dev->n_sectors == n_sectors) + return 0; + + /* n_sectors has changed */ + ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n", + (unsigned long long)n_sectors, + (unsigned long long)dev->n_sectors); + + /* + * Something could have caused HPA to be unlocked + * involuntarily. If n_native_sectors hasn't changed and the + * new size matches it, keep the device. + */ + if (dev->n_native_sectors == n_native_sectors && + dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { + ata_dev_printk(dev, KERN_WARNING, + "new n_sectors matches native, probably " + "late HPA unlock, continuing\n"); + /* keep using the old n_sectors */ + dev->n_sectors = n_sectors; + return 0; } - return 0; + /* + * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try + * unlocking HPA in those cases. + * + * https://bugzilla.kernel.org/show_bug.cgi?id=15396 + */ + if (dev->n_native_sectors == n_native_sectors && + dev->n_sectors < n_sectors && n_sectors == n_native_sectors && + !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { + ata_dev_printk(dev, KERN_WARNING, + "old n_sectors matches native, probably " + "late HPA lock, will try to unlock HPA\n"); + /* try unlocking HPA */ + dev->flags |= ATA_DFLAG_UNLOCK_HPA; + rc = -EIO; + } else + rc = -ENODEV; + /* restore original n_[native_]sectors and fail */ + dev->n_native_sectors = n_native_sectors; + dev->n_sectors = n_sectors; fail: ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); return rc; @@ -4354,6 +4370,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, + /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 6411e0c7b9f..e3877b6843c 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1816,10 +1816,6 @@ retry: !ap->ops->sff_irq_check(ap)) continue; - if (printk_ratelimit()) - ata_port_printk(ap, KERN_INFO, - "clearing spurious IRQ\n"); - if (idle & (1 << i)) { ap->ops->sff_check_status(ap); ap->ops->sff_irq_clear(ap); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 4f4aa5897b4..933442f4032 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -313,7 +313,7 @@ static ssize_t print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr, char *buf) { - return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); + return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); } static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); diff --git a/drivers/base/node.c b/drivers/base/node.c index 985abd7f49a..057979a19ee 100644 --- a/drivers/base/node.c +++ 
b/drivers/base/node.c @@ -15,7 +15,7 @@ #include <linux/cpu.h> #include <linux/device.h> #include <linux/swap.h> -#include <linux/gfp.h> +#include <linux/slab.h> static struct sysdev_class_attribute *node_state_attrs[]; diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 459f1bc25a7..c5f22bb0a48 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -2533,7 +2533,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) Controller->RequestQueue[n] = RequestQueue; blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit); RequestQueue->queuedata = Controller; - blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit); blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit); blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand); disk->queue = RequestQueue; diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 9e3af307aae..eb5ff0531cf 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -3341,6 +3341,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id) printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr); + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); fail_all_cmds(h->ctlr); return IRQ_HANDLED; } diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 17956ff6a08..df018990c42 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -536,7 +536,9 @@ static void atodb_endio(struct bio *bio, int error) put_ldev(mdev); } +/* sector to word */ #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) + /* activity log to on disk bitmap -- prepare bio unless that sector * is already covered by previously prepared bios */ static int atodb_prepare_unless_covered(struct drbd_conf *mdev, @@ -546,13 +548,20 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev, { struct bio *bio; struct page *page; - sector_t on_disk_sector = enr + mdev->ldev->md.md_offset - + mdev->ldev->md.bm_offset; + sector_t on_disk_sector; unsigned int page_offset = PAGE_SIZE; int offset; int i = 0; int err = -ENOMEM; + /* We always write aligned, full 4k blocks, + * so we can ignore the logical_block_size (for now) */ + enr &= ~7U; + on_disk_sector = enr + mdev->ldev->md.md_offset + + mdev->ldev->md.bm_offset; + + D_ASSERT(!(on_disk_sector & 7U)); + /* Check if that enr is already covered by an already created bio. * Caution, bios[] is not NULL terminated, * but only initialized to all NULL. @@ -588,7 +597,7 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev, offset = S2W(enr); drbd_bm_get_lel(mdev, offset, - min_t(size_t, S2W(1), drbd_bm_words(mdev) - offset), + min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset), kmap(page) + page_offset); kunmap(page); @@ -597,7 +606,7 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev, bio->bi_bdev = mdev->ldev->md_bdev; bio->bi_sector = on_disk_sector; - if (bio_add_page(bio, page, MD_SECTOR_SIZE, page_offset) != MD_SECTOR_SIZE) + if (bio_add_page(bio, page, 4096, page_offset) != 4096) goto out_put_page; atomic_inc(&wc->count); @@ -1327,7 +1336,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev) /* ok, ->resync is there. */ for (i = 0; i < mdev->resync->nr_elements; i++) { e = lc_element_by_index(mdev->resync, i); - bm_ext = e ? 
lc_entry(e, struct bm_extent, lce) : NULL; + bm_ext = lc_entry(e, struct bm_extent, lce); if (bm_ext->lce.lc_number == LC_FREE) continue; if (bm_ext->lce.lc_number == mdev->resync_wenr) { diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 3d6f3d98894..3390716898d 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -67,7 +67,7 @@ struct drbd_bitmap { size_t bm_words; size_t bm_number_of_pages; sector_t bm_dev_capacity; - struct semaphore bm_change; /* serializes resize operations */ + struct mutex bm_change; /* serializes resize operations */ atomic_t bm_async_io; wait_queue_head_t bm_io_wait; @@ -115,7 +115,7 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why) return; } - trylock_failed = down_trylock(&b->bm_change); + trylock_failed = !mutex_trylock(&b->bm_change); if (trylock_failed) { dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n", @@ -126,7 +126,7 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why) b->bm_task == mdev->receiver.task ? "receiver" : b->bm_task == mdev->asender.task ? "asender" : b->bm_task == mdev->worker.task ? "worker" : "?"); - down(&b->bm_change); + mutex_lock(&b->bm_change); } if (__test_and_set_bit(BM_LOCKED, &b->bm_flags)) dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); @@ -148,7 +148,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev) b->bm_why = NULL; b->bm_task = NULL; - up(&b->bm_change); + mutex_unlock(&b->bm_change); } /* word offset to long pointer */ @@ -296,7 +296,7 @@ int drbd_bm_init(struct drbd_conf *mdev) if (!b) return -ENOMEM; spin_lock_init(&b->bm_lock); - init_MUTEX(&b->bm_change); + mutex_init(&b->bm_change); init_waitqueue_head(&b->bm_io_wait); mdev->bitmap = b; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index d9301e861d9..e5e86a78182 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -261,6 +261,9 @@ static inline const char *cmdname(enum drbd_packets cmd) [P_OV_REQUEST] = "OVRequest", [P_OV_REPLY] = "OVReply", [P_OV_RESULT] = "OVResult", + [P_CSUM_RS_REQUEST] = "CsumRSRequest", + [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", + [P_COMPRESSED_BITMAP] = "CBitmap", [P_MAX_CMD] = NULL, }; @@ -443,13 +446,18 @@ struct p_rs_param_89 { char csums_alg[SHARED_SECRET_MAX]; } __packed; +enum drbd_conn_flags { + CF_WANT_LOSE = 1, + CF_DRY_RUN = 2, +}; + struct p_protocol { struct p_header head; u32 protocol; u32 after_sb_0p; u32 after_sb_1p; u32 after_sb_2p; - u32 want_lose; + u32 conn_flags; u32 two_primaries; /* Since protocol version 87 and higher. */ @@ -791,6 +799,8 @@ enum { * while this is set. */ RESIZE_PENDING, /* Size change detected locally, waiting for the response from * the peer, if it changed there as well. */ + CONN_DRY_RUN, /* Expect disconnect after resync handshake. 
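
/*
 * Condensed from the drbd_bitmap.c hunks above: the semaphore-to-mutex
 * conversion is mechanical except for the trylock sense, which inverts
 * because down_trylock() returns nonzero on failure while mutex_trylock()
 * returns nonzero on success:
 */
struct mutex bm_change;                         /* was: struct semaphore */

mutex_init(&b->bm_change);                      /* was: init_MUTEX() */
trylock_failed = !mutex_trylock(&b->bm_change); /* was: down_trylock() */
if (trylock_failed)
        mutex_lock(&b->bm_change);              /* was: down() */
mutex_unlock(&b->bm_change);                    /* was: up() */
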
*/ + GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */ }; struct drbd_bitmap; /* opaque for drbd_conf */ diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index ab871e00ffc..67e0fc54224 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1668,7 +1668,7 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc) int drbd_send_protocol(struct drbd_conf *mdev) { struct p_protocol *p; - int size, rv; + int size, cf, rv; size = sizeof(struct p_protocol); @@ -1685,9 +1685,21 @@ int drbd_send_protocol(struct drbd_conf *mdev) p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p); p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p); p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p); - p->want_lose = cpu_to_be32(mdev->net_conf->want_lose); p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries); + cf = 0; + if (mdev->net_conf->want_lose) + cf |= CF_WANT_LOSE; + if (mdev->net_conf->dry_run) { + if (mdev->agreed_pro_version >= 92) + cf |= CF_DRY_RUN; + else { + dev_err(DEV, "--dry-run is not supported by peer"); + return 0; + } + } + p->conn_flags = cpu_to_be32(cf); + if (mdev->agreed_pro_version >= 87) strcpy(p->integrity_alg, mdev->net_conf->integrity_alg); @@ -3161,14 +3173,18 @@ void drbd_free_bc(struct drbd_backing_dev *ldev) void drbd_free_sock(struct drbd_conf *mdev) { if (mdev->data.socket) { + mutex_lock(&mdev->data.mutex); kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR); sock_release(mdev->data.socket); mdev->data.socket = NULL; + mutex_unlock(&mdev->data.mutex); } if (mdev->meta.socket) { + mutex_lock(&mdev->meta.mutex); kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR); sock_release(mdev->meta.socket); mdev->meta.socket = NULL; + mutex_unlock(&mdev->meta.mutex); } } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 4df3b40b105..6429d2b19e0 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -285,8 +285,8 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) } if (r == SS_NO_UP_TO_DATE_DISK && force && - (mdev->state.disk == D_INCONSISTENT || - mdev->state.disk == D_OUTDATED)) { + (mdev->state.disk < D_UP_TO_DATE && + mdev->state.disk >= D_INCONSISTENT)) { mask.disk = D_MASK; val.disk = D_UP_TO_DATE; forced = 1; @@ -407,7 +407,7 @@ static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, } reply->ret_code = - drbd_set_role(mdev, R_PRIMARY, primary_args.overwrite_peer); + drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force); return 0; } @@ -941,6 +941,25 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp drbd_md_set_sector_offsets(mdev, nbc); + /* allocate a second IO page if logical_block_size != 512 */ + logical_block_size = bdev_logical_block_size(nbc->md_bdev); + if (logical_block_size == 0) + logical_block_size = MD_SECTOR_SIZE; + + if (logical_block_size != MD_SECTOR_SIZE) { + if (!mdev->md_io_tmpp) { + struct page *page = alloc_page(GFP_NOIO); + if (!page) + goto force_diskless_dec; + + dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n", + logical_block_size, MD_SECTOR_SIZE); + dev_warn(DEV, "Workaround engaged (has performance impact).\n"); + + mdev->md_io_tmpp = page; + } + } + if (!mdev->bitmap) { if (drbd_bm_init(mdev)) { retcode = ERR_NOMEM; @@ -980,25 +999,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp goto force_diskless_dec; } - /* allocate a second 
IO page if logical_block_size != 512 */ - logical_block_size = bdev_logical_block_size(nbc->md_bdev); - if (logical_block_size == 0) - logical_block_size = MD_SECTOR_SIZE; - - if (logical_block_size != MD_SECTOR_SIZE) { - if (!mdev->md_io_tmpp) { - struct page *page = alloc_page(GFP_NOIO); - if (!page) - goto force_diskless_dec; - - dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n", - logical_block_size, MD_SECTOR_SIZE); - dev_warn(DEV, "Workaround engaged (has performance impact).\n"); - - mdev->md_io_tmpp = page; - } - } - /* Reset the "barriers don't work" bits here, then force meta data to * be written, to ensure we determine if barriers are supported. */ if (nbc->dc.no_md_flush) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index d065c646b35..ed9f1de24a7 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2513,6 +2513,10 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol } if (hg == -100) { + /* FIXME this log message is not correct if we end up here + * after an attempted attach on a diskless node. + * We just refuse to attach -- well, we drop the "connection" + * to that disk, in a way... */ dev_alert(DEV, "Split-Brain detected, dropping connection!\n"); drbd_khelper(mdev, "split-brain"); return C_MASK; @@ -2538,6 +2542,16 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol } } + if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) { + if (hg == 0) + dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n"); + else + dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.", + drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET), + abs(hg) >= 2 ? 
"full" : "bit-map based"); + return C_MASK; + } + if (abs(hg) >= 2) { dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake")) @@ -2585,7 +2599,7 @@ static int receive_protocol(struct drbd_conf *mdev, struct p_header *h) struct p_protocol *p = (struct p_protocol *)h; int header_size, data_size; int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; - int p_want_lose, p_two_primaries; + int p_want_lose, p_two_primaries, cf; char p_integrity_alg[SHARED_SECRET_MAX] = ""; header_size = sizeof(*p) - sizeof(*h); @@ -2598,8 +2612,14 @@ static int receive_protocol(struct drbd_conf *mdev, struct p_header *h) p_after_sb_0p = be32_to_cpu(p->after_sb_0p); p_after_sb_1p = be32_to_cpu(p->after_sb_1p); p_after_sb_2p = be32_to_cpu(p->after_sb_2p); - p_want_lose = be32_to_cpu(p->want_lose); p_two_primaries = be32_to_cpu(p->two_primaries); + cf = be32_to_cpu(p->conn_flags); + p_want_lose = cf & CF_WANT_LOSE; + + clear_bit(CONN_DRY_RUN, &mdev->flags); + + if (cf & CF_DRY_RUN) + set_bit(CONN_DRY_RUN, &mdev->flags); if (p_proto != mdev->net_conf->wire_protocol) { dev_err(DEV, "incompatible communication protocols\n"); @@ -3118,13 +3138,16 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h) put_ldev(mdev); if (nconn == C_MASK) { + nconn = C_CONNECTED; if (mdev->state.disk == D_NEGOTIATING) { drbd_force_state(mdev, NS(disk, D_DISKLESS)); - nconn = C_CONNECTED; } else if (peer_state.disk == D_NEGOTIATING) { dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); peer_state.disk = D_DISKLESS; + real_peer_disk = D_DISKLESS; } else { + if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) + return FALSE; D_ASSERT(oconn == C_WF_REPORT_PARAMS); drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); return FALSE; @@ -3594,10 +3617,7 @@ static void drbd_disconnect(struct drbd_conf *mdev) /* asender does not clean up anything. it must not interfere, either */ drbd_thread_stop(&mdev->asender); - - mutex_lock(&mdev->data.mutex); drbd_free_sock(mdev); - mutex_unlock(&mdev->data.mutex); spin_lock_irq(&mdev->req_lock); _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); @@ -4054,6 +4074,8 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header *h) { /* restore idle timeout */ mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ; + if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) + wake_up(&mdev->misc_wait); return TRUE; } diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index b453c2bca3b..44bf6d11197 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -938,7 +938,8 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) if (eq) { drbd_set_in_sync(mdev, e->sector, e->size); - mdev->rs_same_csum++; + /* rs_same_csums unit is BM_BLOCK_SIZE */ + mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT; ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e); } else { inc_rs_pending(mdev); @@ -1288,6 +1289,14 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na) return retcode; } +static void ping_peer(struct drbd_conf *mdev) +{ + clear_bit(GOT_PING_ACK, &mdev->flags); + request_ping(mdev); + wait_event(mdev->misc_wait, + test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED); +} + /** * drbd_start_resync() - Start the resync process * @mdev: DRBD device. 
@@ -1371,7 +1380,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) _drbd_pause_after(mdev); } write_unlock_irq(&global_state_lock); - drbd_state_unlock(mdev); put_ldev(mdev); if (r == SS_SUCCESS) { @@ -1382,11 +1390,8 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) if (mdev->rs_total == 0) { /* Peer still reachable? Beware of failing before-resync-target handlers! */ - request_ping(mdev); - __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(mdev->net_conf->ping_timeo*HZ/9); /* 9 instead 10 */ + ping_peer(mdev); drbd_resync_finished(mdev); - return; } /* ns.conn may already be != mdev->state.conn, @@ -1398,6 +1403,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) drbd_md_sync(mdev); } + drbd_state_unlock(mdev); } int drbd_worker(struct drbd_thread *thi) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index cb69929d917..8546d123b9a 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -237,6 +237,8 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, if (ret) goto fail; + file_update_time(file); + transfer_result = lo_do_transfer(lo, WRITE, page, offset, bvec->bv_page, bv_offs, size, IV); copied = size; diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 8866ca369d5..71acf4e5335 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -341,11 +341,11 @@ static int pcd_wait(struct pcd_unit *cd, int go, int stop, char *fun, char *msg) && (j++ < PCD_SPIN)) udelay(PCD_DELAY); - if ((r & (IDE_ERR & stop)) || (j >= PCD_SPIN)) { + if ((r & (IDE_ERR & stop)) || (j > PCD_SPIN)) { s = read_reg(cd, 7); e = read_reg(cd, 1); p = read_reg(cd, 2); - if (j >= PCD_SPIN) + if (j > PCD_SPIN) e |= 0x100; if (fun) printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index ddb4f9abd48..c059aab3006 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -391,11 +391,11 @@ static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg) && (j++ < PF_SPIN)) udelay(PF_SPIN_DEL); - if ((r & (STAT_ERR & stop)) || (j >= PF_SPIN)) { + if ((r & (STAT_ERR & stop)) || (j > PF_SPIN)) { s = read_reg(pf, 7); e = read_reg(pf, 1); p = read_reg(pf, 2); - if (j >= PF_SPIN) + if (j > PF_SPIN) e |= 0x100; if (fun) printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index 1e4006e18f0..bc5825fdeaa 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c @@ -274,11 +274,11 @@ static int pt_wait(struct pt_unit *tape, int go, int stop, char *fun, char *msg) && (j++ < PT_SPIN)) udelay(PT_SPIN_DEL); - if ((r & (STAT_ERR & stop)) || (j >= PT_SPIN)) { + if ((r & (STAT_ERR & stop)) || (j > PT_SPIN)) { s = read_reg(pi, 7); e = read_reg(pi, 1); p = read_reg(pi, 2); - if (j >= PT_SPIN) + if (j > PT_SPIN) e |= 0x100; if (fun) printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 4b12b820c9a..2138a7ae050 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -348,14 +348,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) set_capacity(vblk->disk, cap); /* We can handle whatever the host told us to handle. 
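
/*
 * The pcd/pf/pt hunks above fix one off-by-one repeated across three
 * paride drivers. In while (status() && (j++ < SPIN)), a device that
 * becomes ready on the final allowed pass leaves j == SPIN (the &&
 * short-circuit skips the j++), whereas a real timeout leaves
 * j == SPIN + 1, so the timeout test must be j > SPIN, not j >= SPIN.
 * A runnable user-space sketch (SPIN, busy, status are stand-ins):
 */
#include <stdio.h>

#define SPIN 5
static int busy = SPIN;         /* device becomes ready after SPIN polls */
static int status(void) { return busy-- > 0; }

int main(void)
{
        int j = 0;
        while (status() && (j++ < SPIN))
                ;               /* the drivers udelay() here */
        /* success on the last pass: j == SPIN, not a timeout */
        printf("j=%d, timed out: %s\n", j, j > SPIN ? "yes" : "no");
        return 0;
}
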
*/ - blk_queue_max_phys_segments(q, vblk->sg_elems-2); - blk_queue_max_hw_segments(q, vblk->sg_elems-2); + blk_queue_max_segments(q, vblk->sg_elems-2); /* No need to bounce any requests */ blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); /* No real sector limit. */ - blk_queue_max_sectors(q, -1U); + blk_queue_max_hw_sectors(q, -1U); /* Host can optionally specify maximum segment size and number of * segments. */ diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index d41331bc2aa..aa4248efc5d 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -1817,8 +1817,6 @@ static int intel_845_configure(void) pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); /* clear any possible error conditions */ pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); - - intel_i830_setup_flush(); return 0; } @@ -2188,7 +2186,6 @@ static const struct agp_bridge_driver intel_845_driver = { .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, - .chipset_flush = intel_i830_chipset_flush, }; static const struct agp_bridge_driver intel_850_driver = { diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index 6c32fbf0716..56b27671adc 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c @@ -2021,8 +2021,6 @@ static int __init rs_init(void) state->baud_base = amiga_colorclock; state->xmit_fifo_size = 1; - local_irq_save(flags); - /* set ISRs, and then disable the rx interrupts */ error = request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state); if (error) @@ -2033,6 +2031,8 @@ static int __init rs_init(void) if (error) goto fail_free_irq; + local_irq_save(flags); + /* turn off Rx and Tx interrupts */ custom.intena = IF_RBF | IF_TBE; mb(); diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index d3890e8d30e..35cca4c7fb1 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c @@ -368,16 +368,12 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) hp = tty->driver_data; spin_lock_irqsave(&hp->lock, flags); - tty_kref_get(tty); if (--hp->count == 0) { /* We are done with the tty pointer now. */ hp->tty = NULL; spin_unlock_irqrestore(&hp->lock, flags); - /* Put the ref obtained in hvc_open() */ - tty_kref_put(tty); - if (hp->ops->notifier_del) hp->ops->notifier_del(hp, hp->data); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 1f3215ac085..f54dab8acdc 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -225,6 +225,7 @@ int __weak phys_mem_access_prot_allowed(struct file *file, * outside of main memory. 
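
/*
 * On the amiserial hunk above: request_irq() may sleep (it allocates
 * memory), so it must not run under local_irq_save(). The fix requests
 * both interrupts first and disables local interrupts only around the
 * raw register writes; condensed shape of the corrected flow (the RX
 * request line is elided by the hunk context and assumed here):
 */
error = request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state);
if (error)
        goto fail;
/* ... request the RX interrupt the same way ... */

local_irq_save(flags);                  /* only non-sleeping work follows */
custom.intena = IF_RBF | IF_TBE;        /* turn off Rx and Tx interrupts */
mb();
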
* */ +#ifdef pgprot_noncached static int uncached_access(struct file *file, unsigned long addr) { #if defined(CONFIG_IA64) @@ -251,6 +252,7 @@ static int uncached_access(struct file *file, unsigned long addr) return addr >= __pa(high_memory); #endif } +#endif static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) @@ -710,11 +712,6 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) switch (orig) { case SEEK_CUR: offset += file->f_pos; - if ((unsigned long long)offset < - (unsigned long long)file->f_pos) { - ret = -EOVERFLOW; - break; - } case SEEK_SET: /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ if ((unsigned long long)offset >= ~0xFFFULL) { @@ -908,6 +905,9 @@ static int __init chr_dev_init(void) printk("unable to get major %d for memory devs\n", MEM_MAJOR); mem_class = class_create(THIS_MODULE, "mem"); + if (IS_ERR(mem_class)) + return PTR_ERR(mem_class); + mem_class->devnode = mem_devnode; for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { if (!devlist[minor].name) diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 95c9f54f3d3..47023053ee8 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c @@ -1768,7 +1768,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, int len, lsr; len = mxser_chars_in_buffer(tty); - spin_lock(&info->slock); + spin_lock_irq(&info->slock); lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_THRE; spin_unlock_irq(&info->slock); len += (lsr ? 0 : 1); @@ -1778,12 +1778,12 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, case MOXA_ASPP_MON: { int mcr, status; - spin_lock(&info->slock); + spin_lock_irq(&info->slock); status = mxser_get_msr(info->ioaddr, 1, tty->index); mxser_check_modem_status(tty, info, status); mcr = inb(info->ioaddr + UART_MCR); - spin_unlock(&info->slock); + spin_unlock_irq(&info->slock); if (mcr & MOXA_MUST_MCR_XON_FLAG) info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD; diff --git a/drivers/char/raw.c b/drivers/char/raw.c index d331c59b571..8756ab0daa8 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -248,6 +248,7 @@ static const struct file_operations raw_fops = { .aio_read = generic_file_aio_read, .write = do_sync_write, .aio_write = blkdev_aio_write, + .fsync = blkdev_fsync, .open = raw_open, .release= raw_release, .ioctl = raw_ioctl, diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 026ea6c27e0..196428c2287 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -33,6 +33,35 @@ #include <linux/workqueue.h> #include "hvc_console.h" +/* Moved here from .h file in order to disable MULTIPORT. */ +#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ + +struct virtio_console_multiport_conf { + struct virtio_console_config config; + /* max. number of ports this device can hold */ + __u32 max_nr_ports; + /* number of ports added so far */ + __u32 nr_ports; +} __attribute__((packed)); + +/* + * A message that's passed between the Host and the Guest for a + * particular port. 
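
/*
 * The mxser hunk above fixes mismatched spinlock flavors: spin_lock()
 * had been paired with spin_unlock_irq(), which re-enables local
 * interrupts the lock side never disabled. Acquire and release must use
 * the same variant:
 */
spin_lock_irq(&info->slock);    /* take lock and disable local IRQs */
/* ... read LSR/MSR/MCR registers ... */
spin_unlock_irq(&info->slock);  /* release and re-enable; flavors match */
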
+ */ +struct virtio_console_control { + __u32 id; /* Port number */ + __u16 event; /* The kind of control event (see below) */ + __u16 value; /* Extra information for the key */ +}; + +/* Some events for control messages */ +#define VIRTIO_CONSOLE_PORT_READY 0 +#define VIRTIO_CONSOLE_CONSOLE_PORT 1 +#define VIRTIO_CONSOLE_RESIZE 2 +#define VIRTIO_CONSOLE_PORT_OPEN 3 +#define VIRTIO_CONSOLE_PORT_NAME 4 +#define VIRTIO_CONSOLE_PORT_REMOVE 5 + /* * This is a global struct for storing common data for all the devices * this driver handles. @@ -121,7 +150,7 @@ struct ports_device { spinlock_t cvq_lock; /* The current config space is stored here */ - struct virtio_console_config config; + struct virtio_console_multiport_conf config; /* The virtio device we're associated with */ struct virtio_device *vdev; @@ -416,20 +445,16 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) out_vq->vq_ops->kick(out_vq); if (ret < 0) { - len = 0; + in_count = 0; goto fail; } - /* - * Wait till the host acknowledges it pushed out the data we - * sent. Also ensure we return to userspace the number of - * bytes that were successfully consumed by the host. - */ + /* Wait till the host acknowledges it pushed out the data we sent. */ while (!out_vq->vq_ops->get_buf(out_vq, &len)) cpu_relax(); fail: /* We're expected to return the amount of data we wrote */ - return len; + return in_count; } /* @@ -646,13 +671,13 @@ static int put_chars(u32 vtermno, const char *buf, int count) { struct port *port; + if (unlikely(early_put_chars)) + return early_put_chars(vtermno, buf, count); + port = find_port_by_vtermno(vtermno); if (!port) return 0; - if (unlikely(early_put_chars)) - return early_put_chars(vtermno, buf, count); - return send_buf(port, (void *)buf, count); } @@ -1218,7 +1243,7 @@ fail: */ static void config_work_handler(struct work_struct *work) { - struct virtio_console_config virtconconf; + struct virtio_console_multiport_conf virtconconf; struct ports_device *portdev; struct virtio_device *vdev; int err; @@ -1227,7 +1252,8 @@ static void config_work_handler(struct work_struct *work) vdev = portdev->vdev; vdev->config->get(vdev, - offsetof(struct virtio_console_config, nr_ports), + offsetof(struct virtio_console_multiport_conf, + nr_ports), &virtconconf.nr_ports, sizeof(virtconconf.nr_ports)); @@ -1419,16 +1445,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) multiport = false; portdev->config.nr_ports = 1; portdev->config.max_nr_ports = 1; +#if 0 /* Multiport is not quite ready yet --RR */ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { multiport = true; vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; - vdev->config->get(vdev, offsetof(struct virtio_console_config, - nr_ports), + vdev->config->get(vdev, + offsetof(struct virtio_console_multiport_conf, + nr_ports), &portdev->config.nr_ports, sizeof(portdev->config.nr_ports)); - vdev->config->get(vdev, offsetof(struct virtio_console_config, - max_nr_ports), + vdev->config->get(vdev, + offsetof(struct virtio_console_multiport_conf, + max_nr_ports), &portdev->config.max_nr_ports, sizeof(portdev->config.max_nr_ports)); if (portdev->config.nr_ports > portdev->config.max_nr_ports) { @@ -1444,6 +1473,7 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) /* Let the Host know we support multiple ports.*/ vdev->config->finalize_features(vdev); +#endif err = init_vqs(portdev); if (err < 0) { @@ -1526,7 +1556,6 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { 
VIRTIO_CONSOLE_F_SIZE, - VIRTIO_CONSOLE_F_MULTIPORT, }; static struct virtio_driver virtio_console = { diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 702dcc98c07..14a34d99eea 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -960,6 +960,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) u.packet.header_length = GET_HEADER_LENGTH(control); if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { + if (u.packet.header_length % 4 != 0) + return -EINVAL; header_length = u.packet.header_length; } else { /* @@ -969,7 +971,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) if (ctx->header_size == 0) { if (u.packet.header_length > 0) return -EINVAL; - } else if (u.packet.header_length % ctx->header_size != 0) { + } else if (u.packet.header_length == 0 || + u.packet.header_length % ctx->header_size != 0) { return -EINVAL; } header_length = 0; @@ -1354,24 +1357,24 @@ static int dispatch_ioctl(struct client *client, return -ENODEV; if (_IOC_TYPE(cmd) != '#' || - _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) + _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) || + _IOC_SIZE(cmd) > sizeof(buffer)) return -EINVAL; - if (_IOC_DIR(cmd) & _IOC_WRITE) { - if (_IOC_SIZE(cmd) > sizeof(buffer) || - copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) + if (_IOC_DIR(cmd) == _IOC_READ) + memset(&buffer, 0, _IOC_SIZE(cmd)); + + if (_IOC_DIR(cmd) & _IOC_WRITE) + if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) return -EFAULT; - } ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer); if (ret < 0) return ret; - if (_IOC_DIR(cmd) & _IOC_READ) { - if (_IOC_SIZE(cmd) > sizeof(buffer) || - copy_to_user(arg, &buffer, _IOC_SIZE(cmd))) + if (_IOC_DIR(cmd) & _IOC_READ) + if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd))) return -EFAULT; - } return ret; } diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index 134dd732839..d6470ef36e4 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c @@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(ibft_addr); * Routine used to find the iSCSI Boot Format Table. The logical * kernel address is set in the ibft_addr global variable. 
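
/*
 * The core-cdev.c hunk above hardens dispatch_ioctl() in two ways: the
 * _IOC_SIZE() bound is checked once, in front of every path, and
 * read-direction ioctls get a zeroed buffer so a handler that fills only
 * part of it cannot copy stale kernel stack bytes back to user space:
 */
if (_IOC_TYPE(cmd) != '#' ||
    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
    _IOC_SIZE(cmd) > sizeof(buffer))
        return -EINVAL;

if (_IOC_DIR(cmd) == _IOC_READ)
        memset(&buffer, 0, _IOC_SIZE(cmd));     /* no stale stack data */
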
*/ -void __init reserve_ibft_region(void) +unsigned long __init find_ibft_region(unsigned long *sizep) { unsigned long pos; unsigned int len = 0; @@ -77,6 +77,11 @@ void __init reserve_ibft_region(void) } } } - if (ibft_addr) - reserve_bootmem(pos, PAGE_ALIGN(len), BOOTMEM_DEFAULT); + if (ibft_addr) { + *sizep = PAGE_ALIGN(len); + return pos; + } + + *sizep = 0; + return 0; } diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c index ac4d0f0ea02..ddd053108a1 100644 --- a/drivers/gpio/timbgpio.c +++ b/drivers/gpio/timbgpio.c @@ -131,6 +131,7 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger) unsigned long flags; u32 lvr, flr, bflr = 0; u32 ver; + int ret = 0; if (offset < 0 || offset > tgpio->gpio.ngpio) return -EINVAL; @@ -154,8 +155,10 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger) } if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { - if (ver < 3) - return -EINVAL; + if (ver < 3) { + ret = -EINVAL; + goto out; + } else { flr |= 1 << offset; bflr |= 1 << offset; @@ -175,9 +178,10 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger) iowrite32(bflr, tgpio->membase + TGPIO_BFLR); iowrite32(1 << offset, tgpio->membase + TGPIO_ICR); - spin_unlock_irqrestore(&tgpio->lock, flags); - return 0; +out: + spin_unlock_irqrestore(&tgpio->lock, flags); + return ret; } static void timbgpio_irq(unsigned int irq, struct irq_desc *desc) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 2cc6e87d849..18f41d7061f 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -85,6 +85,8 @@ static struct edid_quirk { /* Envision Peripherals, Inc. EN-7100e */ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, + /* Envision EN2028 */ + { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, /* Funai Electronics PM36B */ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index b743411d814..a0c365f2e52 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -516,8 +516,6 @@ void drm_put_dev(struct drm_device *dev) } driver = dev->driver; - drm_vblank_cleanup(dev); - drm_lastclose(dev); if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && @@ -537,6 +535,8 @@ void drm_put_dev(struct drm_device *dev) dev->agp = NULL; } + drm_vblank_cleanup(dev); + list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) drm_rmmap(dev, r_list->map); drm_ht_remove(&dev->map_hash); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b574503dddd..a0b8447b06e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -226,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) } else { struct drm_i915_gem_object *obj_priv; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); seq_printf(m, "Fenced object[%2d] = %p: %s " "%08x %08zx %08x %s %08x %08x %d", i, obj, get_pin_flag(obj_priv), diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4b26919abdb..0af3dcc85ce 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -80,14 +80,14 @@ const static struct intel_device_info intel_i915g_info = { .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, }; const static struct intel_device_info intel_i915gm_info = { - .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, + .is_i9xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, }; const static struct intel_device_info intel_i945g_info = { .is_i9xx = 1, 
.has_hotplug = 1, .cursor_needs_physical = 1, }; const static struct intel_device_info intel_i945gm_info = { - .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, + .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, }; @@ -361,7 +361,7 @@ int i965_reset(struct drm_device *dev, u8 flags) !dev_priv->mm.suspended) { drm_i915_ring_buffer_t *ring = &dev_priv->ring; struct drm_gem_object *obj = ring->ring_obj; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); dev_priv->mm.suspended = 0; /* Stop the ring if it's running. */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index aba8260fbc5..6960849522f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -611,6 +611,8 @@ typedef struct drm_i915_private { /* Reclocking support */ bool render_reclock_avail; bool lvds_downclock_avail; + /* indicate whether the LVDS EDID is OK */ + bool lvds_edid_good; /* indicates the reduced downclock for LVDS*/ int lvds_downclock; struct work_struct idle_work; @@ -731,6 +733,8 @@ struct drm_i915_gem_object { atomic_t pending_flip; }; +#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private) + /** * Request queue structure. * diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 368d726853d..80871c62a57 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -163,7 +163,7 @@ fast_shmem_read(struct page **pages, static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) { drm_i915_private_t *dev_priv = obj->dev->dev_private; - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && obj_priv->tiling_mode != I915_TILING_NONE; @@ -264,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pread *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); ssize_t remain; loff_t offset, page_base; char __user *user_data; @@ -285,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, if (ret != 0) goto fail_put_pages; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); offset = args->offset; while (remain > 0) { @@ -354,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pread *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; @@ -403,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, if (ret != 0) goto fail_put_pages; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); offset = args->offset; while (remain > 0) { @@ -479,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) return -EBADF; - obj_priv = obj->driver_private; + obj_priv = to_intel_bo(obj); /* Bounds check source. 
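
/*
 * The to_intel_bo() macro added in i915_drv.h above replaces the many
 * open-coded obj->driver_private casts converted throughout this file;
 * funneling every cast through one accessor leaves a single point to
 * change if the storage scheme ever does. Generic shape of the idiom
 * (struct names below are illustrative):
 */
struct base_obj { void *driver_private; };
struct priv_obj { int pin_count; };

#define to_priv(x) ((struct priv_obj *)(x)->driver_private)
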
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 368d726853d..80871c62a57 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -163,7 +163,7 @@ fast_shmem_read(struct page **pages,
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = obj->dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 		obj_priv->tiling_mode != I915_TILING_NONE;
@@ -264,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 			  struct drm_i915_gem_pread *args,
 			  struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	ssize_t remain;
 	loff_t offset, page_base;
 	char __user *user_data;
@@ -285,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret != 0)
 		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 
 	while (remain > 0) {
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 			  struct drm_i915_gem_pread *args,
 			  struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct mm_struct *mm = current->mm;
 	struct page **user_pages;
 	ssize_t remain;
@@ -403,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret != 0)
 		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 
 	while (remain > 0) {
@@ -479,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	/* Bounds check source.
 	 *
@@ -581,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 			 struct drm_i915_gem_pwrite *args,
 			 struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t offset, page_base;
@@ -605,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret)
 		goto fail;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = obj_priv->gtt_offset + args->offset;
 
 	while (remain > 0) {
@@ -655,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 			 struct drm_i915_gem_pwrite *args,
 			 struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t gtt_page_base, offset;
@@ -699,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret)
 		goto out_unpin_object;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = obj_priv->gtt_offset + args->offset;
 
 	while (remain > 0) {
@@ -761,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 			   struct drm_i915_gem_pwrite *args,
 			   struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	ssize_t remain;
 	loff_t offset, page_base;
 	char __user *user_data;
@@ -781,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret != 0)
 		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 	obj_priv->dirty = 1;
@@ -829,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 			   struct drm_i915_gem_pwrite *args,
 			   struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct mm_struct *mm = current->mm;
 	struct page **user_pages;
 	ssize_t remain;
@@ -877,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret != 0)
 		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 	obj_priv->dirty = 1;
@@ -952,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	/* Bounds check destination.
 	 *
@@ -1034,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	mutex_lock(&dev->struct_mutex);
@@ -1096,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 		 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
 			  __func__, args->handle, obj, obj->size);
 #endif
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	/* Pinned buffers may be scanout, so flush the cache */
 	if (obj_priv->pin_count)
@@ -1167,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	pgoff_t page_offset;
 	unsigned long pfn;
 	int ret = 0;
@@ -1234,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_map_list *list;
 	struct drm_local_map *map;
 	int ret = 0;
@@ -1305,7 +1305,7 @@ void
 i915_gem_release_mmap(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (dev->dev_mapping)
 		unmap_mapping_range(dev->dev_mapping,
@@ -1316,7 +1316,7 @@ static void
 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_map_list *list;
@@ -1347,7 +1347,7 @@ static uint32_t
 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int start, i;
 
 	/*
@@ -1406,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->struct_mutex);
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1450,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page_count = obj->size / PAGE_SIZE;
 	int i;
@@ -1486,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	/* Add a reference if we're newly entering the active list.
 	 */
 	if (!obj_priv->active) {
@@ -1506,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	BUG_ON(!obj_priv->active);
 	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1517,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 static void
 i915_gem_object_truncate(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct inode *inode;
 
 	inode = obj->filp->f_path.dentry->d_inode;
@@ -1538,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 	if (obj_priv->pin_count != 0)
@@ -1965,7 +1965,7 @@ static int
 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
 	/* This function only exists to support waiting for existing rendering,
@@ -1997,7 +1997,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret = 0;
 
 #if WATCH_BUF
@@ -2173,7 +2173,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 #if WATCH_LRU
 			DRM_INFO("%s: evicting %p\n", __func__, obj);
 #endif
-			obj_priv = obj->driver_private;
+			obj_priv = to_intel_bo(obj);
 			BUG_ON(obj_priv->pin_count != 0);
 			BUG_ON(obj_priv->active);
@@ -2244,7 +2244,7 @@ int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
 			  gfp_t gfpmask)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page_count, i;
 	struct address_space *mapping;
 	struct inode *inode;
@@ -2297,7 +2297,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
 	struct drm_gem_object *obj = reg->obj;
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int regnum = obj_priv->fence_reg;
 	uint64_t val;
@@ -2319,7 +2319,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
 	struct drm_gem_object *obj = reg->obj;
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int regnum = obj_priv->fence_reg;
 	uint64_t val;
@@ -2339,7 +2339,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
 	struct drm_gem_object *obj = reg->obj;
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int regnum = obj_priv->fence_reg;
 	int tile_width;
 	uint32_t fence_reg, val;
@@ -2381,7 +2381,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 	struct drm_gem_object *obj = reg->obj;
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int regnum = obj_priv->fence_reg;
 	uint32_t val;
 	uint32_t pitch_val;
@@ -2425,7 +2425,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
 		if (!reg->obj)
 			return i;
 
-		obj_priv = reg->obj->driver_private;
+		obj_priv = to_intel_bo(reg->obj);
 		if (!obj_priv->pin_count)
 			avail++;
 	}
@@ -2480,7 +2480,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_i915_fence_reg *reg = NULL;
 	int ret;
@@ -2547,7 +2547,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (IS_GEN6(dev)) {
 		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2583,7 +2583,7 @@ int
 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
@@ -2621,7 +2621,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_mm_node *free_space;
 	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	int ret;
@@ -2728,7 +2728,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 void
 i915_gem_clflush_object(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	/* If we don't have a page list set up, then we're not pinned
 	 * to GPU, and we can ignore the cache flush because it'll happen
@@ -2829,7 +2829,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
@@ -2879,7 +2879,7 @@ int
 i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
@@ -3092,7 +3092,7 @@ static void
 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
 	uint32_t old_read_domains;
@@ -3177,7 +3177,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 static void
 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (!obj_priv->page_cpu_valid)
 		return;
@@ -3217,7 +3217,7 @@ static int
 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 					  uint64_t offset, uint64_t size)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_read_domains;
 	int i, ret;
@@ -3286,7 +3286,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int i, ret;
 	void __iomem *reloc_page;
 	bool need_fence;
@@ -3337,7 +3337,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			i915_gem_object_unpin(obj);
 			return -EBADF;
 		}
-		target_obj_priv = target_obj->driver_private;
+		target_obj_priv = to_intel_bo(target_obj);
 
 #if WATCH_RELOC
 		DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3689,7 +3689,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
 		prepare_to_wait(&dev_priv->pending_flip_queue,
 				&wait, TASK_INTERRUPTIBLE);
 		for (i = 0; i < count; i++) {
-			obj_priv = object_list[i]->driver_private;
+			obj_priv = to_intel_bo(object_list[i]);
 			if (atomic_read(&obj_priv->pending_flip) > 0)
 				break;
 		}
@@ -3798,7 +3798,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 		}
 
-		obj_priv = object_list[i]->driver_private;
+		obj_priv = to_intel_bo(object_list[i]);
 		if (obj_priv->in_execbuffer) {
 			DRM_ERROR("Object %p appears more than once in object list\n",
 				   object_list[i]);
@@ -3924,7 +3924,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
@@ -3999,7 +3999,7 @@ err:
 	for (i = 0; i < args->buffer_count; i++) {
 		if (object_list[i]) {
-			obj_priv = object_list[i]->driver_private;
+			obj_priv = to_intel_bo(object_list[i]);
 			obj_priv->in_execbuffer = false;
 		}
 		drm_gem_object_unreference(object_list[i]);
@@ -4177,7 +4177,7 @@ int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4210,7 +4210,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 	obj_priv->pin_count--;
@@ -4250,7 +4250,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 		mutex_unlock(&dev->struct_mutex);
 		return -EBADF;
 	}
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4307,7 +4307,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 	}
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	if (obj_priv->pin_filp != file_priv) {
 		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
@@ -4349,7 +4349,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 */
 	i915_gem_retire_requests(dev);
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	/* Don't count being on the flushing list against the object being
 	 * done.  Otherwise, a buffer left on the flushing list but not getting
 	 * flushed (because nobody's flushing that domain) won't ever return
@@ -4395,7 +4395,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->pin_count) {
 		drm_gem_object_unreference(obj);
@@ -4456,7 +4456,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	trace_i915_gem_object_destroy(obj);
@@ -4565,7 +4565,7 @@ i915_gem_init_hws(struct drm_device *dev)
 		DRM_ERROR("Failed to allocate status page\n");
 		return -ENOMEM;
 	}
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
 
 	ret = i915_gem_object_pin(obj, 4096);
@@ -4609,7 +4609,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
 		return;
 
 	obj = dev_priv->hws_obj;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	kunmap(obj_priv->pages[0]);
 	i915_gem_object_unpin(obj);
@@ -4643,7 +4643,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 		i915_gem_cleanup_hws(dev);
 		return -ENOMEM;
 	}
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
@@ -4936,7 +4936,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	int ret;
 	int page_count;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	if (!obj_priv->phys_obj)
 		return;
@@ -4975,7 +4975,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	if (id > I915_MAX_PHYS_OBJECT)
 		return -EINVAL;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->phys_obj) {
 		if (obj_priv->phys_obj->id == id)
@@ -5026,7 +5026,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		     struct drm_i915_gem_pwrite *args,
 		     struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	void *obj_addr;
 	int ret;
 	char __user *user_data;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index e602614bd3f..35507cf53fa 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -72,7 +72,7 @@ void
 i915_gem_dump_object(struct drm_gem_object *obj, int len,
 		     const char *where, uint32_t mark)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page;
 
 	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
@@ -137,7 +137,7 @@ void
 i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page;
 	uint32_t *gtt_mapping;
 	uint32_t *backing_map = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c01c878e51b..449157f7161 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -240,7 +240,7 @@ bool
 i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->gtt_space == NULL)
 		return true;
@@ -280,7 +280,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EINVAL;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
 		drm_gem_object_unreference_unlocked(obj);
@@ -364,7 +364,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EINVAL;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	mutex_lock(&dev->struct_mutex);
@@ -427,7 +427,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page_count = obj->size >> PAGE_SHIFT;
 	int i;
@@ -456,7 +456,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page_count = obj->size >> PAGE_SHIFT;
 	int i;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 49c458bc650..6421481d622 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -260,10 +260,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
 
 	if (mode_config->num_connector) {
 		list_for_each_entry(connector, &mode_config->connector_list, head) {
-			struct intel_output *intel_output = to_intel_output(connector);
+			struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
-			if (intel_output->hot_plug)
-				(*intel_output->hot_plug) (intel_output);
+			if (intel_encoder->hot_plug)
+				(*intel_encoder->hot_plug) (intel_encoder);
 		}
 	}
 	/* Just fire off a uevent and let userspace tell us what to do */
@@ -444,7 +444,7 @@ i915_error_object_create(struct drm_device *dev,
 	if (src == NULL)
 		return NULL;
 
-	src_priv = src->driver_private;
+	src_priv = to_intel_bo(src);
 	if (src_priv->pages == NULL)
 		return NULL;
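Aside: the `intel_output` -> `intel_encoder` rename running through the rest of this series is purely a naming fix -- the struct had always bundled a DRM connector (`base`) with its encoder (`enc`), and "output" matched neither. The shape below is inferred from the fields the hunks actually touch (the real definition lives in intel_drv.h and is not part of this diff); the stub types are only there so the sketch stands alone:

	struct drm_connector { int stub; };	/* stand-ins, not the DRM structs */
	struct drm_encoder { int stub; };

	struct intel_encoder {
		struct drm_connector base;	/* still doubles as the connector here */
		struct drm_encoder enc;		/* what actually feeds a CRTC */
		int type;			/* INTEL_OUTPUT_ANALOG, _LVDS, _EDP, ... */
		void *ddc_bus;
		unsigned clone_mask;		/* which encoder classes may share a CRTC */
		unsigned crtc_mask;
		void *dev_priv;			/* e.g. struct intel_dp_priv */
		void (*hot_plug)(struct intel_encoder *);
	};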
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 38110ce742a..759c2ef72ef 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -247,19 +247,19 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
 	/* CRT should always be at 0, but check anyway */
-	if (intel_output->type != INTEL_OUTPUT_ANALOG)
+	if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
 		return false;
 
-	return intel_ddc_probe(intel_output);
+	return intel_ddc_probe(intel_encoder);
 }
 
 static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
 {
-	struct drm_encoder *encoder = &intel_output->enc;
+	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -387,8 +387,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
 static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct drm_encoder *encoder = &intel_output->enc;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct drm_crtc *crtc;
 	int dpms_mode;
 	enum drm_connector_status status;
@@ -405,13 +405,13 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
 	/* for pre-945g platforms use load detect */
 	if (encoder->crtc && encoder->crtc->enabled) {
-		status = intel_crt_load_detect(encoder->crtc, intel_output);
+		status = intel_crt_load_detect(encoder->crtc, intel_encoder);
 	} else {
-		crtc = intel_get_load_detect_pipe(intel_output,
+		crtc = intel_get_load_detect_pipe(intel_encoder,
 						  NULL, &dpms_mode);
 		if (crtc) {
-			status = intel_crt_load_detect(crtc, intel_output);
-			intel_release_load_detect_pipe(intel_output, dpms_mode);
+			status = intel_crt_load_detect(crtc, intel_encoder);
+			intel_release_load_detect_pipe(intel_encoder, dpms_mode);
 		} else
 			status = connector_status_unknown;
 	}
@@ -421,9 +421,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
 
 static void intel_crt_destroy(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
-	intel_i2c_destroy(intel_output->ddc_bus);
+	intel_i2c_destroy(intel_encoder->ddc_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
@@ -432,28 +432,28 @@ static void intel_crt_destroy(struct drm_connector *connector)
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
 	int ret;
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 	struct i2c_adapter *ddcbus;
 	struct drm_device *dev = connector->dev;
 
-	ret = intel_ddc_get_modes(intel_output);
+	ret = intel_ddc_get_modes(intel_encoder);
 	if (ret || !IS_G4X(dev))
 		goto end;
 
-	ddcbus = intel_output->ddc_bus;
+	ddcbus = intel_encoder->ddc_bus;
 	/* Try to probe digital port for output in DVI-I -> VGA mode.
 	 */
-	intel_output->ddc_bus =
+	intel_encoder->ddc_bus =
 		intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
 
-	if (!intel_output->ddc_bus) {
-		intel_output->ddc_bus = ddcbus;
+	if (!intel_encoder->ddc_bus) {
+		intel_encoder->ddc_bus = ddcbus;
 		dev_printk(KERN_ERR, &connector->dev->pdev->dev,
 			   "DDC bus registration failed for CRTDDC_D.\n");
 		goto end;
 	}
 	/* Try to get modes by GPIOD port */
-	ret = intel_ddc_get_modes(intel_output);
+	ret = intel_ddc_get_modes(intel_encoder);
 	intel_i2c_destroy(ddcbus);
 
 end:
@@ -506,23 +506,23 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
 void intel_crt_init(struct drm_device *dev)
 {
 	struct drm_connector *connector;
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 i2c_reg;
 
-	intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
-	if (!intel_output)
+	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
+	if (!intel_encoder)
 		return;
 
-	connector = &intel_output->base;
-	drm_connector_init(dev, &intel_output->base,
+	connector = &intel_encoder->base;
+	drm_connector_init(dev, &intel_encoder->base,
 			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
-	drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
 			 DRM_MODE_ENCODER_DAC);
 
-	drm_mode_connector_attach_encoder(&intel_output->base,
-					  &intel_output->enc);
+	drm_mode_connector_attach_encoder(&intel_encoder->base,
+					  &intel_encoder->enc);
 
 	/* Set up the DDC bus. */
 	if (HAS_PCH_SPLIT(dev))
@@ -533,22 +533,22 @@ void intel_crt_init(struct drm_device *dev)
 		if (dev_priv->crt_ddc_bus != 0)
 			i2c_reg = dev_priv->crt_ddc_bus;
 	}
-	intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
-	if (!intel_output->ddc_bus) {
+	intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
+	if (!intel_encoder->ddc_bus) {
 		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
 			   "failed.\n");
 		return;
 	}
 
-	intel_output->type = INTEL_OUTPUT_ANALOG;
-	intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+	intel_encoder->type = INTEL_OUTPUT_ANALOG;
+	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 				   (1 << INTEL_ANALOG_CLONE_BIT) |
 				   (1 << INTEL_SDVO_LVDS_CLONE_BIT);
-	intel_output->crtc_mask = (1 << 0) | (1 << 1);
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 
-	drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 	drm_sysfs_connector_add(connector);
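Aside: intel_crt_get_modes() above is a try-then-fallback probe -- on G4X the VGA pins may be the analog half of a DVI-I port, so if the normal CRT DDC bus yields no EDID the driver retries on the GPIOD bus ("CRTDDC_D"), restoring the old bus if the new one cannot even be created. A toy C rendering of that control flow (the bus type and probe function are made up):

	#include <stdio.h>

	struct bus { const char *name; };
	static int probe_modes(struct bus *b) { return b->name[0] == 'D'; } /* stub */

	int main(void)
	{
		struct bus crtddc_a = { "A" }, crtddc_d = { "D" };
		struct bus *cur = &crtddc_a;

		if (!probe_modes(cur)) {	/* analog DDC gave no EDID */
			cur = &crtddc_d;	/* fall back to the DVI-I digital port */
			if (!probe_modes(cur))
				cur = &crtddc_a; /* keep the original bus on failure */
		}
		printf("modes read via bus %s\n", cur->name);
		return 0;
	}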
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e7e753b2845..e7356fb6c91 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -747,16 +747,16 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
 	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
 		if (l_entry->encoder &&
 		    l_entry->encoder->crtc == crtc) {
-			struct intel_output *intel_output = to_intel_output(l_entry);
-			if (intel_output->type == type)
+			struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
+			if (intel_encoder->type == type)
 				return true;
 		}
 	}
 	return false;
 }
 
-struct drm_connector *
-intel_pipe_get_output (struct drm_crtc *crtc)
+static struct drm_connector *
+intel_pipe_get_connector (struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
@@ -1003,7 +1003,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane, i;
 	u32 fbc_ctl, fbc_ctl2;
@@ -1080,7 +1080,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
 		     DPFC_CTL_PLANEB);
@@ -1176,7 +1176,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 		return;
 
 	intel_fb = to_intel_framebuffer(fb);
-	obj_priv = intel_fb->obj->driver_private;
+	obj_priv = to_intel_bo(intel_fb->obj);
 
 	/*
 	 * If FBC is already on, we just have to verify that we can
@@ -1243,7 +1243,7 @@ out_disable:
 static int
 intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	u32 alignment;
 	int ret;
@@ -1323,7 +1323,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 	intel_fb = to_intel_framebuffer(crtc->fb);
 	obj = intel_fb->obj;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
@@ -1401,7 +1401,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 	if (old_fb) {
 		intel_fb = to_intel_framebuffer(old_fb);
-		obj_priv = intel_fb->obj->driver_private;
+		obj_priv = to_intel_bo(intel_fb->obj);
 		i915_gem_object_unpin(intel_fb->obj);
 	}
 	intel_increase_pllclock(crtc, true);
@@ -2917,7 +2917,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
 	int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
 	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-	int refclk, num_outputs = 0;
+	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
 	u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
 	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
@@ -2943,19 +2943,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	drm_vblank_pre_modeset(dev, pipe);
 
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_output *intel_output = to_intel_output(connector);
+		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
 		if (!connector->encoder || connector->encoder->crtc != crtc)
 			continue;
 
-		switch (intel_output->type) {
+		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
 		case INTEL_OUTPUT_SDVO:
 		case INTEL_OUTPUT_HDMI:
 			is_sdvo = true;
-			if (intel_output->needs_tv_clock)
+			if (intel_encoder->needs_tv_clock)
 				is_tv = true;
 			break;
 		case INTEL_OUTPUT_DVO:
@@ -2975,10 +2975,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			break;
 		}
 
-		num_outputs++;
+		num_connectors++;
 	}
 
-	if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
+	if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
 		refclk = dev_priv->lvds_ssc_freq * 1000;
 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 			      refclk / 1000);
@@ -3049,8 +3049,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	if (is_edp) {
 		struct drm_connector *edp;
 		target_clock = mode->clock;
-		edp = intel_pipe_get_output(crtc);
-		intel_edp_link_config(to_intel_output(edp),
+		edp = intel_pipe_get_connector(crtc);
+		intel_edp_link_config(to_intel_encoder(edp),
 				      &lane, &link_bw);
 	} else {
 		/* DP over FDI requires target mode clock
@@ -3231,7 +3231,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			/* XXX: just matching BIOS for now */
 			/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
 			dpll |= 3;
-		else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
+		else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
 			dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 		else
 			dpll |= PLL_REF_INPUT_DREFCLK;
@@ -3511,7 +3511,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	if (!bo)
 		return -ENOENT;
 
-	obj_priv = bo->driver_private;
+	obj_priv = to_intel_bo(bo);
 
 	if (bo->size < width * height * 4) {
 		DRM_ERROR("buffer is to small\n");
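Aside: the mode-set hunks above also rename `num_outputs` to `num_connectors`, which makes the spread-spectrum gate read correctly: SSC is only safe when the LVDS panel is the sole connector on the pipe, hence `is_lvds && lvds_use_ssc && num_connectors < 2`. Reduced to its inputs (the 96 MHz default is illustrative, not taken from this diff):

	/* Reference-clock choice as in intel_crtc_mode_set() above. */
	static int pick_refclk_khz(int is_lvds, int lvds_use_ssc,
				   int num_connectors, int lvds_ssc_freq_mhz)
	{
		if (is_lvds && lvds_use_ssc && num_connectors < 2)
			return lvds_ssc_freq_mhz * 1000; /* SSC only if LVDS is alone */
		return 96000;	/* ordinary reference clock, e.g. 96 MHz */
	}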
@@ -3655,9 +3655,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
  * detection.
  *
  * It will be up to the load-detect code to adjust the pipe as appropriate for
- * its requirements.  The pipe will be connected to no other outputs.
+ * its requirements.  The pipe will be connected to no other encoders.
  *
- * Currently this code will only succeed if there is a pipe with no outputs
+ * Currently this code will only succeed if there is a pipe with no encoders
  * configured for it.  In the future, it could choose to temporarily disable
  * some outputs to free up a pipe for its use.
  *
@@ -3670,14 +3670,14 @@ static struct drm_display_mode load_detect_mode = {
 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
 };
 
-struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 					    struct drm_display_mode *mode,
 					    int *dpms_mode)
 {
 	struct intel_crtc *intel_crtc;
 	struct drm_crtc *possible_crtc;
 	struct drm_crtc *supported_crtc =NULL;
-	struct drm_encoder *encoder = &intel_output->enc;
+	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct drm_crtc *crtc = NULL;
 	struct drm_device *dev = encoder->dev;
 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -3729,8 +3729,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
 	}
 
 	encoder->crtc = crtc;
-	intel_output->base.encoder = encoder;
-	intel_output->load_detect_temp = true;
+	intel_encoder->base.encoder = encoder;
+	intel_encoder->load_detect_temp = true;
 
 	intel_crtc = to_intel_crtc(crtc);
 	*dpms_mode = intel_crtc->dpms_mode;
@@ -3755,23 +3755,23 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
 	return crtc;
 }
 
-void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
 {
-	struct drm_encoder *encoder = &intel_output->enc;
+	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct drm_device *dev = encoder->dev;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 
-	if (intel_output->load_detect_temp) {
+	if (intel_encoder->load_detect_temp) {
 		encoder->crtc = NULL;
-		intel_output->base.encoder = NULL;
-		intel_output->load_detect_temp = false;
+		intel_encoder->base.encoder = NULL;
+		intel_encoder->load_detect_temp = false;
 		crtc->enabled = drm_helper_crtc_in_use(crtc);
 		drm_helper_disable_unused_functions(dev);
 	}
 
-	/* Switch crtc and output back off if necessary */
+	/* Switch crtc and encoder back off if necessary */
 	if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
 		if (encoder->crtc == crtc)
 			encoder_funcs->dpms(encoder, dpms_mode);
@@ -4156,7 +4156,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
 		if (work && !work->pending) {
-			obj_priv = work->pending_flip_obj->driver_private;
+			obj_priv = to_intel_bo(work->pending_flip_obj);
 			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
 					 obj_priv,
 					 atomic_read(&obj_priv->pending_flip));
@@ -4181,7 +4181,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = work->pending_flip_obj->driver_private;
+	obj_priv = to_intel_bo(work->pending_flip_obj);
 
 	/* Initial scanout buffer will have a 0 pending flip count */
 	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4252,7 +4252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
 	if (ret != 0) {
 		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
-			  obj->driver_private);
+			  to_intel_bo(obj));
 		kfree(work);
 		intel_crtc->unpin_work = NULL;
 		mutex_unlock(&dev->struct_mutex);
@@ -4266,7 +4266,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	crtc->fb = fb;
 	i915_gem_object_flush_write_domain(obj);
 	drm_vblank_get(dev, intel_crtc->pipe);
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	atomic_inc(&obj_priv->pending_flip);
 	work->pending_flip_obj = obj;
@@ -4399,8 +4399,8 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
 	int entry = 0;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct intel_output *intel_output = to_intel_output(connector);
-		if (type_mask & intel_output->clone_mask)
+		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+		if (type_mask & intel_encoder->clone_mask)
 			index_mask |= (1 << entry);
 		entry++;
 	}
@@ -4495,12 +4495,12 @@ static void intel_setup_outputs(struct drm_device *dev)
 		intel_tv_init(dev);
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct intel_output *intel_output = to_intel_output(connector);
-		struct drm_encoder *encoder = &intel_output->enc;
+		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+		struct drm_encoder *encoder = &intel_encoder->enc;
 
-		encoder->possible_crtcs = intel_output->crtc_mask;
+		encoder->possible_crtcs = intel_encoder->crtc_mask;
 		encoder->possible_clones = intel_connector_clones(dev,
-						intel_output->clone_mask);
+						intel_encoder->clone_mask);
 	}
 }
@@ -4779,14 +4779,14 @@ void intel_init_clock_gating(struct drm_device *dev)
 		struct drm_i915_gem_object *obj_priv = NULL;
 
 		if (dev_priv->pwrctx) {
-			obj_priv = dev_priv->pwrctx->driver_private;
+			obj_priv = to_intel_bo(dev_priv->pwrctx);
 		} else {
 			struct drm_gem_object *pwrctx;
 
 			pwrctx = intel_alloc_power_context(dev);
 			if (pwrctx) {
 				dev_priv->pwrctx = pwrctx;
-				obj_priv = pwrctx->driver_private;
+				obj_priv = to_intel_bo(pwrctx);
 			}
 		}
@@ -4815,7 +4815,7 @@ static void intel_init_display(struct drm_device *dev)
 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
 			dev_priv->display.enable_fbc = g4x_enable_fbc;
 			dev_priv->display.disable_fbc = g4x_disable_fbc;
-		} else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
+		} else if (IS_I965GM(dev)) {
 			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
 			dev_priv->display.enable_fbc = i8xx_enable_fbc;
 			dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -4957,7 +4957,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	if (dev_priv->pwrctx) {
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = dev_priv->pwrctx->driver_private;
+		obj_priv = to_intel_bo(dev_priv->pwrctx);
 		I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
 		I915_READ(PWRCTXA);
 		i915_gem_object_unpin(dev_priv->pwrctx);
@@ -4978,9 +4978,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
  */
 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
-	return &intel_output->enc;
+	return &intel_encoder->enc;
 }
 
 /*
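Aside: intel_connector_clones() above builds `possible_clones` as a bitmask over connector positions -- entry i is set when the queried clone class intersects that connector's `clone_mask`. A tiny self-contained version of the same loop (mask values invented for the example):

	#include <stdio.h>

	int main(void)
	{
		unsigned clone_mask[] = { 0x3, 0x1, 0x4 };	/* per-connector classes */
		unsigned type_mask = 0x1, index_mask = 0;

		for (unsigned i = 0; i < 3; i++)
			if (type_mask & clone_mask[i])
				index_mask |= 1u << i;
		printf("possible_clones = %#x\n", index_mask);	/* prints 0x3 */
		return 0;
	}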
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8e283f75941..77e40cfcf21 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -55,23 +55,23 @@ struct intel_dp_priv {
 	uint8_t link_bw;
 	uint8_t lane_count;
 	uint8_t dpcd[4];
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 	struct i2c_adapter adapter;
 	struct i2c_algo_dp_aux_data algo;
 };
 
 static void
-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
 		    uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
 
 static void
-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
 
 void
-intel_edp_link_config (struct intel_output *intel_output,
+intel_edp_link_config (struct intel_encoder *intel_encoder,
 		       int *lane_num, int *link_bw)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 	*lane_num = dp_priv->lane_count;
 	if (dp_priv->link_bw == DP_LINK_BW_1_62)
@@ -81,9 +81,9 @@ intel_edp_link_config (struct intel_output *intel_output,
 }
 
 static int
-intel_dp_max_lane_count(struct intel_output *intel_output)
+intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	int max_lane_count = 4;
 
 	if (dp_priv->dpcd[0] >= 0x11) {
@@ -99,9 +99,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output)
 }
 
 static int
-intel_dp_max_link_bw(struct intel_output *intel_output)
+intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	int max_link_bw = dp_priv->dpcd[1];
 
 	switch (max_link_bw) {
@@ -127,11 +127,11 @@ intel_dp_link_clock(uint8_t link_bw)
 /* I think this is a fiction */
 static int
 intel_dp_link_required(struct drm_device *dev,
-		       struct intel_output *intel_output, int pixel_clock)
+		       struct intel_encoder *intel_encoder, int pixel_clock)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (IS_eDP(intel_output))
+	if (IS_eDP(intel_encoder))
 		return (pixel_clock * dev_priv->edp_bpp) / 8;
 	else
 		return pixel_clock * 3;
@@ -141,11 +141,11 @@ static int
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
-	int max_lanes = intel_dp_max_lane_count(intel_output);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
+	int max_lanes = intel_dp_max_lane_count(intel_encoder);
 
-	if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
+	if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
 	    > max_link_clock * max_lanes)
 		return MODE_CLOCK_HIGH;
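Aside: intel_dp_mode_valid() above rejects a mode when the required payload -- pixel clock times 3 bytes per pixel at 24bpp, or `edp_bpp / 8` for eDP -- exceeds link clock times lane count; the mode_fixup further down walks lane counts and link rates against the same test. Note the driver itself flags the formula as approximate ("I think this is a fiction"). The arithmetic, standalone (mode and link numbers are illustrative):

	#include <stdio.h>

	/* Clocks in kHz, as in the driver; 3 bytes/pixel for 24bpp DP. */
	static int dp_mode_fits(int pixel_clock, int link_clock, int lanes)
	{
		return pixel_clock * 3 <= link_clock * lanes;
	}

	int main(void)
	{
		printf("%d\n", dp_mode_fits(154000, 270000, 2)); /* 1: 462000 <= 540000 */
		printf("%d\n", dp_mode_fits(154000, 162000, 2)); /* 0: 462000 >  324000 */
		return 0;
	}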
@@ -209,13 +209,13 @@ intel_hrawclk(struct drm_device *dev)
 }
 
 static int
-intel_dp_aux_ch(struct intel_output *intel_output,
+intel_dp_aux_ch(struct intel_encoder *intel_encoder,
 		uint8_t *send, int send_bytes,
 		uint8_t *recv, int recv_size)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint32_t output_reg = dp_priv->output_reg;
-	struct drm_device *dev = intel_output->base.dev;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t ch_ctl = output_reg + 0x10;
 	uint32_t ch_data = ch_ctl + 4;
@@ -230,7 +230,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
 	 * and would like to run at 2MHz. So, take the
 	 * hrawclk value and divide by 2 and use that
 	 */
-	if (IS_eDP(intel_output))
+	if (IS_eDP(intel_encoder))
 		aux_clock_divider = 225; /* eDP input clock at 450Mhz */
 	else if (HAS_PCH_SPLIT(dev))
 		aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
@@ -313,7 +313,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
 
 /* Write data to the aux channel in native mode */
 static int
-intel_dp_aux_native_write(struct intel_output *intel_output,
+intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
 			  uint16_t address, uint8_t *send, int send_bytes)
 {
 	int ret;
@@ -330,7 +330,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
 	memcpy(&msg[4], send, send_bytes);
 	msg_bytes = send_bytes + 4;
 	for (;;) {
-		ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
+		ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
 		if (ret < 0)
 			return ret;
 		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -345,15 +345,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
 
 /* Write a single byte to the aux channel in native mode */
 static int
-intel_dp_aux_native_write_1(struct intel_output *intel_output,
+intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
 			    uint16_t address, uint8_t byte)
 {
-	return intel_dp_aux_native_write(intel_output, address, &byte, 1);
+	return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
 }
 
 /* read bytes from a native aux channel */
 static int
-intel_dp_aux_native_read(struct intel_output *intel_output,
+intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
 			 uint16_t address, uint8_t *recv, int recv_bytes)
 {
 	uint8_t msg[4];
@@ -372,7 +372,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
 	reply_bytes = recv_bytes + 1;
 
 	for (;;) {
-		ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
+		ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
 				      reply, reply_bytes);
 		if (ret == 0)
 			return -EPROTO;
@@ -398,7 +398,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 	struct intel_dp_priv *dp_priv = container_of(adapter,
 						     struct intel_dp_priv,
 						     adapter);
-	struct intel_output *intel_output = dp_priv->intel_output;
+	struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
 	uint16_t address = algo_data->address;
 	uint8_t msg[5];
 	uint8_t reply[2];
@@ -437,7 +437,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 	}
 
 	for (;;) {
-		ret = intel_dp_aux_ch(intel_output,
+		ret = intel_dp_aux_ch(intel_encoder,
 				      msg, msg_bytes,
 				      reply, reply_bytes);
 		if (ret < 0) {
@@ -465,9 +465,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 }
 
 static int
-intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
+intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 	DRM_DEBUG_KMS("i2c_init %s\n", name);
 	dp_priv->algo.running = false;
@@ -480,7 +480,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
 	strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
 	dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
 	dp_priv->adapter.algo_data = &dp_priv->algo;
-	dp_priv->adapter.dev.parent = &intel_output->base.kdev;
+	dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
 
 	return i2c_dp_aux_add_bus(&dp_priv->adapter);
 }
@@ -489,18 +489,18 @@ static bool
 intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		    struct drm_display_mode *adjusted_mode)
 {
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	int lane_count, clock;
-	int max_lane_count = intel_dp_max_lane_count(intel_output);
-	int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
+	int max_lane_count = intel_dp_max_lane_count(intel_encoder);
+	int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
 
-			if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
+			if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
 					<= link_avail) {
 				dp_priv->link_bw = bws[clock];
 				dp_priv->lane_count = lane_count;
@@ -562,16 +562,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	struct intel_dp_m_n m_n;
 
 	/*
-	 * Find the lane count in the intel_output private
+	 * Find the lane count in the intel_encoder private
 	 */
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_output *intel_output = to_intel_output(connector);
-		struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+		struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 		if (!connector->encoder || connector->encoder->crtc != crtc)
 			continue;
 
-		if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
 			lane_count = dp_priv->lane_count;
 			break;
 		}
@@ -626,9 +626,9 @@ static void
 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		  struct drm_display_mode *adjusted_mode)
 {
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
-	struct drm_crtc *crtc = intel_output->enc.crtc;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	struct drm_crtc *crtc = intel_encoder->enc.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 	dp_priv->DP = (DP_LINK_TRAIN_OFF |
@@ -667,7 +667,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	if (intel_crtc->pipe == 1)
 		dp_priv->DP |= DP_PIPEB_SELECT;
 
-	if (IS_eDP(intel_output)) {
+	if (IS_eDP(intel_encoder)) {
 		/* don't miss out required setting for eDP */
 		dp_priv->DP |= DP_PLL_ENABLE;
 		if (adjusted_mode->clock < 200000)
@@ -702,22 +702,22 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
 static void
 intel_dp_dpms(struct drm_encoder *encoder, int mode)
 {
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
-	struct drm_device *dev = intel_output->base.dev;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t dp_reg = I915_READ(dp_priv->output_reg);
 
 	if (mode != DRM_MODE_DPMS_ON) {
 		if (dp_reg & DP_PORT_EN) {
-			intel_dp_link_down(intel_output, dp_priv->DP);
-			if (IS_eDP(intel_output))
+			intel_dp_link_down(intel_encoder, dp_priv->DP);
+			if (IS_eDP(intel_encoder))
 				ironlake_edp_backlight_off(dev);
 		}
 	} else {
 		if (!(dp_reg & DP_PORT_EN)) {
-			intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
-			if (IS_eDP(intel_output))
+			intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
+			if (IS_eDP(intel_encoder))
 				ironlake_edp_backlight_on(dev);
 		}
 	}
@@ -729,12 +729,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
  * link status information
  */
 static bool
-intel_dp_get_link_status(struct intel_output *intel_output,
+intel_dp_get_link_status(struct intel_encoder *intel_encoder,
 			 uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	int ret;
 
-	ret = intel_dp_aux_native_read(intel_output,
+	ret = intel_dp_aux_native_read(intel_encoder,
 				       DP_LANE0_1_STATUS,
 				       link_status, DP_LINK_STATUS_SIZE);
 	if (ret != DP_LINK_STATUS_SIZE)
@@ -752,13 +752,13 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 static void
 intel_dp_save(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct drm_device *dev = intel_output->base.dev;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 	dp_priv->save_DP = I915_READ(dp_priv->output_reg);
-	intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
+	intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
 				 dp_priv->save_link_configuration,
 				 sizeof (dp_priv->save_link_configuration));
 }
@@ -825,7 +825,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
 }
 
 static void
-intel_get_adjust_train(struct intel_output *intel_output,
+intel_get_adjust_train(struct intel_encoder *intel_encoder,
 		       uint8_t link_status[DP_LINK_STATUS_SIZE],
 		       int lane_count,
 		       uint8_t train_set[4])
@@ -942,15 +942,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
 }
 
 static bool
-intel_dp_set_link_train(struct intel_output *intel_output,
+intel_dp_set_link_train(struct intel_encoder *intel_encoder,
 			uint32_t dp_reg_value,
 			uint8_t dp_train_pat,
 			uint8_t train_set[4],
 			bool first)
 {
-	struct drm_device *dev = intel_output->base.dev;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	int ret;
 
 	I915_WRITE(dp_priv->output_reg, dp_reg_value);
@@ -958,11 +958,11 @@ intel_dp_set_link_train(struct intel_output *intel_output,
 	if (first)
 		intel_wait_for_vblank(dev);
 
-	intel_dp_aux_native_write_1(intel_output,
+	intel_dp_aux_native_write_1(intel_encoder,
 				    DP_TRAINING_PATTERN_SET,
 				    dp_train_pat);
 
-	ret = intel_dp_aux_native_write(intel_output,
+	ret = intel_dp_aux_native_write(intel_encoder,
 					DP_TRAINING_LANE0_SET, train_set, 4);
 	if (ret != 4)
 		return false;
@@ -971,12 +971,12 @@ intel_dp_set_link_train(struct intel_output *intel_output,
 }
 
 static void
-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
 		    uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
 {
-	struct drm_device *dev = intel_output->base.dev;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint8_t train_set[4];
 	uint8_t link_status[DP_LINK_STATUS_SIZE];
 	int i;
@@ -987,7 +987,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
 	int tries;
 
 	/* Write the link configuration data */
-	intel_dp_aux_native_write(intel_output, 0x100,
+	intel_dp_aux_native_write(intel_encoder, 0x100,
 				  link_configuration, DP_LINK_CONFIGURATION_SIZE);
 
 	DP |= DP_PORT_EN;
@@ -1001,14 +1001,14 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
 		uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
 		DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 
-		if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
+		if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
 					     DP_TRAINING_PATTERN_1, train_set, first))
 			break;
 		first = false;
 		/* Set training pattern 1 */
 
 		udelay(100);
-		if (!intel_dp_get_link_status(intel_output, link_status))
+		if (!intel_dp_get_link_status(intel_encoder, link_status))
 			break;
 
 		if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
@@ -1033,7 +1033,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
 		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
 		/* Compute new train_set as requested by target */
-		intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+		intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
 	}
 
 	/* channel equalization */
@@ -1045,13 +1045,13 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
 		DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 
 		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
+		if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
 					     DP_TRAINING_PATTERN_2, train_set,
 					     false))
 			break;
 
 		udelay(400);
-		if (!intel_dp_get_link_status(intel_output, link_status))
+		if (!intel_dp_get_link_status(intel_encoder, link_status))
 			break;
 
 		if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
@@ -1064,26 +1064,26 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
 			break;
 
 		/* Compute new train_set as requested by target */
-		intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+		intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
 		++tries;
 	}
 
 	I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
 	POSTING_READ(dp_priv->output_reg);
-	intel_dp_aux_native_write_1(intel_output,
+	intel_dp_aux_native_write_1(intel_encoder,
 				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
 {
-	struct drm_device *dev = intel_output->base.dev;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 	DRM_DEBUG_KMS("\n");
 
-	if (IS_eDP(intel_output)) {
+	if (IS_eDP(intel_encoder)) {
 		DP &= ~DP_PLL_ENABLE;
 		I915_WRITE(dp_priv->output_reg, DP);
 		POSTING_READ(dp_priv->output_reg);
@@ -1096,7 +1096,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
 
 	udelay(17000);
 
-	if (IS_eDP(intel_output))
+	if (IS_eDP(intel_encoder))
 		DP |= DP_LINK_TRAIN_OFF;
 	I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
 	POSTING_READ(dp_priv->output_reg);
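Aside: intel_dp_link_train() above is the standard two-phase DP training sequence -- loop on training pattern 1 until the sink reports clock recovery (re-tuning voltage swing / pre-emphasis from the returned link status each pass), then give channel equalization on pattern 2 a bounded number of tries. The control flow alone, with all hardware and DPCD traffic stubbed out:

	#include <stdbool.h>
	#include <stdio.h>

	static bool clock_recovery_ok(int i) { return i >= 2; } /* stub sink */
	static bool channel_eq_ok(int i)     { return i >= 1; } /* stub sink */

	int main(void)
	{
		for (int i = 0; ; i++) {		/* phase 1: pattern 1 */
			if (clock_recovery_ok(i))
				break;
			/* here the driver recomputes train_set from link_status */
		}
		for (int tries = 0; tries < 5; tries++)	/* phase 2: pattern 2 */
			if (channel_eq_ok(tries))
				break;
		puts("link trained");
		return 0;
	}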
intel_dp_restore(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; if (dp_priv->save_DP & DP_PORT_EN) - intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); + intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration); else - intel_dp_link_down(intel_output, dp_priv->save_DP); + intel_dp_link_down(intel_encoder, dp_priv->save_DP); } /* @@ -1124,32 +1124,32 @@ intel_dp_restore(struct drm_connector *connector) */ static void -intel_dp_check_link_status(struct intel_output *intel_output) +intel_dp_check_link_status(struct intel_encoder *intel_encoder) { - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; uint8_t link_status[DP_LINK_STATUS_SIZE]; - if (!intel_output->enc.crtc) + if (!intel_encoder->enc.crtc) return; - if (!intel_dp_get_link_status(intel_output, link_status)) { - intel_dp_link_down(intel_output, dp_priv->DP); + if (!intel_dp_get_link_status(intel_encoder, link_status)) { + intel_dp_link_down(intel_encoder, dp_priv->DP); return; } if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) - intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); + intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); } static enum drm_connector_status ironlake_dp_detect(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; enum drm_connector_status status; status = connector_status_disconnected; - if (intel_dp_aux_native_read(intel_output, + if (intel_dp_aux_native_read(intel_encoder, 0x000, dp_priv->dpcd, sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) { @@ -1168,10 +1168,10 @@ ironlake_dp_detect(struct drm_connector *connector) static enum drm_connector_status intel_dp_detect(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct drm_device *dev = intel_output->base.dev; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; uint32_t temp, bit; enum drm_connector_status status; @@ -1210,7 +1210,7 @@ intel_dp_detect(struct drm_connector *connector) return connector_status_disconnected; status = connector_status_disconnected; - if (intel_dp_aux_native_read(intel_output, + if (intel_dp_aux_native_read(intel_encoder, 0x000, dp_priv->dpcd, sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) { @@ -1222,20 +1222,20 @@ intel_dp_detect(struct drm_connector *connector) static int intel_dp_get_modes(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct drm_device *dev = intel_output->base.dev; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; /* We should parse the EDID data and find out if it has 
an audio sink */ - ret = intel_ddc_get_modes(intel_output); + ret = intel_ddc_get_modes(intel_encoder); if (ret) return ret; /* if eDP has no EDID, try to use fixed panel mode from VBT */ - if (IS_eDP(intel_output)) { + if (IS_eDP(intel_encoder)) { if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); @@ -1249,13 +1249,13 @@ static int intel_dp_get_modes(struct drm_connector *connector) static void intel_dp_destroy (struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); - if (intel_output->i2c_bus) - intel_i2c_destroy(intel_output->i2c_bus); + if (intel_encoder->i2c_bus) + intel_i2c_destroy(intel_encoder->i2c_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - kfree(intel_output); + kfree(intel_encoder); } static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { @@ -1291,12 +1291,12 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { }; void -intel_dp_hot_plug(struct intel_output *intel_output) +intel_dp_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) - intel_dp_check_link_status(intel_output); + intel_dp_check_link_status(intel_encoder); } void @@ -1304,53 +1304,53 @@ intel_dp_init(struct drm_device *dev, int output_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; struct intel_dp_priv *dp_priv; const char *name = NULL; - intel_output = kcalloc(sizeof(struct intel_output) + + intel_encoder = kcalloc(sizeof(struct intel_encoder) + sizeof(struct intel_dp_priv), 1, GFP_KERNEL); - if (!intel_output) + if (!intel_encoder) return; - dp_priv = (struct intel_dp_priv *)(intel_output + 1); + dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); - connector = &intel_output->base; + connector = &intel_encoder->base; drm_connector_init(dev, connector, &intel_dp_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); if (output_reg == DP_A) - intel_output->type = INTEL_OUTPUT_EDP; + intel_encoder->type = INTEL_OUTPUT_EDP; else - intel_output->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; if (output_reg == DP_B || output_reg == PCH_DP_B) - intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); + intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); else if (output_reg == DP_C || output_reg == PCH_DP_C) - intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); + intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); else if (output_reg == DP_D || output_reg == PCH_DP_D) - intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); + intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); - if (IS_eDP(intel_output)) - intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); + if (IS_eDP(intel_encoder)) + intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); - intel_output->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); connector->interlace_allowed = true; connector->doublescan_allowed = 0; - dp_priv->intel_output = intel_output; + dp_priv->intel_encoder = intel_encoder; dp_priv->output_reg = output_reg; dp_priv->has_audio = false; 
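/*
 * Note the allocation idiom just above: intel_dp_init() grabs the
 * generic encoder and the DP-specific state with a single kcalloc()
 * of sizeof(struct intel_encoder) + sizeof(struct intel_dp_priv),
 * then derives dp_priv by pointer arithmetic so that the private data
 * sits immediately after the encoder and the one kfree(intel_encoder)
 * in intel_dp_destroy() releases both.  A minimal sketch of the idiom,
 * using illustrative names (enc, priv) rather than the driver's own:
 *
 *	struct intel_encoder *enc;
 *	struct intel_dp_priv *priv;
 *
 *	enc = kcalloc(sizeof(*enc) + sizeof(*priv), 1, GFP_KERNEL);
 *	if (enc == NULL)
 *		return;
 *	priv = (struct intel_dp_priv *)(enc + 1);
 *	enc->dev_priv = priv;
 *
 * intel_hdmi_init() and intel_lvds_init() below rely on the same
 * layout.
 */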
dp_priv->dpms_mode = DRM_MODE_DPMS_ON; - intel_output->dev_priv = dp_priv; + intel_encoder->dev_priv = dp_priv; - drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, + drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); + drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); - drm_mode_connector_attach_encoder(&intel_output->base, - &intel_output->enc); + drm_mode_connector_attach_encoder(&intel_encoder->base, + &intel_encoder->enc); drm_sysfs_connector_add(connector); /* Set up the DDC bus. */ @@ -1378,10 +1378,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) break; } - intel_dp_i2c_init(intel_output, name); + intel_dp_i2c_init(intel_encoder, name); - intel_output->ddc_bus = &dp_priv->adapter; - intel_output->hot_plug = intel_dp_hot_plug; + intel_encoder->ddc_bus = &dp_priv->adapter; + intel_encoder->hot_plug = intel_dp_hot_plug; if (output_reg == DP_A) { /* initialize panel mode from VBT if available for eDP */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3a467ca5785..e30253755f1 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -95,7 +95,7 @@ struct intel_framebuffer { }; -struct intel_output { +struct intel_encoder { struct drm_connector base; struct drm_encoder enc; @@ -105,7 +105,7 @@ struct intel_output { bool load_detect_temp; bool needs_tv_clock; void *dev_priv; - void (*hot_plug)(struct intel_output *); + void (*hot_plug)(struct intel_encoder *); int crtc_mask; int clone_mask; }; @@ -152,15 +152,15 @@ struct intel_crtc { }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) -#define to_intel_output(x) container_of(x, struct intel_output, base) -#define enc_to_intel_output(x) container_of(x, struct intel_output, enc) +#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) +#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, const char *name); void intel_i2c_destroy(struct i2c_adapter *adapter); -int intel_ddc_get_modes(struct intel_output *intel_output); -extern bool intel_ddc_probe(struct intel_output *intel_output); +int intel_ddc_get_modes(struct intel_encoder *intel_encoder); +extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); void intel_i2c_quirk_set(struct drm_device *dev, bool enable); void intel_i2c_reset_gmbus(struct drm_device *dev); @@ -175,7 +175,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); -extern void intel_edp_link_config (struct intel_output *, int *, int *); +extern void intel_edp_link_config (struct intel_encoder *, int *, int *); extern int intel_panel_fitter_pipe (struct drm_device *dev); @@ -191,10 +191,10 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void intel_wait_for_vblank(struct drm_device *dev); extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); -extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, +extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_display_mode *mode, int *dpms_mode); -extern void 
intel_release_load_detect_pipe(struct intel_output *intel_output, +extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode); extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 0427ca5a251..ebf213c96b9 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -80,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = { static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_i915_private *dev_priv = encoder->dev->dev_private; - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; u32 dvo_reg = dvo->dvo_reg; u32 temp = I915_READ(dvo_reg); @@ -99,8 +99,8 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) static void intel_dvo_save(struct drm_connector *connector) { struct drm_i915_private *dev_priv = connector->dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; /* Each output should probably just save the registers it touches, * but for now, use more overkill. @@ -115,8 +115,8 @@ static void intel_dvo_save(struct drm_connector *connector) static void intel_dvo_restore(struct drm_connector *connector) { struct drm_i915_private *dev_priv = connector->dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; dvo->dev_ops->restore(dvo); @@ -128,8 +128,8 @@ static void intel_dvo_restore(struct drm_connector *connector) static int intel_dvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -150,8 +150,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; /* If we have timings from the BIOS for the panel, put them in * to the adjusted mode. 
The CRTC will be set up for this mode, @@ -186,8 +186,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; int pipe = intel_crtc->pipe; u32 dvo_val; u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; @@ -241,23 +241,23 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, */ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; return dvo->dev_ops->detect(dvo); } static int intel_dvo_get_modes(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; /* We should probably have an i2c driver get_modes function for those * devices which will have a fixed set of modes determined by the chip * (TV-out, for example), but for now with just TMDS and LVDS, * that's not the case. */ - intel_ddc_get_modes(intel_output); + intel_ddc_get_modes(intel_encoder); if (!list_empty(&connector->probed_modes)) return 1; @@ -275,8 +275,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector) static void intel_dvo_destroy (struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; if (dvo) { if (dvo->dev_ops->destroy) @@ -286,13 +286,13 @@ static void intel_dvo_destroy (struct drm_connector *connector) /* no need, in i830_dvoices[] now */ //kfree(dvo); } - if (intel_output->i2c_bus) - intel_i2c_destroy(intel_output->i2c_bus); - if (intel_output->ddc_bus) - intel_i2c_destroy(intel_output->ddc_bus); + if (intel_encoder->i2c_bus) + intel_i2c_destroy(intel_encoder->i2c_bus); + if (intel_encoder->ddc_bus) + intel_i2c_destroy(intel_encoder->ddc_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - kfree(intel_output); + kfree(intel_encoder); } #ifdef RANDR_GET_CRTC_INTERFACE @@ -300,8 +300,8 @@ static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); return intel_pipe_to_crtc(pScrn, pipe); @@ -352,8 +352,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = 
to_intel_output(connector); - struct intel_dvo_device *dvo = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_dvo_device *dvo = intel_encoder->dev_priv; uint32_t dvo_reg = dvo->dvo_reg; uint32_t dvo_val = I915_READ(dvo_reg); struct drm_display_mode *mode = NULL; @@ -383,24 +383,24 @@ intel_dvo_get_current_mode (struct drm_connector *connector) void intel_dvo_init(struct drm_device *dev) { - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; struct intel_dvo_device *dvo; struct i2c_adapter *i2cbus = NULL; int ret = 0; int i; int encoder_type = DRM_MODE_ENCODER_NONE; - intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); - if (!intel_output) + intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL); + if (!intel_encoder) return; /* Set up the DDC bus */ - intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); - if (!intel_output->ddc_bus) + intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); + if (!intel_encoder->ddc_bus) goto free_intel; /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { - struct drm_connector *connector = &intel_output->base; + struct drm_connector *connector = &intel_encoder->base; int gpio; dvo = &intel_dvo_devices[i]; @@ -435,11 +435,11 @@ void intel_dvo_init(struct drm_device *dev) if (!ret) continue; - intel_output->type = INTEL_OUTPUT_DVO; - intel_output->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->type = INTEL_OUTPUT_DVO; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: - intel_output->clone_mask = + intel_encoder->clone_mask = (1 << INTEL_DVO_TMDS_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); drm_connector_init(dev, connector, @@ -448,7 +448,7 @@ void intel_dvo_init(struct drm_device *dev) encoder_type = DRM_MODE_ENCODER_TMDS; break; case INTEL_DVO_CHIP_LVDS: - intel_output->clone_mask = + intel_encoder->clone_mask = (1 << INTEL_DVO_LVDS_CLONE_BIT); drm_connector_init(dev, connector, &intel_dvo_connector_funcs, @@ -463,16 +463,16 @@ void intel_dvo_init(struct drm_device *dev) connector->interlace_allowed = false; connector->doublescan_allowed = false; - intel_output->dev_priv = dvo; - intel_output->i2c_bus = i2cbus; + intel_encoder->dev_priv = dvo; + intel_encoder->i2c_bus = i2cbus; - drm_encoder_init(dev, &intel_output->enc, + drm_encoder_init(dev, &intel_encoder->enc, &intel_dvo_enc_funcs, encoder_type); - drm_encoder_helper_add(&intel_output->enc, + drm_encoder_helper_add(&intel_encoder->enc, &intel_dvo_helper_funcs); - drm_mode_connector_attach_encoder(&intel_output->base, - &intel_output->enc); + drm_mode_connector_attach_encoder(&intel_encoder->base, + &intel_encoder->enc); if (dvo->type == INTEL_DVO_CHIP_LVDS) { /* For our LVDS chipsets, we should hopefully be able * to dig the fixed panel mode out of the BIOS data. @@ -490,10 +490,10 @@ void intel_dvo_init(struct drm_device *dev) return; } - intel_i2c_destroy(intel_output->ddc_bus); + intel_i2c_destroy(intel_encoder->ddc_bus); /* Didn't find a chip, so tear down. 
*/ if (i2cbus != NULL) intel_i2c_destroy(i2cbus); free_intel: - kfree(intel_output); + kfree(intel_encoder); } diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 69bbef92f13..8a0b3bcdc7b 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -144,7 +144,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, ret = -ENOMEM; goto out; } - obj_priv = fbo->driver_private; + obj_priv = to_intel_bo(fbo); mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1ed02f64125..48cade0cf7b 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -51,8 +51,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; u32 sdvox; sdvox = SDVO_ENCODING_HDMI | @@ -74,8 +74,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; u32 temp; temp = I915_READ(hdmi_priv->sdvox_reg); @@ -110,8 +110,8 @@ static void intel_hdmi_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); } @@ -120,8 +120,8 @@ static void intel_hdmi_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); POSTING_READ(hdmi_priv->sdvox_reg); @@ -151,21 +151,21 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; struct edid *edid = NULL; enum drm_connector_status status = connector_status_disconnected; hdmi_priv->has_hdmi_sink = false; - edid = drm_get_edid(&intel_output->base, - intel_output->ddc_bus); + edid = drm_get_edid(&intel_encoder->base, + intel_encoder->ddc_bus); if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = 
connector_status_connected; hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); } - intel_output->base.display_info.raw_edid = NULL; + intel_encoder->base.display_info.raw_edid = NULL; kfree(edid); } @@ -174,24 +174,24 @@ intel_hdmi_detect(struct drm_connector *connector) static int intel_hdmi_get_modes(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); /* We should parse the EDID data and find out if it's an HDMI sink so * we can send audio to it. */ - return intel_ddc_get_modes(intel_output); + return intel_ddc_get_modes(intel_encoder); } static void intel_hdmi_destroy(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); - if (intel_output->i2c_bus) - intel_i2c_destroy(intel_output->i2c_bus); + if (intel_encoder->i2c_bus) + intel_i2c_destroy(intel_encoder->i2c_bus); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - kfree(intel_output); + kfree(intel_encoder); } static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { @@ -230,63 +230,63 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; struct intel_hdmi_priv *hdmi_priv; - intel_output = kcalloc(sizeof(struct intel_output) + + intel_encoder = kcalloc(sizeof(struct intel_encoder) + sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); - if (!intel_output) + if (!intel_encoder) return; - hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1); + hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); - connector = &intel_output->base; + connector = &intel_encoder->base; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); - intel_output->type = INTEL_OUTPUT_HDMI; + intel_encoder->type = INTEL_OUTPUT_HDMI; connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - intel_output->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); /* Set up the DDC bus. 
*/ if (sdvox_reg == SDVOB) { - intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); + intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); + intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == SDVOC) { - intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); + intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); + intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIB) { - intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, + intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); + intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, "HDMIB"); dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIC) { - intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, + intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); + intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, "HDMIC"); dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMID) { - intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); - intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, + intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); + intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, "HDMID"); dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; } - if (!intel_output->ddc_bus) + if (!intel_encoder->ddc_bus) goto err_connector; hdmi_priv->sdvox_reg = sdvox_reg; - intel_output->dev_priv = hdmi_priv; + intel_encoder->dev_priv = hdmi_priv; - drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs, + drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs); + drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); - drm_mode_connector_attach_encoder(&intel_output->base, - &intel_output->enc); + drm_mode_connector_attach_encoder(&intel_encoder->base, + &intel_encoder->enc); drm_sysfs_connector_add(connector); /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written @@ -302,7 +302,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) err_connector: drm_connector_cleanup(connector); - kfree(intel_output); + kfree(intel_encoder); return; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 216e9f52b6e..b66806a37d3 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -239,8 +239,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct drm_encoder *tmp_encoder; - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; u32 pfit_control = 0, pfit_pgm_ratios = 0; int left_border = 0, right_border = 0, top_border = 0; int bottom_border = 0; @@ -587,8 +587,8 @@ static void intel_lvds_mode_set(struct drm_encoder 
*encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; /* * The LVDS pin pair will already have been turned on in the @@ -635,14 +635,16 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect static int intel_lvds_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct intel_output *intel_output = to_intel_output(connector); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; - ret = intel_ddc_get_modes(intel_output); + if (dev_priv->lvds_edid_good) { + ret = intel_ddc_get_modes(intel_encoder); - if (ret) - return ret; + if (ret) + return ret; + } /* Didn't get an EDID, so * Set wide sync ranges so we get all modes @@ -715,11 +717,11 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, static void intel_lvds_destroy(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct intel_output *intel_output = to_intel_output(connector); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); struct drm_i915_private *dev_priv = dev->dev_private; - if (intel_output->ddc_bus) - intel_i2c_destroy(intel_output->ddc_bus); + if (intel_encoder->ddc_bus) + intel_i2c_destroy(intel_encoder->ddc_bus); if (dev_priv->lid_notifier.notifier_call) acpi_lid_notifier_unregister(&dev_priv->lid_notifier); drm_sysfs_connector_remove(connector); @@ -732,13 +734,13 @@ static int intel_lvds_set_property(struct drm_connector *connector, uint64_t value) { struct drm_device *dev = connector->dev; - struct intel_output *intel_output = - to_intel_output(connector); + struct intel_encoder *intel_encoder = + to_intel_encoder(connector); if (property == dev->mode_config.scaling_mode_property && connector->encoder) { struct drm_crtc *crtc = connector->encoder->crtc; - struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; + struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; if (value == DRM_MODE_SCALE_NONE) { DRM_DEBUG_KMS("no scaling not supported\n"); return 0; @@ -858,6 +860,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Clientron U800", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), + DMI_MATCH(DMI_PRODUCT_NAME, "U800"), + }, + }, { } /* terminating entry */ }; @@ -968,7 +978,7 @@ static int lvds_is_present_in_vbt(struct drm_device *dev) void intel_lvds_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_display_mode *scan; /* *modes, *bios_mode; */ @@ -996,40 +1006,40 @@ void intel_lvds_init(struct drm_device *dev) gpio = PCH_GPIOC; } - intel_output = kzalloc(sizeof(struct intel_output) + + intel_encoder = kzalloc(sizeof(struct intel_encoder) + sizeof(struct intel_lvds_priv), GFP_KERNEL); - if (!intel_output) { + if (!intel_encoder) { return; } - connector = &intel_output->base; - encoder = &intel_output->enc; - drm_connector_init(dev, &intel_output->base, 
&intel_lvds_connector_funcs, + connector = &intel_encoder->base; + encoder = &intel_encoder->enc; + drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, + drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); - intel_output->type = INTEL_OUTPUT_LVDS; + drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); + intel_encoder->type = INTEL_OUTPUT_LVDS; - intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); - intel_output->crtc_mask = (1 << 1); + intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); + intel_encoder->crtc_mask = (1 << 1); drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; - lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); - intel_output->dev_priv = lvds_priv; + lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1); + intel_encoder->dev_priv = lvds_priv; /* create the scaling mode property */ drm_mode_create_scaling_mode_property(dev); /* * the initial panel fitting mode will be FULL_SCREEN. */ - drm_connector_attach_property(&intel_output->base, + drm_connector_attach_property(&intel_encoder->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; @@ -1044,8 +1054,8 @@ void intel_lvds_init(struct drm_device *dev) */ /* Set up the DDC bus. */ - intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); - if (!intel_output->ddc_bus) { + intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); + if (!intel_encoder->ddc_bus) { dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " "failed.\n"); goto failed; @@ -1055,7 +1065,10 @@ void intel_lvds_init(struct drm_device *dev) * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. 
*/ - intel_ddc_get_modes(intel_output); + dev_priv->lvds_edid_good = true; + + if (!intel_ddc_get_modes(intel_encoder)) + dev_priv->lvds_edid_good = false; list_for_each_entry(scan, &connector->probed_modes, head) { mutex_lock(&dev->mode_config.mutex); @@ -1133,9 +1146,9 @@ out: failed: DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); - if (intel_output->ddc_bus) - intel_i2c_destroy(intel_output->ddc_bus); + if (intel_encoder->ddc_bus) + intel_i2c_destroy(intel_encoder->ddc_bus); drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); - kfree(intel_output); + kfree(intel_encoder); } diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 89d303d1d3f..8e5c83b2d12 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -34,7 +34,7 @@ * intel_ddc_probe * */ -bool intel_ddc_probe(struct intel_output *intel_output) +bool intel_ddc_probe(struct intel_encoder *intel_encoder) { u8 out_buf[] = { 0x0, 0x0}; u8 buf[2]; @@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output) } }; - intel_i2c_quirk_set(intel_output->base.dev, true); - ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); - intel_i2c_quirk_set(intel_output->base.dev, false); + intel_i2c_quirk_set(intel_encoder->base.dev, true); + ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); + intel_i2c_quirk_set(intel_encoder->base.dev, false); if (ret == 2) return true; @@ -69,19 +69,19 @@ bool intel_ddc_probe(struct intel_output *intel_output) * * Fetch the EDID information from @connector using the DDC bus. */ -int intel_ddc_get_modes(struct intel_output *intel_output) +int intel_ddc_get_modes(struct intel_encoder *intel_encoder) { struct edid *edid; int ret = 0; - intel_i2c_quirk_set(intel_output->base.dev, true); - edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); - intel_i2c_quirk_set(intel_output->base.dev, false); + intel_i2c_quirk_set(intel_encoder->base.dev, true); + edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus); + intel_i2c_quirk_set(intel_encoder->base.dev, false); if (edid) { - drm_mode_connector_update_edid_property(&intel_output->base, + drm_mode_connector_update_edid_property(&intel_encoder->base, edid); - ret = drm_add_edid_modes(&intel_output->base, edid); - intel_output->base.display_info.raw_edid = NULL; + ret = drm_add_edid_modes(&intel_encoder->base, edid); + intel_encoder->base.display_info.raw_edid = NULL; kfree(edid); } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 60595fc26fd..6d524a1fc27 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -724,7 +724,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, int ret, tmp_width; struct overlay_registers *regs; bool scale_changed = false; - struct drm_i915_gem_object *bo_priv = new_bo->driver_private; + struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); struct drm_device *dev = overlay->dev; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -809,7 +809,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, intel_overlay_continue(overlay, scale_changed); overlay->old_vid_bo = overlay->vid_bo; - overlay->vid_bo = new_bo->driver_private; + overlay->vid_bo = to_intel_bo(new_bo); return 0; @@ -1344,7 +1344,7 @@ void intel_setup_overlay(struct drm_device *dev) reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); if (!reg_bo) goto out_free; - overlay->reg_bo = reg_bo->driver_private; + overlay->reg_bo = to_intel_bo(reg_bo); if 
(OVERLAY_NONPHYSICAL(dev)) { ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 26e13a0bf30..87d953664cb 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -54,7 +54,7 @@ struct intel_sdvo_priv { u8 slave_addr; /* Register for the SDVO device: SDVOB or SDVOC */ - int output_device; + int sdvo_reg; /* Active outputs controlled by this SDVO output */ uint16_t controlled_output; @@ -124,7 +124,7 @@ struct intel_sdvo_priv { */ struct intel_sdvo_encode encode; - /* DDC bus used by this SDVO output */ + /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; /* Mac mini hack -- use the same DDC as the analog connector */ @@ -162,22 +162,22 @@ struct intel_sdvo_priv { }; static bool -intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); +intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags); /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). */ -static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) +static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) { - struct drm_device *dev = intel_output->base.dev; + struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u32 bval = val, cval = val; int i; - if (sdvo_priv->output_device == SDVOB) { + if (sdvo_priv->sdvo_reg == SDVOB) { cval = I915_READ(SDVOC); } else { bval = I915_READ(SDVOB); @@ -196,10 +196,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) } } -static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, +static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, u8 *ch) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u8 out_buf[2]; u8 buf[2]; int ret; @@ -222,7 +222,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, out_buf[0] = addr; out_buf[1] = 0; - if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) + if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2) { *ch = buf[0]; return true; @@ -232,10 +232,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, return false; } -static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, +static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr, u8 ch) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u8 out_buf[2]; struct i2c_msg msgs[] = { { @@ -249,7 +249,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, out_buf[0] = addr; out_buf[1] = ch; - if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) + if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1) { return true; } @@ -353,13 +353,13 @@ static const struct _sdvo_cmd_name { SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), }; -#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") -#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) +#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? 
"SDVOB" : "SDVOC") +#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) -static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, +static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, void *args, int args_len) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int i; DRM_DEBUG_KMS("%s: W: %02X ", @@ -379,19 +379,19 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, DRM_LOG_KMS("\n"); } -static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, +static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd, void *args, int args_len) { int i; - intel_sdvo_debug_write(intel_output, cmd, args, args_len); + intel_sdvo_debug_write(intel_encoder, cmd, args, args_len); for (i = 0; i < args_len; i++) { - intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, + intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i, ((u8*)args)[i]); } - intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); + intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd); } static const char *cmd_status_names[] = { @@ -404,11 +404,11 @@ static const char *cmd_status_names[] = { "Scaling not supported" }; -static void intel_sdvo_debug_response(struct intel_output *intel_output, +static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, void *response, int response_len, u8 status) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int i; DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); @@ -423,7 +423,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output, DRM_LOG_KMS("\n"); } -static u8 intel_sdvo_read_response(struct intel_output *intel_output, +static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, void *response, int response_len) { int i; @@ -433,16 +433,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output, while (retry--) { /* Read the command response */ for (i = 0; i < response_len; i++) { - intel_sdvo_read_byte(intel_output, + intel_sdvo_read_byte(intel_encoder, SDVO_I2C_RETURN_0 + i, &((u8 *)response)[i]); } /* read the return status */ - intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, + intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS, &status); - intel_sdvo_debug_response(intel_output, response, response_len, + intel_sdvo_debug_response(intel_encoder, response, response_len, status); if (status != SDVO_CMD_STATUS_PENDING) return status; @@ -470,10 +470,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) * another I2C transaction after issuing the DDC bus switch, it will be * switched to the internal SDVO register. 
*/ -static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, +static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder, u8 target) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u8 out_buf[2], cmd_buf[2], ret_value[2], ret; struct i2c_msg msgs[] = { { @@ -497,10 +497,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, }, }; - intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, + intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); /* write the DDC switch command argument */ - intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); + intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target); out_buf[0] = SDVO_I2C_OPCODE; out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; @@ -509,7 +509,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, ret_value[0] = 0; ret_value[1] = 0; - ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); + ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3); if (ret != 3) { /* failure in I2C transfer */ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); @@ -523,7 +523,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, return; } -static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) +static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1) { struct intel_sdvo_set_target_input_args targets = {0}; u8 status; @@ -534,10 +534,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool if (target_1) targets.target_1 = 1; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets, sizeof(targets)); - status = intel_sdvo_read_response(intel_output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } @@ -548,13 +548,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool * This function is making an assumption about the layout of the response, * which should be checked against the docs. 
*/ -static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) +static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2) { struct intel_sdvo_get_trained_inputs_response response; u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); - status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; @@ -563,29 +563,29 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo return true; } -static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, +static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder, u16 *outputs) { u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); - status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs)); return (status == SDVO_CMD_STATUS_SUCCESS); } -static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, +static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, u16 outputs) { u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, sizeof(outputs)); - status = intel_sdvo_read_response(intel_output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } -static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, +static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder, int mode) { u8 status, state = SDVO_ENCODER_STATE_ON; @@ -605,24 +605,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output break; } - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state)); - status = intel_sdvo_read_response(intel_output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } -static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, +static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder, int *clock_min, int *clock_max) { struct intel_sdvo_pixel_clock_range clocks; u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL, 0); - status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); + status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; @@ -634,31 +634,31 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou return true; } -static bool intel_sdvo_set_target_output(struct intel_output *intel_output, +static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, u16 outputs) { u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, + 
intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, sizeof(outputs)); - status = intel_sdvo_read_response(intel_output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } -static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, +static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd, struct intel_sdvo_dtd *dtd) { u8 status; - intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); - status = intel_sdvo_read_response(intel_output, &dtd->part1, + intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, &dtd->part1, sizeof(dtd->part1)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; - intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); - status = intel_sdvo_read_response(intel_output, &dtd->part2, + intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, &dtd->part2, sizeof(dtd->part2)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; @@ -666,60 +666,60 @@ static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, return true; } -static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, +static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder, struct intel_sdvo_dtd *dtd) { - return intel_sdvo_get_timing(intel_output, + return intel_sdvo_get_timing(intel_encoder, SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); } -static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, +static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder, struct intel_sdvo_dtd *dtd) { - return intel_sdvo_get_timing(intel_output, + return intel_sdvo_get_timing(intel_encoder, SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); } -static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, +static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, struct intel_sdvo_dtd *dtd) { u8 status; - intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); - status = intel_sdvo_read_response(intel_output, NULL, 0); + intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1)); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; - intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); - status = intel_sdvo_read_response(intel_output, NULL, 0); + intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2)); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } -static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, +static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder, struct intel_sdvo_dtd *dtd) { - return intel_sdvo_set_timing(intel_output, + return intel_sdvo_set_timing(intel_encoder, SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); } -static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, +static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder, struct intel_sdvo_dtd *dtd) { - return intel_sdvo_set_timing(intel_output, + return intel_sdvo_set_timing(intel_encoder, SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); } static bool -intel_sdvo_create_preferred_input_timing(struct intel_output *output, +intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, uint16_t clock, uint16_t width, uint16_t height) { 
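/*
 * Like the calls below, nearly every SDVO request in this file follows
 * the two-step protocol implemented by intel_sdvo_write_cmd() and
 * intel_sdvo_read_response() earlier in the file: the argument bytes
 * are written downwards from SDVO_I2C_ARG_0, the opcode goes to
 * SDVO_I2C_OPCODE, and the status byte is then polled until it leaves
 * SDVO_CMD_STATUS_PENDING.  A condensed sketch of the calling idiom;
 * SDVO_CMD_FOO and reply are placeholders, not a real command:
 *
 *	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_FOO,
 *			     &args, sizeof(args));
 *	status = intel_sdvo_read_response(intel_encoder,
 *					  &reply, sizeof(reply));
 *	if (status != SDVO_CMD_STATUS_SUCCESS)
 *		return false;
 */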
struct intel_sdvo_preferred_input_timing_args args; - struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; uint8_t status; memset(&args, 0, sizeof(args)); @@ -733,32 +733,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) args.scaled = 1; - intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, + intel_sdvo_write_cmd(intel_encoder, + SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, &args, sizeof(args)); - status = intel_sdvo_read_response(output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; return true; } -static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, +static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder, struct intel_sdvo_dtd *dtd) { bool status; - intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, NULL, 0); - status = intel_sdvo_read_response(output, &dtd->part1, + status = intel_sdvo_read_response(intel_encoder, &dtd->part1, sizeof(dtd->part1)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; - intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, NULL, 0); - status = intel_sdvo_read_response(output, &dtd->part2, + status = intel_sdvo_read_response(intel_encoder, &dtd->part2, sizeof(dtd->part2)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; @@ -766,12 +767,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, return false; } -static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) +static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder) { u8 response, status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); - status = intel_sdvo_read_response(intel_output, &response, 1); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, &response, 1); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); @@ -783,12 +784,12 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) return response; } -static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) +static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) { u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); - status = intel_sdvo_read_response(intel_output, NULL, 0); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) return false; @@ -877,13 +878,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, mode->flags |= DRM_MODE_FLAG_PVSYNC; } -static bool intel_sdvo_get_supp_encode(struct intel_output *output, +static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder, struct intel_sdvo_encode *encode) { uint8_t status; - intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); - status = intel_sdvo_read_response(output, encode, sizeof(*encode)); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); + status = 
intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode)); if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ memset(encode, 0, sizeof(*encode)); return false; @@ -892,29 +893,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output, return true; } -static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) +static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder, + uint8_t mode) { uint8_t status; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); - status = intel_sdvo_read_response(output, NULL, 0); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } -static bool intel_sdvo_set_colorimetry(struct intel_output *output, +static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder, uint8_t mode) { uint8_t status; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); - status = intel_sdvo_read_response(output, NULL, 0); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); return (status == SDVO_CMD_STATUS_SUCCESS); } #if 0 -static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) +static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) { int i, j; uint8_t set_buf_index[2]; @@ -923,43 +925,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) uint8_t buf[48]; uint8_t *pos; - intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); - intel_sdvo_read_response(output, &av_split, 1); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); + intel_sdvo_read_response(intel_encoder, &av_split, 1); for (i = 0; i <= av_split; i++) { set_buf_index[0] = i; set_buf_index[1] = 0; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); - intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); - intel_sdvo_read_response(output, &buf_size, 1); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0); + intel_sdvo_read_response(intel_encoder, &buf_size, 1); pos = buf; for (j = 0; j <= buf_size; j += 8) { - intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HBUF_DATA, NULL, 0); - intel_sdvo_read_response(output, pos, 8); + intel_sdvo_read_response(intel_encoder, pos, 8); pos += 8; } } } #endif -static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, - uint8_t *data, int8_t size, uint8_t tx_rate) +static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, + int index, + uint8_t *data, int8_t size, uint8_t tx_rate) { uint8_t set_buf_index[2]; set_buf_index[0] = index; set_buf_index[1] = 0; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2); for (; size > 0; size -= 8) { - intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8); data += 8; } - intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); } static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) @@ -1034,7 +1038,7 @@ struct dip_infoframe { } __attribute__ ((packed)) u; } __attribute__((packed)); -static void
intel_sdvo_set_avi_infoframe(struct intel_output *output, +static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, struct drm_display_mode * mode) { struct dip_infoframe avi_if = { @@ -1045,15 +1049,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output, avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, 4 + avi_if.len); - intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, + intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if, + 4 + avi_if.len, SDVO_HBUF_TX_VSYNC); } -static void intel_sdvo_set_tv_format(struct intel_output *output) +static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) { struct intel_sdvo_tv_format format; - struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; uint32_t format_map, i; uint8_t status; @@ -1066,10 +1071,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output) memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? sizeof(format) : sizeof(format_map)); - intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map, sizeof(format)); - status = intel_sdvo_read_response(output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) DRM_DEBUG_KMS("%s: Failed to set TV format\n", SDVO_NAME(sdvo_priv)); @@ -1079,8 +1084,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_output *output = enc_to_intel_output(encoder); - struct intel_sdvo_priv *dev_priv = output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv; if (dev_priv->is_tv) { struct intel_sdvo_dtd output_dtd; @@ -1095,22 +1100,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, /* Set output timings */ intel_sdvo_get_dtd_from_mode(&output_dtd, mode); - intel_sdvo_set_target_output(output, + intel_sdvo_set_target_output(intel_encoder, dev_priv->controlled_output); - intel_sdvo_set_output_timing(output, &output_dtd); + intel_sdvo_set_output_timing(intel_encoder, &output_dtd); /* Set the input timing to the screen. Assume always input 0. */ - intel_sdvo_set_target_input(output, true, false); + intel_sdvo_set_target_input(intel_encoder, true, false); - success = intel_sdvo_create_preferred_input_timing(output, + success = intel_sdvo_create_preferred_input_timing(intel_encoder, mode->clock / 10, mode->hdisplay, mode->vdisplay); if (success) { struct intel_sdvo_dtd input_dtd; - intel_sdvo_get_preferred_input_timing(output, + intel_sdvo_get_preferred_input_timing(intel_encoder, &input_dtd); intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; @@ -1133,16 +1138,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, intel_sdvo_get_dtd_from_mode(&output_dtd, dev_priv->sdvo_lvds_fixed_mode); - intel_sdvo_set_target_output(output, + intel_sdvo_set_target_output(intel_encoder, dev_priv->controlled_output); - intel_sdvo_set_output_timing(output, &output_dtd); + intel_sdvo_set_output_timing(intel_encoder, &output_dtd); /* Set the input timing to the screen. Assume always input 0. 
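 *
 * [Editor's aside -- not part of the patch. Stripped of the rename
 * churn, the TV branch of mode_fixup is this fixed sequence (all names
 * are the driver's own):
 *
 *     intel_sdvo_set_target_output(intel_encoder, dev_priv->controlled_output);
 *     intel_sdvo_set_output_timing(intel_encoder, &output_dtd);  // sink side
 *     intel_sdvo_set_target_input(intel_encoder, true, false);   // input 0
 *     if (intel_sdvo_create_preferred_input_timing(intel_encoder,
 *                                                  mode->clock / 10,
 *                                                  mode->hdisplay,
 *                                                  mode->vdisplay)) {
 *             struct intel_sdvo_dtd input_dtd;
 *
 *             intel_sdvo_get_preferred_input_timing(intel_encoder, &input_dtd);
 *             intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
 *     }
 *
 * mode->clock is in kHz; the / 10 presumably converts it to the 10 kHz
 * units that DTDs carry.]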
*/ - intel_sdvo_set_target_input(output, true, false); + intel_sdvo_set_target_input(intel_encoder, true, false); success = intel_sdvo_create_preferred_input_timing( - output, + intel_encoder, mode->clock / 10, mode->hdisplay, mode->vdisplay); @@ -1150,7 +1155,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, if (success) { struct intel_sdvo_dtd input_dtd; - intel_sdvo_get_preferred_input_timing(output, + intel_sdvo_get_preferred_input_timing(intel_encoder, &input_dtd); intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; @@ -1182,8 +1187,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_output *output = enc_to_intel_output(encoder); - struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u32 sdvox = 0; int sdvo_pixel_multiply; struct intel_sdvo_in_out_map in_out; @@ -1202,12 +1207,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, in_out.in0 = sdvo_priv->controlled_output; in_out.in1 = 0; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, &in_out, sizeof(in_out)); - status = intel_sdvo_read_response(output, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (sdvo_priv->is_hdmi) { - intel_sdvo_set_avi_infoframe(output, mode); + intel_sdvo_set_avi_infoframe(intel_encoder, mode); sdvox |= SDVO_AUDIO_ENABLE; } @@ -1224,16 +1229,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, */ if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { /* Set the output timing to the screen */ - intel_sdvo_set_target_output(output, + intel_sdvo_set_target_output(intel_encoder, sdvo_priv->controlled_output); - intel_sdvo_set_output_timing(output, &input_dtd); + intel_sdvo_set_output_timing(intel_encoder, &input_dtd); } /* Set the input timing to the screen. Assume always input 0. */ - intel_sdvo_set_target_input(output, true, false); + intel_sdvo_set_target_input(intel_encoder, true, false); if (sdvo_priv->is_tv) - intel_sdvo_set_tv_format(output); + intel_sdvo_set_tv_format(intel_encoder); /* We would like to use intel_sdvo_create_preferred_input_timing() to * provide the device with a timing it can support, if it supports that @@ -1241,29 +1246,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, * output the preferred timing, and we don't support that currently. 
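 *
 * [Editor's aside -- not part of the patch. Two notes on the disabled
 * block that follows: it declares input_dtd as a pointer yet passes
 * &input_dtd, so it would need fixing before being re-enabled; and the
 * clock-rate multiplier chosen right after it comes from
 * intel_sdvo_get_pixel_multiplier(), whose body is not in this hunk.
 * A sketch of the usual dot-clock-to-multiplier mapping, offered as an
 * assumption rather than a quote:
 *
 *     static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
 *     {
 *             if (mode->clock >= 100000)      // >= 100 MHz: 1x
 *                     return 1;
 *             else if (mode->clock >= 50000)  // 50-100 MHz: 2x
 *                     return 2;
 *             else                            // < 50 MHz: 4x
 *                     return 4;
 *     }
 *
 * The multiplier keeps the SDVO link clock in its operating range while
 * the pipe runs at a low dot clock.]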
*/ #if 0 - success = intel_sdvo_create_preferred_input_timing(output, clock, + success = intel_sdvo_create_preferred_input_timing(intel_encoder, clock, width, height); if (success) { struct intel_sdvo_dtd *input_dtd; - intel_sdvo_get_preferred_input_timing(output, &input_dtd); - intel_sdvo_set_input_timing(output, &input_dtd); + intel_sdvo_get_preferred_input_timing(intel_encoder, &input_dtd); + intel_sdvo_set_input_timing(intel_encoder, &input_dtd); } #else - intel_sdvo_set_input_timing(output, &input_dtd); + intel_sdvo_set_input_timing(intel_encoder, &input_dtd); #endif switch (intel_sdvo_get_pixel_multiplier(mode)) { case 1: - intel_sdvo_set_clock_rate_mult(output, + intel_sdvo_set_clock_rate_mult(intel_encoder, SDVO_CLOCK_RATE_MULT_1X); break; case 2: - intel_sdvo_set_clock_rate_mult(output, + intel_sdvo_set_clock_rate_mult(intel_encoder, SDVO_CLOCK_RATE_MULT_2X); break; case 4: - intel_sdvo_set_clock_rate_mult(output, + intel_sdvo_set_clock_rate_mult(intel_encoder, SDVO_CLOCK_RATE_MULT_4X); break; } @@ -1274,8 +1279,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; } else { - sdvox |= I915_READ(sdvo_priv->output_device); - switch (sdvo_priv->output_device) { + sdvox |= I915_READ(sdvo_priv->sdvo_reg); + switch (sdvo_priv->sdvo_reg) { case SDVOB: sdvox &= SDVOB_PRESERVE_MASK; break; @@ -1299,26 +1304,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) sdvox |= SDVO_STALL_SELECT; - intel_sdvo_write_sdvox(output, sdvox); + intel_sdvo_write_sdvox(intel_encoder, sdvox); } static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; u32 temp; if (mode != DRM_MODE_DPMS_ON) { - intel_sdvo_set_active_outputs(intel_output, 0); if (0) - intel_sdvo_set_encoder_power_state(intel_output, mode); + intel_sdvo_set_active_outputs(intel_encoder, 0); if (0) + intel_sdvo_set_encoder_power_state(intel_encoder, mode); if (mode == DRM_MODE_DPMS_OFF) { - temp = I915_READ(sdvo_priv->output_device); + temp = I915_READ(sdvo_priv->sdvo_reg); if ((temp & SDVO_ENABLE) != 0) { - intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); + intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE); } } } else { @@ -1326,13 +1331,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) int i; u8 status; - temp = I915_READ(sdvo_priv->output_device); + temp = I915_READ(sdvo_priv->sdvo_reg); if ((temp & SDVO_ENABLE) == 0) - intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); + intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE); for (i = 0; i < 2; i++) intel_wait_for_vblank(dev); - status = intel_sdvo_get_trained_inputs(intel_output, &input1, + status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2); @@ -1346,8 +1351,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) } if (0) - intel_sdvo_set_encoder_power_state(intel_output, mode); - intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); + intel_sdvo_set_encoder_power_state(intel_encoder, mode); + intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output); } return; } @@ -1356,22 +1361,22 @@ static void
intel_sdvo_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int o; - sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); - intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); + sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder); + intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs); if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { - intel_sdvo_set_target_input(intel_output, true, false); - intel_sdvo_get_input_timing(intel_output, + intel_sdvo_set_target_input(intel_encoder, true, false); + intel_sdvo_get_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1); } if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { - intel_sdvo_set_target_input(intel_output, false, true); - intel_sdvo_get_input_timing(intel_output, + intel_sdvo_set_target_input(intel_encoder, false, true); + intel_sdvo_get_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2); } @@ -1380,8 +1385,8 @@ static void intel_sdvo_save(struct drm_connector *connector) u16 this_output = (1 << o); if (sdvo_priv->caps.output_flags & this_output) { - intel_sdvo_set_target_output(intel_output, this_output); - intel_sdvo_get_output_timing(intel_output, + intel_sdvo_set_target_output(intel_encoder, this_output); + intel_sdvo_get_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]); } } @@ -1389,66 +1394,66 @@ static void intel_sdvo_save(struct drm_connector *connector) /* XXX: Save TV format/enhancements. 
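 *
 * [Editor's aside -- not part of the patch. Save and restore walk the
 * same output bitmask, so the per-output DTDs line up by construction:
 *
 *     for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
 *             u16 this_output = (1 << o);
 *
 *             if (sdvo_priv->caps.output_flags & this_output) {
 *                     intel_sdvo_set_target_output(intel_encoder, this_output);
 *                     // save:    get_output_timing into save_output_dtd[o]
 *                     // restore: set_output_timing from save_output_dtd[o]
 *             }
 *     }
 *
 * Only outputs advertised in caps.output_flags are ever targeted.]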
*/ } - sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); + sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg); } static void intel_sdvo_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int o; int i; bool input1, input2; u8 status; - intel_sdvo_set_active_outputs(intel_output, 0); + intel_sdvo_set_active_outputs(intel_encoder, 0); for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) { u16 this_output = (1 << o); if (sdvo_priv->caps.output_flags & this_output) { - intel_sdvo_set_target_output(intel_output, this_output); - intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); + intel_sdvo_set_target_output(intel_encoder, this_output); + intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]); } } if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { - intel_sdvo_set_target_input(intel_output, true, false); - intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); + intel_sdvo_set_target_input(intel_encoder, true, false); + intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1); } if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { - intel_sdvo_set_target_input(intel_output, false, true); - intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); + intel_sdvo_set_target_input(intel_encoder, false, true); + intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2); } - intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); + intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult); if (sdvo_priv->is_tv) { /* XXX: Restore TV format/enhancements. 
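 *
 * [Editor's aside -- not part of the patch. The intel_sdvo_mode_valid()
 * hunk below elides the dot-clock checks; presumably they bracket
 * mode->clock with the limits probed at init time, along these lines
 * (a reconstruction, not a quote):
 *
 *     if (sdvo_priv->pixel_clock_min > mode->clock)
 *             return MODE_CLOCK_LOW;
 *     if (sdvo_priv->pixel_clock_max < mode->clock)
 *             return MODE_CLOCK_HIGH;
 *
 * pixel_clock_min/max are filled in by
 * intel_sdvo_get_input_pixel_clock_range() during intel_sdvo_init().]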
*/ } - intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); + intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX); if (sdvo_priv->save_SDVOX & SDVO_ENABLE) { for (i = 0; i < 2; i++) intel_wait_for_vblank(dev); - status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); + status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2); if (status == SDVO_CMD_STATUS_SUCCESS && !input1) DRM_DEBUG_KMS("First %s output reported failure to " "sync\n", SDVO_NAME(sdvo_priv)); } - intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); + intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs); } static int intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -1473,12 +1478,12 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, return MODE_OK; } -static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) +static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps) { u8 status; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); - status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps)); if (status != SDVO_CMD_STATUS_SUCCESS) return false; @@ -1488,22 +1493,22 @@ static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struc struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) { struct drm_connector *connector = NULL; - struct intel_output *iout = NULL; + struct intel_encoder *iout = NULL; struct intel_sdvo_priv *sdvo; /* find the sdvo connector */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - iout = to_intel_output(connector); + iout = to_intel_encoder(connector); if (iout->type != INTEL_OUTPUT_SDVO) continue; sdvo = iout->dev_priv; - if (sdvo->output_device == SDVOB && sdvoB) + if (sdvo->sdvo_reg == SDVOB && sdvoB) return connector; - if (sdvo->output_device == SDVOC && !sdvoB) + if (sdvo->sdvo_reg == SDVOC && !sdvoB) return connector; } @@ -1515,16 +1520,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector) { u8 response[2]; u8 status; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; DRM_DEBUG_KMS("\n"); if (!connector) return 0; - intel_output = to_intel_output(connector); + intel_encoder = to_intel_encoder(connector); - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); - status = intel_sdvo_read_response(intel_output, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (response[0] !=0) return 1; @@ -1536,30 +1541,30 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) { u8 response[2]; u8 status; - struct intel_output *intel_output = to_intel_output(connector); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - 
intel_sdvo_read_response(intel_output, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); + intel_sdvo_read_response(intel_encoder, &response, 2); if (on) { - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); - status = intel_sdvo_read_response(intel_output, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); + status = intel_sdvo_read_response(intel_encoder, &response, 2); - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } else { response[0] = 0; response[1] = 0; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); - intel_sdvo_read_response(intel_output, &response, 2); + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); + intel_sdvo_read_response(intel_encoder, &response, 2); } static bool -intel_sdvo_multifunc_encoder(struct intel_output *intel_output) +intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) { - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int caps = 0; if (sdvo_priv->caps.output_flags & @@ -1593,11 +1598,11 @@ static struct drm_connector * intel_find_analog_connector(struct drm_device *dev) { struct drm_connector *connector; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - intel_output = to_intel_output(connector); - if (intel_output->type == INTEL_OUTPUT_ANALOG) + intel_encoder = to_intel_encoder(connector); + if (intel_encoder->type == INTEL_OUTPUT_ANALOG) return connector; } return NULL; @@ -1622,16 +1627,16 @@ intel_analog_is_connected(struct drm_device *dev) enum drm_connector_status intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; enum drm_connector_status status = connector_status_connected; struct edid *edid = NULL; - edid = drm_get_edid(&intel_output->base, - intel_output->ddc_bus); + edid = drm_get_edid(&intel_encoder->base, + intel_encoder->ddc_bus); /* This is only applied to SDVO cards with multiple outputs */ - if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { + if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { uint8_t saved_ddc, temp_ddc; saved_ddc = sdvo_priv->ddc_bus; temp_ddc = sdvo_priv->ddc_bus >> 1; @@ -1641,8 +1646,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) */ while(temp_ddc > 1) { sdvo_priv->ddc_bus = temp_ddc; - edid = drm_get_edid(&intel_output->base, - intel_output->ddc_bus); + edid = drm_get_edid(&intel_encoder->base, + intel_encoder->ddc_bus); if (edid) { /* * When we can get the EDID, maybe it is the @@ -1661,8 +1666,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) */ if (edid == NULL && sdvo_priv->analog_ddc_bus && - !intel_analog_is_connected(intel_output->base.dev)) - edid = drm_get_edid(&intel_output->base, + 
!intel_analog_is_connected(intel_encoder->base.dev)) + edid = drm_get_edid(&intel_encoder->base, sdvo_priv->analog_ddc_bus); if (edid != NULL) { /* Don't report the output as connected if it's a DVI-I @@ -1677,7 +1682,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) } kfree(edid); - intel_output->base.display_info.raw_edid = NULL; + intel_encoder->base.display_info.raw_edid = NULL; } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) status = connector_status_disconnected; @@ -1689,16 +1694,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect { uint16_t response; u8 status; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); if (sdvo_priv->is_tv) { /* add 30ms delay when the output type is SDVO-TV */ mdelay(30); } - status = intel_sdvo_read_response(intel_output, &response, 2); + status = intel_sdvo_read_response(intel_encoder, &response, 2); DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); @@ -1708,10 +1713,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect if (response == 0) return connector_status_disconnected; - if (intel_sdvo_multifunc_encoder(intel_output) && + if (intel_sdvo_multifunc_encoder(intel_encoder) && sdvo_priv->attached_output != response) { if (sdvo_priv->controlled_output != response && - intel_sdvo_output_setup(intel_output, response) != true) + intel_sdvo_output_setup(intel_encoder, response) != true) return connector_status_unknown; sdvo_priv->attached_output = response; } @@ -1720,12 +1725,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; int num_modes; /* set the bus switch and get the modes */ - num_modes = intel_ddc_get_modes(intel_output); + num_modes = intel_ddc_get_modes(intel_encoder); /* * Mac mini hack. 
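 *
 * [Editor's aside -- not part of the patch. The multifunction probe a
 * few lines up walks candidate DDC buses by shifting the bus mask and
 * keeps the first bus that yields an EDID; the loop shape, paraphrased
 * (part of its body is elided by the hunk):
 *
 *     saved_ddc = sdvo_priv->ddc_bus;
 *     for (temp_ddc = sdvo_priv->ddc_bus >> 1; temp_ddc > 1; temp_ddc >>= 1) {
 *             sdvo_priv->ddc_bus = temp_ddc;
 *             edid = drm_get_edid(&intel_encoder->base,
 *                                 intel_encoder->ddc_bus);
 *             if (edid)
 *                     break;  // this bus reaches the attached display
 *     }
 *     if (edid == NULL)
 *             sdvo_priv->ddc_bus = saved_ddc;  // nothing found; restore
 * ]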
On this device, the DVI-I connector shares one DDC @@ -1735,17 +1740,17 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) */ if (num_modes == 0 && sdvo_priv->analog_ddc_bus && - !intel_analog_is_connected(intel_output->base.dev)) { + !intel_analog_is_connected(intel_encoder->base.dev)) { struct i2c_adapter *digital_ddc_bus; /* Switch to the analog ddc bus and try that */ - digital_ddc_bus = intel_output->ddc_bus; - intel_output->ddc_bus = sdvo_priv->analog_ddc_bus; + digital_ddc_bus = intel_encoder->ddc_bus; + intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus; - (void) intel_ddc_get_modes(intel_output); + (void) intel_ddc_get_modes(intel_encoder); - intel_output->ddc_bus = digital_ddc_bus; + intel_encoder->ddc_bus = digital_ddc_bus; } } @@ -1816,7 +1821,7 @@ struct drm_display_mode sdvo_tv_modes[] = { static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { - struct intel_output *output = to_intel_output(connector); + struct intel_encoder *output = to_intel_encoder(connector); struct intel_sdvo_priv *sdvo_priv = output->dev_priv; struct intel_sdvo_sdtv_resolution_request tv_res; uint32_t reply = 0, format_map = 0; @@ -1858,9 +1863,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; struct drm_display_mode *newmode; /* @@ -1868,7 +1873,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) * Assume that the preferred modes are * arranged in priority order. 
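 *
 * [Editor's aside -- not part of the patch. The Mac mini fallback in
 * intel_sdvo_get_ddc_modes() above never commits the analog bus; it
 * borrows it for a single probe and always restores the digital one:
 *
 *     digital_ddc_bus = intel_encoder->ddc_bus;            // stash DVI bus
 *     intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;  // borrow VGA bus
 *     (void) intel_ddc_get_modes(intel_encoder);           // probe analog
 *     intel_encoder->ddc_bus = digital_ddc_bus;            // put it back
 *
 * The intel_analog_is_connected() guard keeps it from claiming modes
 * that belong to a genuinely connected analog display.]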
*/ - intel_ddc_get_modes(intel_output); + intel_ddc_get_modes(intel_encoder); if (list_empty(&connector->probed_modes) == false) goto end; @@ -1897,7 +1902,7 @@ end: static int intel_sdvo_get_modes(struct drm_connector *connector) { - struct intel_output *output = to_intel_output(connector); + struct intel_encoder *output = to_intel_encoder(connector); struct intel_sdvo_priv *sdvo_priv = output->dev_priv; if (sdvo_priv->is_tv) @@ -1915,8 +1920,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) static void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; struct drm_device *dev = connector->dev; if (sdvo_priv->is_tv) { @@ -1953,13 +1958,13 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) static void intel_sdvo_destroy(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; - if (intel_output->i2c_bus) - intel_i2c_destroy(intel_output->i2c_bus); - if (intel_output->ddc_bus) - intel_i2c_destroy(intel_output->ddc_bus); + if (intel_encoder->i2c_bus) + intel_i2c_destroy(intel_encoder->i2c_bus); + if (intel_encoder->ddc_bus) + intel_i2c_destroy(intel_encoder->ddc_bus); if (sdvo_priv->analog_ddc_bus) intel_i2c_destroy(sdvo_priv->analog_ddc_bus); @@ -1977,7 +1982,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector) drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - kfree(intel_output); + kfree(intel_encoder); } static int @@ -1985,9 +1990,9 @@ intel_sdvo_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; - struct drm_encoder *encoder = &intel_output->enc; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; + struct drm_encoder *encoder = &intel_encoder->enc; struct drm_crtc *crtc = encoder->crtc; int ret = 0; bool changed = false; @@ -2095,8 +2100,8 @@ intel_sdvo_set_property(struct drm_connector *connector, sdvo_priv->cur_brightness = temp_value; } if (cmd) { - intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); - status = intel_sdvo_read_response(intel_output, + intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); + status = intel_sdvo_read_response(intel_encoder, NULL, 0); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO command \n"); @@ -2191,7 +2196,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv) } static bool -intel_sdvo_get_digital_encoding_mode(struct intel_output *output) +intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output) { struct intel_sdvo_priv *sdvo_priv = output->dev_priv; uint8_t status; @@ -2205,42 +2210,42 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output) return true; } -static struct intel_output * -intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) +static struct intel_encoder * +intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) { 
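/*
 * [Editor's aside -- not part of the patch. The i2c core hands the
 * wrapped algorithm nothing but the adapter pointer, so this function
 * recovers the owning encoder by pointer identity: it scans the
 * connector list for the encoder whose ddc_bus is this adapter.
 * intel_sdvo_master_xfer() below then uses it roughly like so
 * (condensed from the code that follows):
 *
 *     intel_encoder = intel_sdvo_chan_to_intel_encoder(chan);
 *     if (intel_encoder == NULL)
 *             return -EINVAL;
 *     sdvo_priv = intel_encoder->dev_priv;
 *     intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
 *     return algo->master_xfer(i2c_adap, msgs, num);  // saved bit-bang algo
 *
 * That is, every DDC transaction first tells the SDVO device to route
 * its control bus to the DDC pins, then defers to the original
 * bit-banging algorithm saved at init time.]
 */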
struct drm_device *dev = chan->drm_dev; struct drm_connector *connector; - struct intel_output *intel_output = NULL; + struct intel_encoder *intel_encoder = NULL; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (to_intel_output(connector)->ddc_bus == &chan->adapter) { - intel_output = to_intel_output(connector); + if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) { + intel_encoder = to_intel_encoder(connector); break; } } - return intel_output; + return intel_encoder; } static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; struct intel_sdvo_priv *sdvo_priv; struct i2c_algo_bit_data *algo_data; const struct i2c_algorithm *algo; algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; - intel_output = - intel_sdvo_chan_to_intel_output( + intel_encoder = + intel_sdvo_chan_to_intel_encoder( (struct intel_i2c_chan *)(algo_data->data)); - if (intel_output == NULL) + if (intel_encoder == NULL) return -EINVAL; - sdvo_priv = intel_output->dev_priv; - algo = intel_output->i2c_bus->algo; + sdvo_priv = intel_encoder->dev_priv; + algo = intel_encoder->i2c_bus->algo; - intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); + intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus); return algo->master_xfer(i2c_adap, msgs, num); } @@ -2249,12 +2254,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { }; static u8 -intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) +intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct sdvo_device_mapping *my_mapping, *other_mapping; - if (output_device == SDVOB) { + if (sdvo_reg == SDVOB) { my_mapping = &dev_priv->sdvo_mappings[0]; other_mapping = &dev_priv->sdvo_mappings[1]; } else { @@ -2279,7 +2284,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) /* No SDVO device info is found for another DVO port, * so use mapping assumption we had before BIOS parsing. 
*/ - if (output_device == SDVOB) + if (sdvo_reg == SDVOB) return 0x70; else return 0x72; @@ -2305,15 +2310,15 @@ static struct dmi_system_id intel_sdvo_bad_tv[] = { }; static bool -intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) +intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) { - struct drm_connector *connector = &intel_output->base; - struct drm_encoder *encoder = &intel_output->enc; - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct drm_connector *connector = &intel_encoder->base; + struct drm_encoder *encoder = &intel_encoder->enc; + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; bool ret = true, registered = false; sdvo_priv->is_tv = false; - intel_output->needs_tv_clock = false; + intel_encoder->needs_tv_clock = false; sdvo_priv->is_lvds = false; if (device_is_registered(&connector->kdev)) { @@ -2331,16 +2336,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; - if (intel_sdvo_get_supp_encode(intel_output, + if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode) && - intel_sdvo_get_digital_encoding_mode(intel_output) && + intel_sdvo_get_digital_encoding_mode(intel_encoder) && sdvo_priv->is_hdmi) { /* enable hdmi encoding mode if supported */ - intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); - intel_sdvo_set_colorimetry(intel_output, + intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); + intel_sdvo_set_colorimetry(intel_encoder, SDVO_COLORIMETRY_RGB256); connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; - intel_output->clone_mask = + intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); } @@ -2351,21 +2356,21 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; sdvo_priv->is_tv = true; - intel_output->needs_tv_clock = true; - intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; + intel_encoder->needs_tv_clock = true; + intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; } else if (flags & SDVO_OUTPUT_RGB0) { sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; - intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); } else if (flags & SDVO_OUTPUT_RGB1) { sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; - intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); } else if (flags & SDVO_OUTPUT_CVBS0) { @@ -2373,15 +2378,15 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; sdvo_priv->is_tv = true; - intel_output->needs_tv_clock = true; - intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; + intel_encoder->needs_tv_clock = true; + intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; } else if (flags & SDVO_OUTPUT_LVDS0) { sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 
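/*
 * [Editor's aside -- not part of the patch. Taken together, the branches
 * of intel_sdvo_output_setup() map SDVO capability flags onto DRM types:
 *     TMDS0/TMDS1   -> ENCODER_TMDS  + connector DVID
 *                      (upgraded to HDMIA when HDMI encoding is enabled)
 *     SVID0/CVBS0   -> ENCODER_TVDAC + SVIDEO, sets is_tv, needs_tv_clock
 *     RGB0/RGB1     -> ENCODER_DAC   + VGA
 *     LVDS0/LVDS1   -> ENCODER_LVDS  + LVDS, sets is_lvds
 * with a clone_mask chosen per class.]
 */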
connector->connector_type = DRM_MODE_CONNECTOR_LVDS; sdvo_priv->is_lvds = true; - intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | + intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | (1 << INTEL_SDVO_LVDS_CLONE_BIT); } else if (flags & SDVO_OUTPUT_LVDS1) { @@ -2389,7 +2394,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) encoder->encoder_type = DRM_MODE_ENCODER_LVDS; connector->connector_type = DRM_MODE_CONNECTOR_LVDS; sdvo_priv->is_lvds = true; - intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | + intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | (1 << INTEL_SDVO_LVDS_CLONE_BIT); } else { @@ -2402,7 +2407,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) bytes[0], bytes[1]); ret = false; } - intel_output->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); if (ret && registered) ret = drm_sysfs_connector_add(connector) == 0 ? true : false; @@ -2414,18 +2419,18 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) static void intel_sdvo_tv_create_property(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; struct intel_sdvo_tv_format format; uint32_t format_map, i; uint8_t status; - intel_sdvo_set_target_output(intel_output, + intel_sdvo_set_target_output(intel_encoder, sdvo_priv->controlled_output); - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &format, sizeof(format)); if (status != SDVO_CMD_STATUS_SUCCESS) return; @@ -2463,16 +2468,16 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector) static void intel_sdvo_create_enhance_property(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; struct intel_sdvo_enhancements_reply sdvo_data; struct drm_device *dev = connector->dev; uint8_t status; uint16_t response, data_value[2]; - intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, NULL, 0); - status = intel_sdvo_read_response(intel_output, &sdvo_data, + status = intel_sdvo_read_response(intel_encoder, &sdvo_data, sizeof(sdvo_data)); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS(" incorrect response is returned\n"); @@ -2488,18 +2493,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) * property */ if (sdvo_data.overscan_h) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO max " "h_overscan\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_OVERSCAN_H, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = 
intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); @@ -2529,18 +2534,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } if (sdvo_data.overscan_v) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO max " "v_overscan\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_OVERSCAN_V, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); @@ -2570,17 +2575,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } if (sdvo_data.position_h) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_POSITION_H, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); @@ -2601,17 +2606,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } if (sdvo_data.position_v) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_POSITION_V, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); @@ -2634,17 +2639,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) } if (sdvo_priv->is_tv) { if (sdvo_data.saturation) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_SATURATION, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SATURATION, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); @@ -2666,17 +2671,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } if 
(sdvo_data.contrast) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CONTRAST, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); @@ -2697,17 +2702,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) data_value[0], data_value[1], response); } if (sdvo_data.hue) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_HUE, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HUE, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); @@ -2730,17 +2735,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) } if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { if (sdvo_data.brightness) { - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &data_value, 4); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); return; } - intel_sdvo_write_cmd(intel_output, + intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_BRIGHTNESS, NULL, 0); - status = intel_sdvo_read_response(intel_output, + status = intel_sdvo_read_response(intel_encoder, &response, 2); if (status != SDVO_CMD_STATUS_SUCCESS) { DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); @@ -2765,81 +2770,81 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) return; } -bool intel_sdvo_init(struct drm_device *dev, int output_device) +bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; struct intel_sdvo_priv *sdvo_priv; u8 ch[0x40]; int i; - intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); - if (!intel_output) { + intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); + if (!intel_encoder) { return false; } - sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); - sdvo_priv->output_device = output_device; + sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1); + sdvo_priv->sdvo_reg = sdvo_reg; - intel_output->dev_priv = sdvo_priv; - intel_output->type = INTEL_OUTPUT_SDVO; + intel_encoder->dev_priv = sdvo_priv; + intel_encoder->type = INTEL_OUTPUT_SDVO; /* setup the DDC bus. 
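 *
 * [Editor's aside -- not part of the patch. The allocation above packs
 * the generic encoder and the SDVO-private state into a single block:
 *
 *     intel_encoder = kcalloc(sizeof(struct intel_encoder) +
 *                             sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
 *     sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
 *
 * The private data lives immediately past the encoder, so the single
 * kfree(intel_encoder) in intel_sdvo_destroy() releases both.]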
*/ - if (output_device == SDVOB) - intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); + if (sdvo_reg == SDVOB) + intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); else - intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); + intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); - if (!intel_output->i2c_bus) + if (!intel_encoder->i2c_bus) goto err_inteloutput; - sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); + sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); /* Save the bit-banging i2c functionality for use by the DDC wrapper */ - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; + intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { - if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { + if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", - output_device == SDVOB ? 'B' : 'C'); + sdvo_reg == SDVOB ? 'B' : 'C'); goto err_i2c; } } /* setup the DDC bus. */ - if (output_device == SDVOB) { - intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); + if (sdvo_reg == SDVOB) { + intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, "SDVOB/VGA DDC BUS"); dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; } else { - intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); + intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, "SDVOC/VGA DDC BUS"); dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; } - if (intel_output->ddc_bus == NULL) + if (intel_encoder->ddc_bus == NULL) goto err_i2c; /* Wrap with our custom algo which switches to DDC mode */ - intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; + intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; /* In default case sdvo lvds is false */ - intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); + intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); - if (intel_sdvo_output_setup(intel_output, + if (intel_sdvo_output_setup(intel_encoder, sdvo_priv->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", - output_device == SDVOB ? 'B' : 'C'); + sdvo_reg == SDVOB ? 
'B' : 'C'); goto err_i2c; } - connector = &intel_output->base; + connector = &intel_encoder->base; drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, connector->connector_type); @@ -2848,12 +2853,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) connector->doublescan_allowed = 0; connector->display_info.subpixel_order = SubPixelHorizontalRGB; - drm_encoder_init(dev, &intel_output->enc, - &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); + drm_encoder_init(dev, &intel_encoder->enc, + &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type); - drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); + drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); - drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); + drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); if (sdvo_priv->is_tv) intel_sdvo_tv_create_property(connector); @@ -2865,9 +2870,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) intel_sdvo_select_ddc_bus(sdvo_priv); /* Set the input timing to the screen. Assume always input 0. */ - intel_sdvo_set_target_input(intel_output, true, false); + intel_sdvo_set_target_input(intel_encoder, true, false); - intel_sdvo_get_input_pixel_clock_range(intel_output, + intel_sdvo_get_input_pixel_clock_range(intel_encoder, &sdvo_priv->pixel_clock_min, &sdvo_priv->pixel_clock_max); @@ -2894,12 +2899,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) err_i2c: if (sdvo_priv->analog_ddc_bus != NULL) intel_i2c_destroy(sdvo_priv->analog_ddc_bus); - if (intel_output->ddc_bus != NULL) - intel_i2c_destroy(intel_output->ddc_bus); - if (intel_output->i2c_bus != NULL) - intel_i2c_destroy(intel_output->i2c_bus); + if (intel_encoder->ddc_bus != NULL) + intel_i2c_destroy(intel_encoder->ddc_bus); + if (intel_encoder->i2c_bus != NULL) + intel_i2c_destroy(intel_encoder->i2c_bus); err_inteloutput: - kfree(intel_output); + kfree(intel_encoder); return false; } diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 552ec110b74..d7d39b2327d 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -921,8 +921,8 @@ intel_tv_save(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; int i; tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); @@ -971,8 +971,8 @@ intel_tv_restore(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; struct drm_crtc *crtc = connector->encoder->crtc; struct intel_crtc *intel_crtc; int i; @@ -1068,9 +1068,9 @@ intel_tv_mode_lookup (char *tv_format) } static const struct tv_mode * -intel_tv_mode_find (struct intel_output *intel_output) +intel_tv_mode_find (struct intel_encoder *intel_encoder) { - struct intel_tv_priv *tv_priv = intel_output->dev_priv; + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; return 
intel_tv_mode_lookup(tv_priv->tv_format); } @@ -1078,8 +1078,8 @@ intel_tv_mode_find (struct intel_output *intel_output) static enum drm_mode_status intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_output *intel_output = to_intel_output(connector); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); /* Ensure TV refresh is close to desired refresh */ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) @@ -1095,8 +1095,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, { struct drm_device *dev = encoder->dev; struct drm_mode_config *drm_config = &dev->mode_config; - struct intel_output *intel_output = enc_to_intel_output(encoder); - const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder); struct drm_encoder *other_encoder; if (!tv_mode) @@ -1121,9 +1121,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_output *intel_output = enc_to_intel_output(encoder); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); u32 tv_ctl; u32 hctl1, hctl2, hctl3; u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; @@ -1360,9 +1360,9 @@ static const struct drm_display_mode reported_modes[] = { * \return false if TV is disconnected. 
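 *
 * [Editor's aside -- not part of the patch. The DAC sense bits are only
 * meaningful while a pipe is actively driving the TV output, which is
 * why intel_tv_detect() below borrows a pipe when none is attached
 * (condensed from the code further down):
 *
 *     if (encoder->crtc && encoder->crtc->enabled) {
 *             type = intel_tv_detect_type(encoder->crtc, intel_encoder);
 *     } else {
 *             crtc = intel_get_load_detect_pipe(intel_encoder, &mode,
 *                                               &dpms_mode);
 *             if (crtc) {
 *                     type = intel_tv_detect_type(crtc, intel_encoder);
 *                     intel_release_load_detect_pipe(intel_encoder, dpms_mode);
 *             } else
 *                     type = -1;      // no pipe to borrow; give up
 *     }]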
*/ static int -intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) +intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder) { - struct drm_encoder *encoder = &intel_output->enc; + struct drm_encoder *encoder = &intel_encoder->enc; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; @@ -1441,9 +1441,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) */ static void intel_tv_find_better_format(struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); int i; if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == @@ -1475,9 +1475,9 @@ intel_tv_detect(struct drm_connector *connector) { struct drm_crtc *crtc; struct drm_display_mode mode; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; - struct drm_encoder *encoder = &intel_output->enc; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + struct drm_encoder *encoder = &intel_encoder->enc; int dpms_mode; int type = tv_priv->type; @@ -1485,12 +1485,12 @@ intel_tv_detect(struct drm_connector *connector) drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); if (encoder->crtc && encoder->crtc->enabled) { - type = intel_tv_detect_type(encoder->crtc, intel_output); + type = intel_tv_detect_type(encoder->crtc, intel_encoder); } else { - crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); + crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode); if (crtc) { - type = intel_tv_detect_type(crtc, intel_output); - intel_release_load_detect_pipe(intel_output, dpms_mode); + type = intel_tv_detect_type(crtc, intel_encoder); + intel_release_load_detect_pipe(intel_encoder, dpms_mode); } else type = -1; } @@ -1525,8 +1525,8 @@ static void intel_tv_chose_preferred_modes(struct drm_connector *connector, struct drm_display_mode *mode_ptr) { - struct intel_output *intel_output = to_intel_output(connector); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; @@ -1550,8 +1550,8 @@ static int intel_tv_get_modes(struct drm_connector *connector) { struct drm_display_mode *mode_ptr; - struct intel_output *intel_output = to_intel_output(connector); - const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); int j, count = 0; u64 tmp; @@ -1604,11 +1604,11 @@ intel_tv_get_modes(struct drm_connector *connector) static void intel_tv_destroy (struct drm_connector *connector) { - struct intel_output *intel_output = to_intel_output(connector); + struct intel_encoder *intel_encoder = to_intel_encoder(connector); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - 
kfree(intel_output); + kfree(intel_encoder); } @@ -1617,9 +1617,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop uint64_t val) { struct drm_device *dev = connector->dev; - struct intel_output *intel_output = to_intel_output(connector); - struct intel_tv_priv *tv_priv = intel_output->dev_priv; - struct drm_encoder *encoder = &intel_output->enc; + struct intel_encoder *intel_encoder = to_intel_encoder(connector); + struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; + struct drm_encoder *encoder = &intel_encoder->enc; struct drm_crtc *crtc = encoder->crtc; int ret = 0; bool changed = false; @@ -1740,7 +1740,7 @@ intel_tv_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; - struct intel_output *intel_output; + struct intel_encoder *intel_encoder; struct intel_tv_priv *tv_priv; u32 tv_dac_on, tv_dac_off, save_tv_dac; char **tv_format_names; @@ -1780,28 +1780,28 @@ intel_tv_init(struct drm_device *dev) (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) return; - intel_output = kzalloc(sizeof(struct intel_output) + + intel_encoder = kzalloc(sizeof(struct intel_encoder) + sizeof(struct intel_tv_priv), GFP_KERNEL); - if (!intel_output) { + if (!intel_encoder) { return; } - connector = &intel_output->base; + connector = &intel_encoder->base; drm_connector_init(dev, connector, &intel_tv_connector_funcs, DRM_MODE_CONNECTOR_SVIDEO); - drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, + drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, DRM_MODE_ENCODER_TVDAC); - drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); - tv_priv = (struct intel_tv_priv *)(intel_output + 1); - intel_output->type = INTEL_OUTPUT_TVOUT; - intel_output->crtc_mask = (1 << 0) | (1 << 1); - intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); - intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); - intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); - intel_output->dev_priv = tv_priv; + drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc); + tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); + intel_encoder->type = INTEL_OUTPUT_TVOUT; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); + intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); + intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); + intel_encoder->dev_priv = tv_priv; tv_priv->type = DRM_MODE_CONNECTOR_Unknown; /* BIOS margin values */ @@ -1812,7 +1812,7 @@ intel_tv_init(struct drm_device *dev) tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); - drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); + drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); connector->interlace_allowed = false; connector->doublescan_allowed = false; diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 7f0d807a0d0..453df3f6053 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -22,7 +22,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv50_cursor.o nv50_display.o nv50_fbcon.o \ nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ - nv17_gpio.o + nv17_gpio.o nv50_gpio.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) 
+= nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index b5a9336a2e8..abc382a9918 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -2573,48 +2573,34 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) * each GPIO according to various values listed in each entry */ - const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; - const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr]; - const uint8_t *gpio_entry; int i; - if (!iexec->execute) - return 1; - - if (bios->dcb.version != 0x40) { - NV_ERROR(bios->dev, "DCB table not version 4.0\n"); - return 0; - } - - if (!bios->dcb.gpio_table_ptr) { - NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); - return 0; + if (dev_priv->card_type != NV_50) { + NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n"); + return -ENODEV; } - gpio_entry = gpio_table + gpio_table[1]; - for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) { - uint32_t entry = ROM32(gpio_entry[0]), r, s, v; - int line = (entry & 0x0000001f); + if (!iexec->execute) + return 1; - BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry); + for (i = 0; i < bios->dcb.gpio.entries; i++) { + struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i]; + uint32_t r, s, v; - if ((entry & 0x0000ff00) == 0x0000ff00) - continue; + BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); - r = nv50_gpio_reg[line >> 3]; - s = (line & 0x07) << 2; - v = bios_rd32(bios, r) & ~(0x00000003 << s); - if (entry & 0x01000000) - v |= (((entry & 0x60000000) >> 29) ^ 2) << s; - else - v |= (((entry & 0x18000000) >> 27) ^ 2) << s; - bios_wr32(bios, r, v); + nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); - r = nv50_gpio_ctl[line >> 4]; - s = (line & 0x0f); + /* The NVIDIA binary driver doesn't appear to actually do + * any of this, my VBIOS does however. + */ + /* Not a clue, needs de-magicing */ + r = nv50_gpio_ctl[gpio->line >> 4]; + s = (gpio->line & 0x0f); v = bios_rd32(bios, r) & ~(0x00010001 << s); - switch ((entry & 0x06000000) >> 25) { + switch ((gpio->entry & 0x06000000) >> 25) { case 1: v |= (0x00000001 << s); break; @@ -3198,7 +3184,6 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int struct nvbios *bios = &dev_priv->vbios; unsigned int outputset = (dcbent->or == 4) ? 1 : 0; uint16_t scriptptr = 0, clktable; - uint8_t clktableptr = 0; /* * For now we assume version 3.0 table - g80 support will need some @@ -3217,26 +3202,29 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); break; case LVDS_RESET: + clktable = bios->fp.lvdsmanufacturerpointer + 15; + if (dcbent->or == 4) + clktable += 8; + if (dcbent->lvdsconf.use_straps_for_mode) { if (bios->fp.dual_link) - clktableptr += 2; - if (bios->fp.BITbit1) - clktableptr++; + clktable += 4; + if (bios->fp.if_is_24bit) + clktable += 2; } else { /* using EDID */ - uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; - int fallbackcmpval = (dcbent->or == 4) ? 4 : 1; + int cmpval_24bit = (dcbent->or == 4) ? 
4 : 1; if (bios->fp.dual_link) { - clktableptr += 2; - fallbackcmpval *= 2; + clktable += 4; + cmpval_24bit <<= 1; } - if (fallbackcmpval & fallback) - clktableptr++; + + if (bios->fp.strapless_is_24bit & cmpval_24bit) + clktable += 2; } - /* adding outputset * 8 may not be correct */ - clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]); + clktable = ROM16(bios->data[clktable]); if (!clktable) { NV_ERROR(dev, "Pixel clock comparison table not found\n"); return -ENOENT; @@ -3638,37 +3626,40 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b *if_is_24bit = bios->data[lvdsofs] & 16; break; case 0x30: - /* - * My money would be on there being a 24 bit interface bit in - * this table, but I have no example of a laptop bios with a - * 24 bit panel to confirm that. Hence we shout loudly if any - * bit other than bit 0 is set (I've not even seen bit 1) - */ - if (bios->data[lvdsofs] > 1) - NV_ERROR(dev, - "You have a very unusual laptop display; please report it\n"); + case 0x40: /* * No sign of the "power off for reset" or "reset for panel * on" bits, but it's safer to assume we should */ bios->fp.power_off_for_reset = true; bios->fp.reset_after_pclk_change = true; + /* * It's ok lvdsofs is wrong for nv4x edid case; dual_link is - * over-written, and BITbit1 isn't used + * over-written, and if_is_24bit isn't used */ bios->fp.dual_link = bios->data[lvdsofs] & 1; - bios->fp.BITbit1 = bios->data[lvdsofs] & 2; - bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; - break; - case 0x40: - bios->fp.dual_link = bios->data[lvdsofs] & 1; bios->fp.if_is_24bit = bios->data[lvdsofs] & 2; bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; break; } + /* Dell Latitude D620 reports a too-high value for the dual-link + * transition freq, causing us to program the panel incorrectly. + * + * It doesn't appear the VBIOS actually uses its transition freq + * (90000kHz), instead it uses the "Number of LVDS channels" field + * out of the panel ID structure (http://www.spwg.org/). + * + * For the moment, a quirk will do :) + */ + if ((dev->pdev->device == 0x01d7) && + (dev->pdev->subsystem_vendor == 0x1028) && + (dev->pdev->subsystem_device == 0x01c2)) { + bios->fp.duallink_transition_clk = 80000; + } + /* set dual_link flag for EDID case */ if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk); @@ -5077,25 +5068,25 @@ parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset) gpio->tag = tag; gpio->line = line; gpio->invert = flags != 4; + gpio->entry = ent; } static void parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) { + uint32_t entry = ROM32(bios->data[offset]); struct dcb_gpio_entry *gpio; - uint32_t ent = ROM32(bios->data[offset]); - uint8_t line = ent & 0x1f, - tag = ent >> 8 & 0xff; - if (tag == 0xff) + if ((entry & 0x0000ff00) == 0x0000ff00) return; gpio = new_gpio_entry(bios); - - /* Currently unused, we may need more fields parsed at some - * point. 
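The parse_dcb40_gpio_entry() rework just below keeps every field of the 32-bit entry instead of discarding most of it. A self-contained sketch of the same unpacking, with bit positions copied from the masks in the patch (the struct is a stand-in, not the driver's dcb_gpio_entry):

#include <stdint.h>

struct dcb40_gpio_sketch {
	uint8_t tag;		/* bits 15:8; 0xff marks an unused entry */
	uint8_t line;		/* bits 4:0 */
	uint8_t state_default;	/* bit 24 */
	uint8_t state[2];	/* bits 28:27 and 30:29 */
};

static int unpack_dcb40_gpio(uint32_t entry, struct dcb40_gpio_sketch *g)
{
	if ((entry & 0x0000ff00) == 0x0000ff00)
		return -1;	/* unused entry: skip it, as the parser does */

	g->tag           = (entry & 0x0000ff00) >> 8;
	g->line          = (entry & 0x0000001f) >> 0;
	g->state_default = (entry & 0x01000000) >> 24;
	g->state[0]      = (entry & 0x18000000) >> 27;
	g->state[1]      = (entry & 0x60000000) >> 29;
	return 0;
}

Keeping the raw word in gpio->entry as well lets later code (the INIT_GPIO handler above) reuse fields that have not yet been given names.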
*/ - gpio->tag = tag; - gpio->line = line; + gpio->tag = (entry & 0x0000ff00) >> 8; + gpio->line = (entry & 0x0000001f) >> 0; + gpio->state_default = (entry & 0x01000000) >> 24; + gpio->state[0] = (entry & 0x18000000) >> 27; + gpio->state[1] = (entry & 0x60000000) >> 29; + gpio->entry = entry; } static void diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 4f88e6924d2..c0d7b0a3ece 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -49,6 +49,9 @@ struct dcb_gpio_entry { enum dcb_gpio_tag tag; int line; bool invert; + uint32_t entry; + uint8_t state_default; + uint8_t state[2]; }; struct dcb_gpio_table { @@ -267,7 +270,6 @@ struct nvbios { bool reset_after_pclk_change; bool dual_link; bool link_c_increment; - bool BITbit1; bool if_is_24bit; int duallink_transition_clk; uint8_t strapless_is_24bit; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 9042dd7fb05..957d1762984 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -72,7 +72,7 @@ nouveau_bo_fixup_align(struct drm_device *dev, * many small buffers. */ if (dev_priv->card_type == NV_50) { - uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; + uint32_t block_size = dev_priv->vram_size >> 15; int i; switch (tile_flags) { @@ -154,7 +154,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, nvbo->placement.fpfn = 0; nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; - nouveau_bo_placement_set(nvbo, flags); + nouveau_bo_placement_set(nvbo, flags, 0); nvbo->channel = chan; ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, @@ -173,26 +173,33 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, return 0; } +static void +set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) +{ + *n = 0; + + if (type & TTM_PL_FLAG_VRAM) + pl[(*n)++] = TTM_PL_FLAG_VRAM | flags; + if (type & TTM_PL_FLAG_TT) + pl[(*n)++] = TTM_PL_FLAG_TT | flags; + if (type & TTM_PL_FLAG_SYSTEM) + pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; +} + void -nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype) +nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) { - int n = 0; - - if (memtype & TTM_PL_FLAG_VRAM) - nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING; - if (memtype & TTM_PL_FLAG_TT) - nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; - if (memtype & TTM_PL_FLAG_SYSTEM) - nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; - nvbo->placement.placement = nvbo->placements; - nvbo->placement.busy_placement = nvbo->placements; - nvbo->placement.num_placement = n; - nvbo->placement.num_busy_placement = n; - - if (nvbo->pin_refcnt) { - while (n--) - nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT; - } + struct ttm_placement *pl = &nvbo->placement; + uint32_t flags = TTM_PL_MASK_CACHING | + (nvbo->pin_refcnt ? 
TTM_PL_FLAG_NO_EVICT : 0); + + pl->placement = nvbo->placements; + set_placement_list(nvbo->placements, &pl->num_placement, + type, flags); + + pl->busy_placement = nvbo->busy_placements; + set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, + type | busy, flags); } int @@ -200,7 +207,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) { struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; - int ret, i; + int ret; if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { NV_ERROR(nouveau_bdev(bo->bdev)->dev, @@ -216,9 +223,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) if (ret) goto out; - nouveau_bo_placement_set(nvbo, memtype); - for (i = 0; i < nvbo->placement.num_placement; i++) - nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + nouveau_bo_placement_set(nvbo, memtype, 0); ret = ttm_bo_validate(bo, &nvbo->placement, false, false); if (ret == 0) { @@ -245,7 +250,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) { struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; - int ret, i; + int ret; if (--nvbo->pin_refcnt) return 0; @@ -254,8 +259,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) if (ret) return ret; - for (i = 0; i < nvbo->placement.num_placement; i++) - nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); ret = ttm_bo_validate(bo, &nvbo->placement, false, false); if (ret == 0) { @@ -396,8 +400,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->io_addr = NULL; man->io_offset = drm_get_resource_start(dev, 1); man->io_size = drm_get_resource_len(dev, 1); - if (man->io_size > nouveau_mem_fb_amount(dev)) - man->io_size = nouveau_mem_fb_amount(dev); + if (man->io_size > dev_priv->vram_size) + man->io_size = dev_priv->vram_size; man->gpu_offset = dev_priv->vm_vram_base; break; @@ -440,10 +444,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) switch (bo->mem.mem_type) { case TTM_PL_VRAM: - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT); + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, + TTM_PL_FLAG_SYSTEM); break; default: - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); break; } diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 6dfb425cbae..1fc57ef5829 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -142,7 +142,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, GFP_KERNEL); if (!dev_priv->fifos[channel]) return -ENOMEM; - dev_priv->fifo_alloc_count++; chan = dev_priv->fifos[channel]; INIT_LIST_HEAD(&chan->nvsw.vbl_wait); INIT_LIST_HEAD(&chan->fence.pending); @@ -321,7 +320,6 @@ nouveau_channel_free(struct nouveau_channel *chan) iounmap(chan->user); dev_priv->fifos[chan->id] = NULL; - dev_priv->fifo_alloc_count--; kfree(chan); } diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 8ff9ef5d4b4..a251886a0ce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -137,10 +137,9 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_minor *minor = node->minor; - struct drm_device *dev = minor->dev; + struct drm_nouveau_private *dev_priv = 
minor->dev->dev_private; - seq_printf(m, "VRAM total: %dKiB\n", - (int)(nouveau_mem_fb_amount(dev) >> 10)); + seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10)); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index f954ad93e81..deeb21c6865 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -483,7 +483,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); - for (;;) { + for (i = 0; i < 16; i++) { nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); @@ -502,6 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, break; } + if (i == 16) { + NV_ERROR(dev, "auxch DEFER too many times, bailing\n"); + ret = -EREMOTEIO; + goto out; + } + if (cmd & 1) { if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { ret = -EREMOTEIO; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d8b55901177..ace630aa89e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -76,6 +76,7 @@ struct nouveau_bo { struct ttm_buffer_object bo; struct ttm_placement placement; u32 placements[3]; + u32 busy_placements[3]; struct ttm_bo_kmap_obj kmap; struct list_head head; @@ -519,6 +520,7 @@ struct drm_nouveau_private { struct workqueue_struct *wq; struct work_struct irq_work; + struct work_struct hpd_work; struct list_head vbl_waiting; @@ -533,7 +535,6 @@ struct drm_nouveau_private { struct fb_info *fbdev_info; - int fifo_alloc_count; struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; struct nouveau_engine engine; @@ -553,12 +554,6 @@ struct drm_nouveau_private { uint32_t ramro_offset; uint32_t ramro_size; - /* base physical addresses */ - uint64_t fb_phys; - uint64_t fb_available_size; - uint64_t fb_mappable_pages; - uint64_t fb_aper_free; - struct { enum { NOUVEAU_GART_NONE = 0, @@ -572,10 +567,6 @@ struct drm_nouveau_private { struct nouveau_gpuobj *sg_ctxdma; struct page *sg_dummy_page; dma_addr_t sg_dummy_bus; - - /* nottm hack */ - struct drm_ttm_backend *sg_be; - unsigned long sg_handle; } gart_info; /* nv10-nv40 tiling regions */ @@ -584,6 +575,16 @@ struct drm_nouveau_private { spinlock_t lock; } tile; + /* VRAM/fb configuration */ + uint64_t vram_size; + uint64_t vram_sys_base; + + uint64_t fb_phys; + uint64_t fb_available_size; + uint64_t fb_mappable_pages; + uint64_t fb_aper_free; + int fb_mtrr; + /* G8x/G9x virtual address space */ uint64_t vm_gart_base; uint64_t vm_gart_size; @@ -592,10 +593,6 @@ struct drm_nouveau_private { uint64_t vm_end; struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; int vm_vram_pt_nr; - uint64_t vram_sys_base; - - /* the mtrr covering the FB */ - int fb_mtrr; struct mem_block *ramin_heap; @@ -614,11 +611,7 @@ struct drm_nouveau_private { uint32_t dac_users[4]; struct nouveau_suspend_resume { - uint32_t fifo_mode; - uint32_t graph_ctx_control; - uint32_t graph_state; uint32_t *ramin_copy; - uint64_t ramin_size; } susres; struct backlight_device *backlight; @@ -717,7 +710,7 @@ extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, struct drm_file *, int tail); extern void nouveau_mem_takedown(struct mem_block **heap); extern void nouveau_mem_free_block(struct mem_block *); -extern uint64_t nouveau_mem_fb_amount(struct drm_device 
*); +extern int nouveau_mem_detect(struct drm_device *dev); extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); extern int nouveau_mem_init(struct drm_device *); extern int nouveau_mem_init_agp(struct drm_device *); @@ -1124,7 +1117,8 @@ extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags); extern int nouveau_bo_unpin(struct nouveau_bo *); extern int nouveau_bo_map(struct nouveau_bo *); extern void nouveau_bo_unmap(struct nouveau_bo *); -extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype); +extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type, + uint32_t busy); extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); @@ -1168,6 +1162,10 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *, int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); +/* nv50_gpio.c */ +int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); +int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); + #ifndef ioread32_native #ifdef __BIG_ENDIAN #define ioread16_native ioread16be diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index bc4a24029ed..9f28b94e479 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -47,6 +47,7 @@ struct nouveau_encoder { union { struct { + int mc_unknown; int dpcd_version; int link_nr; int link_bw; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0d22f66f1c7..1bc0b38a516 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -180,40 +180,35 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, { struct nouveau_bo *nvbo = gem->driver_private; struct ttm_buffer_object *bo = &nvbo->bo; - uint64_t flags; + uint32_t domains = valid_domains & + (write_domains ? 
write_domains : read_domains); + uint32_t pref_flags = 0, valid_flags = 0; - if (!valid_domains || (!read_domains && !write_domains)) + if (!domains) return -EINVAL; - if (write_domains) { - if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && - (write_domains & NOUVEAU_GEM_DOMAIN_VRAM)) - flags = TTM_PL_FLAG_VRAM; - else - if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && - (write_domains & NOUVEAU_GEM_DOMAIN_GART)) - flags = TTM_PL_FLAG_TT; - else - return -EINVAL; - } else { - if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && - (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) && - bo->mem.mem_type == TTM_PL_VRAM) - flags = TTM_PL_FLAG_VRAM; - else - if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && - (read_domains & NOUVEAU_GEM_DOMAIN_GART) && - bo->mem.mem_type == TTM_PL_TT) - flags = TTM_PL_FLAG_TT; - else - if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && - (read_domains & NOUVEAU_GEM_DOMAIN_VRAM)) - flags = TTM_PL_FLAG_VRAM; - else - flags = TTM_PL_FLAG_TT; - } + if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) + valid_flags |= TTM_PL_FLAG_VRAM; + + if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) + valid_flags |= TTM_PL_FLAG_TT; + + if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && + bo->mem.mem_type == TTM_PL_VRAM) + pref_flags |= TTM_PL_FLAG_VRAM; + + else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && + bo->mem.mem_type == TTM_PL_TT) + pref_flags |= TTM_PL_FLAG_TT; + + else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) + pref_flags |= TTM_PL_FLAG_VRAM; + + else + pref_flags |= TTM_PL_FLAG_TT; + + nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); - nouveau_bo_placement_set(nvbo, flags); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 2bd59a92fee..13e73cee4c4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -51,6 +51,7 @@ nouveau_irq_preinstall(struct drm_device *dev) if (dev_priv->card_type == NV_50) { INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); + INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); INIT_LIST_HEAD(&dev_priv->vbl_waiting); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 2dc09dbd817..775a7017af6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -347,6 +347,20 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, return -EBUSY; } + nv_wr32(dev, 0x100c80, 0x00040001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + + nv_wr32(dev, 0x100c80, 0x00060001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + return 0; } @@ -387,6 +401,20 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return; + } + + nv_wr32(dev, 0x100c80, 0x00040001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); + return; + } + + nv_wr32(dev, 0x100c80, 0x00060001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + 
NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); } } @@ -449,9 +477,30 @@ void nouveau_mem_close(struct drm_device *dev) } } -/*XXX won't work on BSD because of pci_read_config_dword */ static uint32_t -nouveau_mem_fb_amount_igp(struct drm_device *dev) +nouveau_mem_detect_nv04(struct drm_device *dev) +{ + uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0); + + if (boot0 & 0x00000100) + return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; + + switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { + case NV04_BOOT_0_RAM_AMOUNT_32MB: + return 32 * 1024 * 1024; + case NV04_BOOT_0_RAM_AMOUNT_16MB: + return 16 * 1024 * 1024; + case NV04_BOOT_0_RAM_AMOUNT_8MB: + return 8 * 1024 * 1024; + case NV04_BOOT_0_RAM_AMOUNT_4MB: + return 4 * 1024 * 1024; + } + + return 0; +} + +static uint32_t +nouveau_mem_detect_nforce(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct pci_dev *bridge; @@ -463,11 +512,11 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev) return 0; } - if (dev_priv->flags&NV_NFORCE) { + if (dev_priv->flags & NV_NFORCE) { pci_read_config_dword(bridge, 0x7C, &mem); return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; } else - if (dev_priv->flags&NV_NFORCE2) { + if (dev_priv->flags & NV_NFORCE2) { pci_read_config_dword(bridge, 0x84, &mem); return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; } @@ -477,50 +526,32 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev) } /* returns the amount of FB ram in bytes */ -uint64_t nouveau_mem_fb_amount(struct drm_device *dev) +int +nouveau_mem_detect(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t boot0; - - switch (dev_priv->card_type) { - case NV_04: - boot0 = nv_rd32(dev, NV03_BOOT_0); - if (boot0 & 0x00000100) - return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; - - switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { - case NV04_BOOT_0_RAM_AMOUNT_32MB: - return 32 * 1024 * 1024; - case NV04_BOOT_0_RAM_AMOUNT_16MB: - return 16 * 1024 * 1024; - case NV04_BOOT_0_RAM_AMOUNT_8MB: - return 8 * 1024 * 1024; - case NV04_BOOT_0_RAM_AMOUNT_4MB: - return 4 * 1024 * 1024; - } - break; - case NV_10: - case NV_20: - case NV_30: - case NV_40: - case NV_50: - default: - if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { - return nouveau_mem_fb_amount_igp(dev); - } else { - uint64_t mem; - mem = (nv_rd32(dev, NV04_FIFO_DATA) & - NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> - NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; - return mem * 1024 * 1024; - } - break; + + if (dev_priv->card_type == NV_04) { + dev_priv->vram_size = nouveau_mem_detect_nv04(dev); + } else + if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { + dev_priv->vram_size = nouveau_mem_detect_nforce(dev); + } else { + dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); + dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; + if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) + dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; } - NV_ERROR(dev, - "Unable to detect video ram size. 
Please report your setup to " - DRIVER_EMAIL "\n"); - return 0; + NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); + if (dev_priv->vram_sys_base) { + NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", + dev_priv->vram_sys_base); + } + + if (dev_priv->vram_size) + return 0; + return -ENOMEM; } #if __OS_HAS_AGP @@ -631,15 +662,12 @@ nouveau_mem_init(struct drm_device *dev) spin_lock_init(&dev_priv->ttm.bo_list_lock); spin_lock_init(&dev_priv->tile.lock); - dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); - + dev_priv->fb_available_size = dev_priv->vram_size; dev_priv->fb_mappable_pages = dev_priv->fb_available_size; if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); dev_priv->fb_mappable_pages >>= PAGE_SHIFT; - NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20)); - /* remove reserved space at end of vram from available amount */ dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; dev_priv->fb_aper_free = dev_priv->fb_available_size; diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 86785b8d42e..1d6ee8b5515 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -172,6 +172,24 @@ nouveau_sgdma_unbind(struct ttm_backend *be) } dev_priv->engine.instmem.finish_access(nvbe->dev); + if (dev_priv->card_type == NV_50) { + nv_wr32(dev, 0x100c80, 0x00050001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", + nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + + nv_wr32(dev, 0x100c80, 0x00000001); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); + NV_ERROR(dev, "0x100c80 = 0x%08x\n", + nv_rd32(dev, 0x100c80)); + return -EBUSY; + } + } + nvbe->bound = false; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 10656a6be8e..e1710640a27 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -341,7 +341,7 @@ nouveau_card_init_channel(struct drm_device *dev) gpuobj = NULL; ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, - 0, nouveau_mem_fb_amount(dev), + 0, dev_priv->vram_size, NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, &gpuobj); if (ret) @@ -427,6 +427,10 @@ nouveau_card_init(struct drm_device *dev) goto out; } + ret = nouveau_mem_detect(dev); + if (ret) + goto out_bios; + ret = nouveau_gpuobj_early_init(dev); if (ret) goto out_bios; @@ -502,7 +506,7 @@ nouveau_card_init(struct drm_device *dev) else ret = nv04_display_create(dev); if (ret) - goto out_irq; + goto out_channel; } ret = nouveau_backlight_init(dev); @@ -516,6 +520,11 @@ nouveau_card_init(struct drm_device *dev) return 0; +out_channel: + if (dev_priv->channel) { + nouveau_channel_free(dev_priv->channel); + dev_priv->channel = NULL; + } out_irq: drm_irq_uninstall(dev); out_fifo: @@ -533,6 +542,7 @@ out_mc: out_gpuobj: nouveau_gpuobj_takedown(dev); out_mem: + nouveau_sgdma_takedown(dev); nouveau_mem_close(dev); out_instmem: engine->instmem.takedown(dev); diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 6b2ef4a9fce..500ccfd3a0b 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -278,7 +278,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev) default: nv_wr32(dev, 
0x2230, 0); nv_wr32(dev, NV40_PFIFO_RAMFC, - ((nouveau_mem_fb_amount(dev) - 512 * 1024 + + ((dev_priv->vram_size - 512 * 1024 + dev_priv->ramfc_offset) >> 16) | (3 << 16)); break; } diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 53e8afe1dcd..0616c96e4b6 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -335,6 +335,27 @@ nv40_graph_init(struct drm_device *dev) nv_wr32(dev, 0x400b38, 0x2ffff800); nv_wr32(dev, 0x400b3c, 0x00006000); + /* Tiling related stuff. */ + switch (dev_priv->chipset) { + case 0x44: + case 0x4a: + nv_wr32(dev, 0x400bc4, 0x1003d888); + nv_wr32(dev, 0x400bbc, 0xb7a7b500); + break; + case 0x46: + nv_wr32(dev, 0x400bc4, 0x0000e024); + nv_wr32(dev, 0x400bbc, 0xb7a7b520); + break; + case 0x4c: + case 0x4e: + case 0x67: + nv_wr32(dev, 0x400bc4, 0x1003d888); + nv_wr32(dev, 0x400bbc, 0xb7a7b540); + break; + default: + break; + } + /* Turn all the tiling regions off. */ for (i = 0; i < pfb->num_tiles; i++) nv40_graph_set_region_tiling(dev, i, 0, 0, 0); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index fac6c88a2b1..649db4c1b69 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -143,7 +143,7 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) } ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, - 0, nouveau_mem_fb_amount(dev)); + 0, dev_priv->vram_size); if (ret) { nv50_evo_channel_del(pchan); return ret; @@ -231,7 +231,7 @@ nv50_display_init(struct drm_device *dev) /* This used to be in crtc unblank, but seems out of place there. */ nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); /* RAM is clamped to 256 MiB. */ - ram_amount = nouveau_mem_fb_amount(dev); + ram_amount = dev_priv->vram_size; NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); if (ram_amount > 256*1024*1024) ram_amount = 256*1024*1024; @@ -529,8 +529,10 @@ int nv50_display_create(struct drm_device *dev) } ret = nv50_display_init(dev); - if (ret) + if (ret) { + nv50_display_destroy(dev); return ret; + } return 0; } @@ -885,10 +887,12 @@ nv50_display_error_handler(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); } -static void -nv50_display_irq_hotplug(struct drm_device *dev) +void +nv50_display_irq_hotplug_bh(struct work_struct *work) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = + container_of(work, struct drm_nouveau_private, hpd_work); + struct drm_device *dev = dev_priv->dev; struct drm_connector *connector; const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; uint32_t unplug_mask, plug_mask, change_mask; @@ -949,8 +953,10 @@ nv50_display_irq_handler(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t delayed = 0; - while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) - nv50_display_irq_hotplug(dev); + if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { + if (!work_pending(&dev_priv->hpd_work)) + queue_work(dev_priv->wq, &dev_priv->hpd_work); + } while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h index 3ae8d0725f6..581d405ac01 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.h +++ b/drivers/gpu/drm/nouveau/nv50_display.h @@ -37,6 +37,7 @@ void nv50_display_irq_handler(struct drm_device 
*dev); void nv50_display_irq_handler_bh(struct work_struct *work); +void nv50_display_irq_hotplug_bh(struct work_struct *work); int nv50_display_init(struct drm_device *dev); int nv50_display_create(struct drm_device *dev); int nv50_display_destroy(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 25a3cd8794f..a8c70e7e918 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -157,8 +157,11 @@ nv50_fbcon_accel_init(struct fb_info *info) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; struct nouveau_gpuobj *eng2d = NULL; + uint64_t fb; int ret, format; + fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base; + switch (info->var.bits_per_pixel) { case 8: format = 0xf3; @@ -248,9 +251,8 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.line_length); OUT_RING(chan, info->var.xres_virtual); OUT_RING(chan, info->var.yres_virtual); - OUT_RING(chan, 0); - OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + - dev_priv->vm_vram_base); + OUT_RING(chan, upper_32_bits(fb)); + OUT_RING(chan, lower_32_bits(fb)); BEGIN_RING(chan, NvSub2D, 0x0230, 2); OUT_RING(chan, format); OUT_RING(chan, 1); @@ -258,9 +260,8 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.line_length); OUT_RING(chan, info->var.xres_virtual); OUT_RING(chan, info->var.yres_virtual); - OUT_RING(chan, 0); - OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + - dev_priv->vm_vram_base); + OUT_RING(chan, upper_32_bits(fb)); + OUT_RING(chan, lower_32_bits(fb)); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c new file mode 100644 index 00000000000..c61782b314e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c @@ -0,0 +1,76 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_hw.h" + +static int +nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) +{ + const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; + + if (gpio->line > 32) + return -EINVAL; + + *reg = nv50_gpio_reg[gpio->line >> 3]; + *shift = (gpio->line & 7) << 2; + return 0; +} + +int +nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) +{ + struct dcb_gpio_entry *gpio; + uint32_t r, s, v; + + gpio = nouveau_bios_gpio_entry(dev, tag); + if (!gpio) + return -ENOENT; + + if (nv50_gpio_location(gpio, &r, &s)) + return -EINVAL; + + v = nv_rd32(dev, r) >> (s + 2); + return ((v & 1) == (gpio->state[1] & 1)); +} + +int +nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) +{ + struct dcb_gpio_entry *gpio; + uint32_t r, s, v; + + gpio = nouveau_bios_gpio_entry(dev, tag); + if (!gpio) + return -ENOENT; + + if (nv50_gpio_location(gpio, &r, &s)) + return -EINVAL; + + v = nv_rd32(dev, r) & ~(0x3 << s); + v |= (gpio->state[state] ^ 2) << s; + nv_wr32(dev, r, v); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index c62b33a02f8..b203d06f601 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -410,9 +410,10 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = { { 0x5039, false, NULL }, /* m2mf */ { 0x502d, false, NULL }, /* 2d */ { 0x50c0, false, NULL }, /* compute */ + { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ { 0x5097, false, NULL }, /* tesla (nv50) */ - { 0x8297, false, NULL }, /* tesla (nv80/nv90) */ - { 0x8397, false, NULL }, /* tesla (nva0) */ - { 0x8597, false, NULL }, /* tesla (nva8) */ + { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ + { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ + { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ {} }; diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c index 546b31949a3..42a8fb20c1e 100644 --- a/drivers/gpu/drm/nouveau/nv50_grctx.c +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c @@ -55,12 +55,12 @@ #define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) #define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 #define CP_FLAG_AUTO_LOAD_PENDING 1 +#define CP_FLAG_NEWCTX ((2 * 32) + 10) +#define CP_FLAG_NEWCTX_BUSY 0 +#define CP_FLAG_NEWCTX_DONE 1 #define CP_FLAG_XFER ((2 * 32) + 11) #define CP_FLAG_XFER_IDLE 0 #define CP_FLAG_XFER_BUSY 1 -#define CP_FLAG_NEWCTX ((2 * 32) + 12) -#define CP_FLAG_NEWCTX_BUSY 0 -#define CP_FLAG_NEWCTX_DONE 1 #define CP_FLAG_ALWAYS ((2 * 32) + 13) #define CP_FLAG_ALWAYS_FALSE 0 #define CP_FLAG_ALWAYS_TRUE 1 @@ -177,6 +177,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx) case 0x96: case 0x98: case 0xa0: + case 0xa3: case 0xa5: case 0xa8: case 0xaa: @@ -364,6 +365,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) case 0xac: gr_def(ctx, 0x401c00, 0x042500df); break; + case 0xa3: case 0xa5: case 0xa8: gr_def(ctx, 0x401c00, 0x142500df); @@ -418,6 +420,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) break; case 0x84: case 0xa0: + case 0xa3: case 0xa5: case 0xa8: case 0xaa: @@ -792,6 +795,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) case 0xa5: gr_def(ctx, offset + 0x1c, 0x310c0000); break; + case 0xa3: case 0xa8: case 0xaa: case 0xac: @@ -859,6 +863,8 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) else gr_def(ctx, offset + 0x8, 0x05010202); gr_def(ctx, offset + 0xc, 0x00030201); + if (dev_priv->chipset == 
0xa3) + cp_ctx(ctx, base + 0x36c, 1); cp_ctx(ctx, base + 0x400, 2); gr_def(ctx, base + 0x404, 0x00000040); @@ -1159,7 +1165,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) nv50_graph_construct_gene_unk8(ctx); if (dev_priv->chipset == 0xa0) xf_emit(ctx, 0x189, 0); - else if (dev_priv->chipset < 0xa8) + else if (dev_priv->chipset == 0xa3) + xf_emit(ctx, 0xd5, 0); + else if (dev_priv->chipset == 0xa5) xf_emit(ctx, 0x99, 0); else if (dev_priv->chipset == 0xaa) xf_emit(ctx, 0x65, 0); @@ -1197,6 +1205,8 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) ctx->ctxvals_pos = offset + 4; if (dev_priv->chipset == 0xa0) xf_emit(ctx, 0xa80, 0); + else if (dev_priv->chipset == 0xa3) + xf_emit(ctx, 0xa7c, 0); else xf_emit(ctx, 0xa7a, 0); xf_emit(ctx, 1, 0x3fffff); @@ -1341,6 +1351,7 @@ nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx) xf_emit(ctx, 0x942, 0); break; case 0xa0: + case 0xa3: xf_emit(ctx, 0x2042, 0); break; case 0xa5: diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index de1f5b0062c..5f21df31f3a 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -63,9 +63,10 @@ nv50_instmem_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan; uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; + uint32_t save_nv001700; + uint64_t v; struct nv50_instmem_priv *priv; int ret, i; - uint32_t v, save_nv001700; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -76,17 +77,12 @@ nv50_instmem_init(struct drm_device *dev) for (i = 0x1700; i <= 0x1710; i += 4) priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); - if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) - dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; - else - dev_priv->vram_sys_base = 0; - /* Reserve the last MiB of VRAM, we should probably try to avoid * setting up the below tables over the top of the VBIOS image at * some point. */ dev_priv->ramin_rsvd_vram = 1 << 20; - c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; + c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; c_size = 128 << 10; c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 
0x0 : 0x20; @@ -106,7 +102,7 @@ nv50_instmem_init(struct drm_device *dev) dev_priv->vm_gart_size = NV50_VM_BLOCK; dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; - dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev); + dev_priv->vm_vram_size = dev_priv->vram_size; if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); @@ -189,8 +185,8 @@ nv50_instmem_init(struct drm_device *dev) i = 0; while (v < dev_priv->vram_sys_base + c_offset + c_size) { - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); - BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v)); + BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v)); v += 0x1000; i += 8; } diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index c2fff543b06..0c68698f23d 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c @@ -211,7 +211,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, mode_ctl = 0x0200; break; case OUTPUT_DP: - mode_ctl |= 0x00050000; + mode_ctl |= (nv_encoder->dp.mc_unknown << 16); if (nv_encoder->dcb->sorconf.link & 1) mode_ctl |= 0x00000800; else @@ -274,6 +274,7 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) { + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_encoder *nv_encoder = NULL; struct drm_encoder *encoder; bool dum; @@ -319,5 +320,27 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; + if (nv_encoder->dcb->type == OUTPUT_DP) { + uint32_t mc, or = nv_encoder->or; + + if (dev_priv->chipset < 0x90 || + dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) + mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or)); + else + mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or)); + + switch ((mc & 0x00000f00) >> 8) { + case 8: + case 9: + nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16; + break; + default: + break; + } + + if (!nv_encoder->dp.mc_unknown) + nv_encoder->dp.mc_unknown = 5; + } + return 0; } diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 07b7ebf1f46..1d569830ed9 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -908,11 +908,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; + uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); + /* op needs the full dst value */ + dst = saved; shift = atom_get_src(ctx, attr, ptr); SDEBUG(" shift: %d\n", shift); dst <<= shift; + dst &= atom_arg_mask[dst_align]; + dst >>= atom_arg_shift[dst_align]; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } @@ -922,11 +927,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; + uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); + /* op needs the full dst value */ + dst = saved; shift = atom_get_src(ctx, attr, ptr); SDEBUG(" shift: %d\n", shift); dst >>= shift; + dst &= atom_arg_mask[dst_align]; + dst >>=
atom_arg_shift[dst_align]; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } @@ -1137,6 +1147,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32 int len, ws, ps, ptr; unsigned char op; atom_exec_context ectx; + int ret = 0; if (!base) return -EINVAL; @@ -1169,7 +1180,8 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32 if (ectx.abort) { DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", base, len, ws, ps, ptr - 1); - return -EINVAL; + ret = -EINVAL; + goto free; } if (op < ATOM_OP_CNT && op > 0) @@ -1184,9 +1196,10 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32 debug_depth--; SDEBUG("<<\n"); +free: if (ws) kfree(ectx.ws); - return 0; + return ret; } int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index fd4ef6d1884..a87990b3ae8 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -521,6 +521,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) adjusted_clock = mode->clock * 2; + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { + pll->algo = PLL_ALGO_LEGACY; + pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; + } } else { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c9580497ede..d7388fdb6d0 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2891,7 +2891,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, { struct radeon_bo *robj; unsigned long size; - unsigned u, i, w, h; + unsigned u, i, w, h, d; int ret; for (u = 0; u < track->num_texture; u++) { @@ -2923,20 +2923,25 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, h = h / (1 << i); if (track->textures[u].roundup_h) h = roundup_pow_of_two(h); + if (track->textures[u].tex_coord_type == 1) { + d = (1 << track->textures[u].txdepth) / (1 << i); + if (!d) + d = 1; + } else { + d = 1; + } if (track->textures[u].compress_format) { - size += r100_track_compress_size(track->textures[u].compress_format, w, h); + size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; /* compressed textures are block based */ } else - size += w * h; + size += w * h * d; } size *= track->textures[u].cpp; switch (track->textures[u].tex_coord_type) { case 0: - break; case 1: - size *= (1 << track->textures[u].txdepth); break; case 2: if (track->separate_cube) { @@ -3007,7 +3012,11 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) } } prim_walk = (track->vap_vf_cntl >> 4) & 0x3; - nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; + if (track->vap_vf_cntl & (1 << 14)) { + nverts = track->vap_alt_nverts; + } else { + nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; + } switch (prim_walk) { case 1: for (i = 0; i < track->num_arrays; i++) { diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index b27a6999d21..fadfe68de9c 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h @@ -64,6 +64,7 @@ struct r100_cs_track { unsigned maxy; unsigned vtx_size; unsigned vap_vf_cntl; + unsigned 
vap_alt_nverts; unsigned immd_dwords; unsigned num_arrays; unsigned max_indx; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 561048a7c0a..bd75f99bd65 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -325,11 +325,12 @@ void r300_gpu_init(struct radeon_device *rdev) r100_hdp_reset(rdev); /* FIXME: rv380 one pipes ? */ - if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) { + if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || + (rdev->family == CHIP_R350)) { /* r300,r350 */ rdev->num_gb_pipes = 2; } else { - /* rv350,rv370,rv380 */ + /* rv350,rv370,rv380,r300 AD */ rdev->num_gb_pipes = 1; } rdev->num_z_pipes = 1; @@ -729,6 +730,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p, /* VAP_VF_MAX_VTX_INDX */ track->max_indx = idx_value & 0x00FFFFFFUL; break; + case 0x2088: + /* VAP_ALT_NUM_VERTICES - only valid on r500 */ + if (p->rdev->family < CHIP_RV515) + goto fail; + track->vap_alt_nverts = idx_value & 0xFFFFFF; + break; case 0x43E4: /* SC_SCISSOR1 */ track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; @@ -766,7 +773,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p, tmp = idx_value & ~(0x7 << 16); tmp |= tile_flags; ib[idx] = tmp; - i = (reg - 0x4E38) >> 2; track->cb[i].pitch = idx_value & 0x3FFE; switch (((idx_value >> 21) & 0xF)) { @@ -1051,11 +1057,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; /* fallthrough do not move */ default: - printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", - reg, idx); - return -EINVAL; + goto fail; } return 0; +fail: + printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", + reg, idx); + return -EINVAL; } static int r300_packet3_check(struct radeon_cs_parser *p, diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index dac7042b797..1d898051c63 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -35,7 +35,7 @@ */ static int r600_audio_chipset_supported(struct radeon_device *rdev) { - return rdev->family >= CHIP_R600 + return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR) || rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740; diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 029fa1406d1..2616b822ba6 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -314,6 +314,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod struct radeon_device *rdev = dev->dev_private; uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; + if (ASIC_IS_DCE4(rdev)) + return; + if (!offset) return; @@ -484,6 +487,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + if (ASIC_IS_DCE4(rdev)) + return; + if (!radeon_encoder->hdmi_offset) { r600_hdmi_assign_block(encoder); if (!radeon_encoder->hdmi_offset) { @@ -525,6 +531,9 @@ void r600_hdmi_disable(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + if (ASIC_IS_DCE4(rdev)) + return; + if (!radeon_encoder->hdmi_offset) { dev_err(rdev->dev, "Disabling not enabled HDMI\n"); return; diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1fff95505cf..5673665ff21 100644 --- 
a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -69,16 +69,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev struct radeon_i2c_bus_rec i2c; int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); struct _ATOM_GPIO_I2C_INFO *i2c_info; - uint16_t data_offset; - int i; + uint16_t data_offset, size; + int i, num_indices; memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); i2c.valid = false; - if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { + if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); - for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_GPIO_I2C_ASSIGMENT); + + for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; if (gpio->sucI2cId.ucAccess == id) { diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 2becdeda68a..37db8adb274 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -760,7 +760,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct dac = RBIOS8(dac_info + 0x3) & 0xf; p_dac->ps2_pdac_adj = (bg << 8) | (dac); } - found = 1; + /* if the values are all zeros, use the table */ + if (p_dac->ps2_pdac_adj) + found = 1; } if (!found) /* fallback to defaults */ @@ -895,7 +897,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct bg = RBIOS8(dac_info + 0x10) & 0xf; dac = RBIOS8(dac_info + 0x11) & 0xf; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); - found = 1; + /* if the values are all zeros, use the table */ + if (tv_dac->ps2_tvdac_adj) + found = 1; } else if (rev > 1) { bg = RBIOS8(dac_info + 0xc) & 0xf; dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; @@ -908,7 +912,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct bg = RBIOS8(dac_info + 0xe) & 0xf; dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); - found = 1; + /* if the values are all zeros, use the table */ + if (tv_dac->ps2_tvdac_adj) + found = 1; } tv_dac->tv_std = radeon_combios_get_tv_info(rdev); } @@ -925,7 +931,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct (bg << 16) | (dac << 20); tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; - found = 1; + /* if the values are all zeros, use the table */ + if (tv_dac->ps2_tvdac_adj) + found = 1; } else { bg = RBIOS8(dac_info + 0x4) & 0xf; dac = RBIOS8(dac_info + 0x5) & 0xf; @@ -933,7 +941,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct (bg << 16) | (dac << 20); tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; - found = 1; + /* if the values are all zeros, use the table */ + if (tv_dac->ps2_tvdac_adj) + found = 1; } } else { DRM_INFO("No TV DAC info found in BIOS\n"); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 60d59816b94..1331351c517 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, { struct drm_device *dev = connector->dev; struct drm_connector *conflict; + struct radeon_connector *radeon_conflict; int i; 
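The radeon_lookup_i2c_gpio() change above sizes its loop from the table header instead of iterating a fixed ATOM_MAX_SUPPORTED_DEVICE, so it cannot run past a shorter GPIO_I2C_Info table or walk only part of a longer one. The general shape of that idiom, sketched with stand-in types for the ATOM structures:

#include <stdint.h>
#include <stddef.h>

struct table_header_sketch {	/* mirrors ATOM_COMMON_TABLE_HEADER */
	uint16_t size;		/* total table size in bytes */
	uint8_t format_rev;
	uint8_t content_rev;
};

struct i2c_assignment_sketch {	/* stand-in for ATOM_GPIO_I2C_ASSIGMENT */
	uint8_t access_id;
	/* remaining fields elided */
};

static const struct i2c_assignment_sketch *
find_i2c_by_id(const uint8_t *table, uint16_t size, uint8_t id)
{
	const struct i2c_assignment_sketch *ent =
		(const void *)(table + sizeof(struct table_header_sketch));
	int i, num_indices = (size - sizeof(struct table_header_sketch)) /
			     sizeof(struct i2c_assignment_sketch);

	for (i = 0; i < num_indices; i++)	/* bounded by actual table size */
		if (ent[i].access_id == id)
			return &ent[i];
	return NULL;
}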
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { if (conflict == connector) continue; + radeon_conflict = to_radeon_connector(conflict); for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (conflict->encoder_ids[i] == 0) break; @@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, if (conflict->status != connector_status_connected) continue; + if (radeon_conflict->use_digital) + continue; + if (priority == true) { DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); @@ -287,6 +292,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr if (property == rdev->mode_info.coherent_mode_property) { struct radeon_encoder_atom_dig *dig; + bool new_coherent_mode; /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); @@ -299,8 +305,11 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr return 0; dig = radeon_encoder->enc_priv; - dig->coherent_mode = val ? true : false; - radeon_property_change_mode(&radeon_encoder->base); + new_coherent_mode = val ? true : false; + if (dig->coherent_mode != new_coherent_mode) { + dig->coherent_mode = new_coherent_mode; + radeon_property_change_mode(&radeon_encoder->base); + } } if (property == rdev->mode_info.tv_std_property) { @@ -315,7 +324,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr radeon_encoder = to_radeon_encoder(encoder); if (!radeon_encoder->enc_priv) return 0; - if (rdev->is_atom_bios) { + if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) { struct radeon_encoder_atom_dac *dac_int; dac_int = radeon_encoder->enc_priv; dac_int->tv_std = val; diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index dc6eba6b96d..419630dd207 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) return -EBUSY; } -static void radeon_init_pipes(drm_radeon_private_t *dev_priv) +static void radeon_init_pipes(struct drm_device *dev) { + drm_radeon_private_t *dev_priv = dev->dev_private; uint32_t gb_tile_config, gb_pipe_sel = 0; if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { @@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv) dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; } else { /* R3xx */ - if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && + dev->pdev->device != 0x4144) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { dev_priv->num_gb_pipes = 2; } else { - /* R3Vxx */ + /* RV3xx/R300 AD */ dev_priv->num_gb_pipes = 1; } } @@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct drm_device * dev) /* setup the raster pipes */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) - radeon_init_pipes(dev_priv); + radeon_init_pipes(dev); /* Reset the CP ring */ radeon_do_cp_reset(dev_priv); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index bddf17f97da..7b629e30556 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -36,6 +36,54 @@ #include "radeon.h" #include "atom.h" +static const char radeon_family_name[][16] = { + "R100", + "RV100", + "RS100", + "RV200", + "RS200", + 
"R200", + "RV250", + "RS300", + "RV280", + "R300", + "R350", + "RV350", + "RV380", + "R420", + "R423", + "RV410", + "RS400", + "RS480", + "RS600", + "RS690", + "RS740", + "RV515", + "R520", + "RV530", + "RV560", + "RV570", + "R580", + "R600", + "RV610", + "RV630", + "RV670", + "RV620", + "RV635", + "RS780", + "RS880", + "RV770", + "RV730", + "RV710", + "RV740", + "CEDAR", + "REDWOOD", + "JUNIPER", + "CYPRESS", + "HEMLOCK", + "LAST", +}; + /* * Clear GPU surface registers. */ @@ -526,7 +574,6 @@ int radeon_device_init(struct radeon_device *rdev, int r; int dma_bits; - DRM_INFO("radeon: Initializing kernel modesetting.\n"); rdev->shutdown = false; rdev->dev = &pdev->dev; rdev->ddev = ddev; @@ -538,6 +585,10 @@ int radeon_device_init(struct radeon_device *rdev, rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; rdev->gpu_lockup = false; rdev->accel_working = false; + + DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n", + radeon_family_name[rdev->family], pdev->vendor, pdev->device); + /* mutex initialization are all done here so we * can recall function without having locking issues */ mutex_init(&rdev->cs_mutex); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 055a51732dc..4b05563d99e 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -43,9 +43,10 @@ * - 2.0.0 - initial interface * - 2.1.0 - add square tiling interface * - 2.2.0 - add r6xx/r7xx const buffer support + * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 2 +#define KMS_DRIVER_MINOR 3 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 52d6f96f274..30293bec080 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -317,12 +317,8 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); DAC_ENCODER_CONTROL_PS_ALLOCATION args; - int index = 0, num = 0; + int index = 0; struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; - enum radeon_tv_std tv_std = TV_STD_NTSC; - - if (dac_info->tv_std) - tv_std = dac_info->tv_std; memset(&args, 0, sizeof(args)); @@ -330,12 +326,10 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); - num = 1; break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); - num = 2; break; } @@ -346,7 +340,7 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.ucDacStandard = ATOM_DAC1_CV; else { - switch (tv_std) { + switch (dac_info->tv_std) { case TV_STD_PAL: case TV_STD_PAL_M: case TV_STD_SCART_PAL: @@ -377,10 +371,6 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) TV_ENCODER_CONTROL_PS_ALLOCATION args; int index = 0; struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; - enum radeon_tv_std tv_std = TV_STD_NTSC; - - if (dac_info->tv_std) - tv_std = dac_info->tv_std; memset(&args, 0, sizeof(args)); @@ -391,7 +381,7 
@@ atombios_tv_setup(struct drm_encoder *encoder, int action) if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) args.sTVEncoder.ucTvStandard = ATOM_TV_CV; else { - switch (tv_std) { + switch (dac_info->tv_std) { case TV_STD_NTSC: args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; break; @@ -875,6 +865,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v3.acConfig.fCoherentMode = 1; + if (radeon_encoder->pixel_clock > 165000) + args.v3.acConfig.fDualLinkConnector = 1; } } else if (ASIC_IS_DCE32(rdev)) { args.v2.acConfig.ucEncoderSel = dig->dig_encoder; @@ -898,6 +890,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v2.acConfig.fCoherentMode = 1; + if (radeon_encoder->pixel_clock > 165000) + args.v2.acConfig.fDualLinkConnector = 1; } } else { args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; @@ -1383,8 +1377,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: atombios_dac_setup(encoder, ATOM_ENABLE); - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) - atombios_tv_setup(encoder, ATOM_ENABLE); + if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) { + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) + atombios_tv_setup(encoder, ATOM_ENABLE); + else + atombios_tv_setup(encoder, ATOM_DISABLE); + } break; } atombios_apply_encoder_quirks(encoder, adjusted_mode); @@ -1558,12 +1556,14 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = { struct radeon_encoder_atom_dac * radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) { + struct drm_device *dev = radeon_encoder->base.dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); if (!dac) return NULL; - dac->tv_std = TV_STD_NTSC; + dac->tv_std = radeon_atombios_get_tv_info(rdev); return dac; } @@ -1641,6 +1641,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); + radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 93c7d5d4191..e329066dcab 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h @@ -36,7 +36,7 @@ * Radeon chip families */ enum radeon_family { - CHIP_R100, + CHIP_R100 = 0, CHIP_RV100, CHIP_RS100, CHIP_RV200, @@ -99,4 +99,5 @@ enum radeon_chip_flags { RADEON_IS_PCI = 0x00800000UL, RADEON_IS_IGPGART = 0x01000000UL, }; + #endif diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index cf389ce50a8..2441cca7d77 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -830,8 +830,8 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; if (rdev->family == 
CHIP_R420 || - rdev->family == CHIP_R423 || - rdev->family == CHIP_RV410) + rdev->family == CHIP_R423 || + rdev->family == CHIP_RV410) tv_dac_cntl |= (R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | R420_TV_DAC_BDACPD | @@ -907,35 +907,43 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, if (rdev->family != CHIP_R200) { tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); if (rdev->family == CHIP_R420 || - rdev->family == CHIP_R423 || - rdev->family == CHIP_RV410) { + rdev->family == CHIP_R423 || + rdev->family == CHIP_RV410) { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | - RADEON_TV_DAC_BGADJ_MASK | - R420_TV_DAC_DACADJ_MASK | - R420_TV_DAC_RDACPD | - R420_TV_DAC_GDACPD | - R420_TV_DAC_BDACPD | - R420_TV_DAC_TVENABLE); + RADEON_TV_DAC_BGADJ_MASK | + R420_TV_DAC_DACADJ_MASK | + R420_TV_DAC_RDACPD | + R420_TV_DAC_GDACPD | + R420_TV_DAC_BDACPD | + R420_TV_DAC_TVENABLE); } else { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | - RADEON_TV_DAC_BGADJ_MASK | - RADEON_TV_DAC_DACADJ_MASK | - RADEON_TV_DAC_RDACPD | - RADEON_TV_DAC_GDACPD | - RADEON_TV_DAC_BDACPD); + RADEON_TV_DAC_BGADJ_MASK | + RADEON_TV_DAC_DACADJ_MASK | + RADEON_TV_DAC_RDACPD | + RADEON_TV_DAC_GDACPD | + RADEON_TV_DAC_BDACPD); } - /* FIXME TV */ - if (tv_dac) { - struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; - tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | - RADEON_TV_DAC_NHOLD | - RADEON_TV_DAC_STD_PS2 | - tv_dac->ps2_tvdac_adj); + tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD; + + if (is_tv) { + if (tv_dac->tv_std == TV_STD_NTSC || + tv_dac->tv_std == TV_STD_NTSC_J || + tv_dac->tv_std == TV_STD_PAL_M || + tv_dac->tv_std == TV_STD_PAL_60) + tv_dac_cntl |= tv_dac->ntsc_tvdac_adj; + else + tv_dac_cntl |= tv_dac->pal_tvdac_adj; + + if (tv_dac->tv_std == TV_STD_NTSC || + tv_dac->tv_std == TV_STD_NTSC_J) + tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC; + else + tv_dac_cntl |= RADEON_TV_DAC_STD_PAL; } else - tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | - RADEON_TV_DAC_NHOLD | - RADEON_TV_DAC_STD_PS2); + tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 | + tv_dac->ps2_tvdac_adj); WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); } diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300 index 19c4663fa9c..1e97b2d129f 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r300 +++ b/drivers/gpu/drm/radeon/reg_srcs/r300 @@ -125,6 +125,8 @@ r300 0x4f60 0x4000 GB_VAP_RASTER_VTX_FMT_0 0x4004 GB_VAP_RASTER_VTX_FMT_1 0x4008 GB_ENABLE +0x4010 GB_MSPOS0 +0x4014 GB_MSPOS1 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 index 989f7a02083..e958980d00f 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r420 +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 @@ -125,6 +125,8 @@ r420 0x4f60 0x4000 GB_VAP_RASTER_VTX_FMT_0 0x4004 GB_VAP_RASTER_VTX_FMT_1 0x4008 GB_ENABLE +0x4010 GB_MSPOS0 +0x4014 GB_MSPOS1 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 6801b865d1c..83e8bc0c2bb 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 @@ -125,6 +125,8 @@ rs600 0x6d40 0x4000 GB_VAP_RASTER_VTX_FMT_0 0x4004 GB_VAP_RASTER_VTX_FMT_1 0x4008 GB_ENABLE +0x4010 GB_MSPOS0 +0x4014 GB_MSPOS1 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 38abf63bf2c..1e46233985e 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ 
b/drivers/gpu/drm/radeon/reg_srcs/rv515 @@ -35,6 +35,7 @@ rv515 0x6d40 0x1DA8 VAP_VPORT_ZSCALE 0x1DAC VAP_VPORT_ZOFFSET 0x2080 VAP_CNTL +0x208C VAP_INDEX_OFFSET 0x2090 VAP_OUT_VTX_FMT_0 0x2094 VAP_OUT_VTX_FMT_1 0x20B0 VAP_VTE_CNTL @@ -158,6 +159,8 @@ rv515 0x6d40 0x4000 GB_VAP_RASTER_VTX_FMT_0 0x4004 GB_VAP_RASTER_VTX_FMT_1 0x4008 GB_ENABLE +0x4010 GB_MSPOS0 +0x4014 GB_MSPOS1 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index abf824c2123..a81bc7a21e1 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -159,7 +159,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) WREG32_MC(R_000100_MC_PT0_CNTL, tmp); tmp = RREG32_MC(R_000100_MC_PT0_CNTL); - tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); + tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1); WREG32_MC(R_000100_MC_PT0_CNTL, tmp); tmp = RREG32_MC(R_000100_MC_PT0_CNTL); diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index c1605b528e8..0f28d91f29d 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -142,6 +142,12 @@ static const char *temperature_sensors_sets[][41] = { "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S", "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S", NULL }, +/* Set 17: iMac 9,1 */ + { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P", + "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL }, +/* Set 18: MacBook Pro 2,2 */ + { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0", + "Th0H", "Th1H", "Tm0P", "Ts0P", NULL }, }; /* List of keys used to read/write fan speeds */ @@ -1350,6 +1356,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = { { .accelerometer = 1, .light = 1, .temperature_set = 15 }, /* MacPro3,1: temperature set 16 */ { .accelerometer = 0, .light = 0, .temperature_set = 16 }, +/* iMac 9,1: light sensor only, temperature set 17 */ + { .accelerometer = 0, .light = 0, .temperature_set = 17 }, +/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */ + { .accelerometer = 1, .light = 1, .temperature_set = 18 }, }; /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". 
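The one-character rs600_gart_tlb_flush() fix above deserves a closer look: AND-ing two disjoint single-bit masks always evaluates to zero, so the old code never set either invalidate bit and the flush was a silent no-op. A standalone illustration (the mask values are made up, not the real MC_PT0_CNTL layout):

    #include <assert.h>

    /* Illustrative single-bit masks occupying different bit positions. */
    #define INVALIDATE_ALL_L1_TLBS	(1u << 0)
    #define INVALIDATE_L2_CACHE	(1u << 1)

    int main(void)
    {
    	unsigned int tmp = 0;

    	tmp |= INVALIDATE_ALL_L1_TLBS & INVALIDATE_L2_CACHE;
    	assert(tmp == 0);	/* buggy form: 0x1 & 0x2 == 0, so nothing is set */

    	tmp |= INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE;
    	assert(tmp == 0x3);	/* fixed form: both invalidate bits set */
    	return 0;
    }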
@@ -1375,6 +1385,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") }, &applesmc_dmi_data[9]}, + { applesmc_dmi_match, "Apple MacBook Pro 2,2", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") }, + &applesmc_dmi_data[18]}, { applesmc_dmi_match, "Apple MacBook Pro", { DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, @@ -1415,6 +1429,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, &applesmc_dmi_data[4]}, + { applesmc_dmi_match, "Apple iMac 9,1", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") }, + &applesmc_dmi_data[17]}, { applesmc_dmi_match, "Apple iMac 8", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 1002befd87d..5be09c048c5 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -539,14 +539,14 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr, struct it87_data *data = dev_get_drvdata(dev); long val; + u8 reg; if (strict_strtol(buf, 10, &val) < 0) return -EINVAL; - mutex_lock(&data->update_lock); - - data->sensor &= ~(1 << nr); - data->sensor &= ~(8 << nr); + reg = it87_read_value(data, IT87_REG_TEMP_ENABLE); + reg &= ~(1 << nr); + reg &= ~(8 << nr); if (val == 2) { /* backwards compatibility */ dev_warn(dev, "Sensor type 2 is deprecated, please use 4 " "instead\n"); @@ -554,14 +554,16 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr, } /* 3 = thermal diode; 4 = thermistor; 0 = disabled */ if (val == 3) - data->sensor |= 1 << nr; + reg |= 1 << nr; else if (val == 4) - data->sensor |= 8 << nr; - else if (val != 0) { - mutex_unlock(&data->update_lock); + reg |= 8 << nr; + else if (val != 0) return -EINVAL; - } + + mutex_lock(&data->update_lock); + data->sensor = reg; it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor); + data->valid = 0; /* Force cache refresh */ mutex_unlock(&data->update_lock); return count; } @@ -1841,14 +1843,10 @@ static void __devinit it87_init_device(struct platform_device *pdev) it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127); } - /* Check if temperature channels are reset manually or by some reason */ - tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE); - if ((tmp & 0x3f) == 0) { - /* Temp1,Temp3=thermistor; Temp2=thermal diode */ - tmp = (tmp & 0xc0) | 0x2a; - it87_write_value(data, IT87_REG_TEMP_ENABLE, tmp); - } - data->sensor = tmp; + /* Temperature channels are not forcibly enabled, as they can be + * set to two different sensor types and we can't guess which one + * is correct for a given system. These channels can be enabled at + * run-time through the temp{1-3}_type sysfs accessors if needed. 
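The set_sensor() rework above illustrates a tidy pattern for sysfs store callbacks: parse and validate the user's input before taking the update lock, so every error return happens with no lock held and the critical section shrinks to the final read-modify-write. A rough userspace sketch of the shape (the names and register bits mirror the it87 code but are illustrative):

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned char sensor_reg;	/* stand-in for the IT87_REG_TEMP_ENABLE value */

    /* Validate first; lock only around the final update. */
    static int store_sensor_type(long val, int nr)
    {
    	unsigned char reg = sensor_reg;	/* stand-in for it87_read_value() */

    	reg &= ~(1 << nr);	/* clear the thermal-diode bit */
    	reg &= ~(8 << nr);	/* clear the thermistor bit */
    	if (val == 3)
    		reg |= 1 << nr;
    	else if (val == 4)
    		reg |= 8 << nr;
    	else if (val != 0)
    		return -EINVAL;	/* rejected before any lock is taken */

    	pthread_mutex_lock(&update_lock);
    	sensor_reg = reg;	/* stand-in for it87_write_value() */
    	pthread_mutex_unlock(&update_lock);
    	return 0;
    }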
*/ /* Check if voltage monitors are reset manually or by some reason */ tmp = it87_read_value(data, IT87_REG_VIN_ENABLE); diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index 6b2d8ae64fe..a610e7880fb 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -303,13 +303,13 @@ error_ret: **/ static inline int sht15_calc_temp(struct sht15_data *data) { - int d1 = 0; + int d1 = temppoints[0].d1; int i; - for (i = 1; i < ARRAY_SIZE(temppoints); i++) + for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--) /* Find pointer to interpolate */ if (data->supply_uV > temppoints[i - 1].vdd) { - d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd) + d1 = (data->supply_uV - temppoints[i - 1].vdd) * (temppoints[i].d1 - temppoints[i - 1].d1) / (temppoints[i].vdd - temppoints[i - 1].vdd) + temppoints[i - 1].d1; @@ -542,7 +542,12 @@ static int __devinit sht15_probe(struct platform_device *pdev) /* If a regulator is available, query what the supply voltage actually is!*/ data->reg = regulator_get(data->dev, "vcc"); if (!IS_ERR(data->reg)) { - data->supply_uV = regulator_get_voltage(data->reg); + int voltage; + + voltage = regulator_get_voltage(data->reg); + if (voltage) + data->supply_uV = voltage; + regulator_enable(data->reg); /* setup a notifier block to update this if another device * causes the voltage to change */ diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index a4046e94158..f9daffd7d0e 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -264,8 +264,8 @@ void ide_retry_pc(ide_drive_t *drive) * of it. The failed command will be retried after sense data * is acquired. */ - blk_requeue_request(failed_rq->q, failed_rq); drive->hwif->rq = NULL; + ide_requeue_and_plug(drive, failed_rq); if (ide_queue_sense_rq(drive, pc)) { blk_start_request(failed_rq); ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq)); diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index 2c17e3fb43e..06b14bc9a1d 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c @@ -493,6 +493,7 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) if (rq) { hwif->rq = NULL; rq->errors = 0; + ide_requeue_and_plug(drive, rq); } return ret; } diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index db96138fefc..172ac921815 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -566,7 +566,7 @@ plug_device_2: blk_plug_device(q); } -static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) +void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) { struct request_queue *q = drive->queue; unsigned long flags; diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index cc8633cbe13..67fb73559fd 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -428,13 +428,11 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf, { struct request *rq; int error; + int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE; - rq = blk_get_request(drive->queue, READ, __GFP_WAIT); + rq = blk_get_request(drive->queue, rw, __GFP_WAIT); rq->cmd_type = REQ_TYPE_ATA_TASKFILE; - if (cmd->tf_flags & IDE_TFLAG_WRITE) - rq->cmd_flags |= REQ_RW; - /* * (ks) We transfer currently only whole sectors. * This is suffient for now. 
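The sht15_calc_temp() change above repairs the interpolation on three axes: d1 now defaults to the first calibration entry rather than 0, the table is walked from the top so the correct segment is chosen, and the supply voltage is kept in microvolts so both sides of the division use the same units (the old code divided by 1000 while comparing against microvolt table entries). A self-contained sketch of that piecewise linear interpolation, with coefficients only loosely based on the SHT15 datasheet:

    #include <stdio.h>

    struct vdd_point {
    	int vdd;	/* supply voltage in uV */
    	int d1;		/* temperature offset in 1/1000 degrees C */
    };

    /* Hypothetical calibration table, ordered by rising vdd. */
    static const struct vdd_point temppoints[] = {
    	{ 2500000, -39400 },
    	{ 3000000, -39600 },
    	{ 3500000, -39700 },
    	{ 5000000, -40100 },
    };

    static int calc_d1(int supply_uV)
    {
    	int d1 = temppoints[0].d1;	/* sensible default instead of 0 */
    	int i;

    	/* Scan from the top so the highest matching segment wins. */
    	for (i = (int)(sizeof(temppoints) / sizeof(temppoints[0])) - 1; i > 0; i--) {
    		if (supply_uV > temppoints[i - 1].vdd) {
    			d1 = (supply_uV - temppoints[i - 1].vdd)	/* same units on both axes */
    				* (temppoints[i].d1 - temppoints[i - 1].d1)
    				/ (temppoints[i].vdd - temppoints[i - 1].vdd)
    				+ temppoints[i - 1].d1;
    			break;
    		}
    	}
    	return d1;
    }

    int main(void)
    {
    	printf("d1 at 3.3 V: %d\n", calc_d1(3300000));	/* prints -39660 */
    	return 0;
    }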
But, it would be great, diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index fc73d6ac11b..ad63b79afac 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3694,7 +3694,7 @@ static void cm_add_one(struct ib_device *ib_device) cm_dev->device = device_create(&cm_class, &ib_device->dev, MKDEV(0, 0), NULL, "%s", ib_device->name); - if (!cm_dev->device) { + if (IS_ERR(cm_dev->device)) { kfree(cm_dev); return; } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 7794249430c..6d777069d86 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1684,6 +1684,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id, } memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths); + id->route.num_paths = num_paths; return 0; err: cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 56147b28a23..1d27b9a8e2d 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -240,7 +240,7 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev, size, &mfrpl->map, GFP_KERNEL); - if (!mfrpl->ibfrpl.page_list) + if (!mfrpl->mapped_page_list) goto err_free; WARN_ON(mfrpl->map & 0x3f); diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 5a076e8f116..e54f312e4bd 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -2821,11 +2821,10 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, attr->cap.max_send_wr = nesqp->hwqp.sq_size; attr->cap.max_recv_wr = nesqp->hwqp.rq_size; attr->cap.max_recv_sge = 1; - if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { - init_attr->cap.max_inline_data = 0; - } else { - init_attr->cap.max_inline_data = 64; - } + if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) + attr->cap.max_inline_data = 0; + else + attr->cap.max_inline_data = 64; init_attr->event_handler = nesqp->ibqp.event_handler; init_attr->qp_context = nesqp->ibqp.qp_context; diff --git a/drivers/input/input.c b/drivers/input/input.c index afd4e2b7658..9c79bd56b51 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -660,7 +660,14 @@ static int input_default_setkeycode(struct input_dev *dev, int input_get_keycode(struct input_dev *dev, unsigned int scancode, unsigned int *keycode) { - return dev->getkeycode(dev, scancode, keycode); + unsigned long flags; + int retval; + + spin_lock_irqsave(&dev->event_lock, flags); + retval = dev->getkeycode(dev, scancode, keycode); + spin_unlock_irqrestore(&dev->event_lock, flags); + + return retval; } EXPORT_SYMBOL(input_get_keycode); diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index ffc25cfcef7..b443e088fd3 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -374,7 +374,9 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev) input_dev->name = pdev->name; input_dev->id.bustype = BUS_HOST; input_dev->dev.parent = &pdev->dev; - input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); + input_dev->evbit[0] = BIT_MASK(EV_KEY); + if (!pdata->no_autorepeat) + input_dev->evbit[0] |= BIT_MASK(EV_REP); input_dev->open = matrix_keypad_start; input_dev->close = matrix_keypad_stop; diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 
99d58764ef0..0d22cb9ce42 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -64,6 +64,7 @@ static const struct alps_model_info alps_model_data[] = { { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ + { { 0x73, 0x02, 0x64 }, 0xf8, 0xf8, 0 }, /* HP Pavilion dm3 */ { { 0x52, 0x01, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ }; diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 4f8fe0886b2..b89879bd860 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -803,7 +803,6 @@ static struct usb_driver bcm5974_driver = { .disconnect = bcm5974_disconnect, .suspend = bcm5974_suspend, .resume = bcm5974_resume, - .reset_resume = bcm5974_resume, .id_table = bcm5974_table, .supports_autosuspend = 1, }; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 577688b5b95..6440a8f5568 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -39,7 +39,7 @@ MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port."); static bool i8042_nomux; module_param_named(nomux, i8042_nomux, bool, 0); -MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing conrtoller is present."); +MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present."); static bool i8042_unlock; module_param_named(unlock, i8042_unlock, bool, 0); diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c index 82ae18d2968..01424834476 100644 --- a/drivers/input/sparse-keymap.c +++ b/drivers/input/sparse-keymap.c @@ -68,12 +68,14 @@ static int sparse_keymap_getkeycode(struct input_dev *dev, unsigned int scancode, unsigned int *keycode) { - const struct key_entry *key = - sparse_keymap_entry_from_scancode(dev, scancode); + const struct key_entry *key; - if (key && key->type == KE_KEY) { - *keycode = key->keycode; - return 0; + if (dev->keycode) { + key = sparse_keymap_entry_from_scancode(dev, scancode); + if (key && key->type == KE_KEY) { + *keycode = key->keycode; + return 0; + } } return -EINVAL; @@ -86,17 +88,16 @@ static int sparse_keymap_setkeycode(struct input_dev *dev, struct key_entry *key; int old_keycode; - if (keycode < 0 || keycode > KEY_MAX) - return -EINVAL; - - key = sparse_keymap_entry_from_scancode(dev, scancode); - if (key && key->type == KE_KEY) { - old_keycode = key->keycode; - key->keycode = keycode; - set_bit(keycode, dev->keybit); - if (!sparse_keymap_entry_from_keycode(dev, old_keycode)) - clear_bit(old_keycode, dev->keybit); - return 0; + if (dev->keycode) { + key = sparse_keymap_entry_from_scancode(dev, scancode); + if (key && key->type == KE_KEY) { + old_keycode = key->keycode; + key->keycode = keycode; + set_bit(keycode, dev->keybit); + if (!sparse_keymap_entry_from_keycode(dev, old_keycode)) + clear_bit(old_keycode, dev->keybit); + return 0; + } } return -EINVAL; @@ -164,7 +165,7 @@ int sparse_keymap_setup(struct input_dev *dev, return 0; err_out: - kfree(keymap); + kfree(map); return error; } @@ -176,14 +177,27 @@ EXPORT_SYMBOL(sparse_keymap_setup); * * This function is used to free memory allocated by sparse keymap * in an input device that was set up by sparse_keymap_setup(). 
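The sparse_keymap_setup() error path above freed the caller's template (keymap) instead of the copy the function had just allocated (map), leaking the copy and handing kfree() memory it did not own. A simplified sketch of the own-your-copy rule (the signature and the validation step are invented for illustration):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct key_entry {
    	int type;
    	int code;
    	int keycode;
    };

    /*
     * Copy the caller's template so the device owns its keymap; on error,
     * free our own copy (map), never the caller's template (keymap).
     */
    static int keymap_setup(struct key_entry **dev_map,
    			const struct key_entry *keymap, size_t n)
    {
    	struct key_entry *map;
    	size_t i;

    	map = calloc(n, sizeof(*map));
    	if (!map)
    		return -ENOMEM;
    	memcpy(map, keymap, n * sizeof(*map));

    	for (i = 0; i < n; i++) {
    		if (map[i].type < 0) {	/* stand-in validation step */
    			free(map);	/* the copy we allocated */
    			return -EINVAL;
    		}
    	}

    	*dev_map = map;
    	return 0;
    }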
+ * NOTE: It is safe to call this function while the input device is + * still registered (however, drivers should take care not to use the + * freed keymap, and thus must shut off interrupts/polling + * before freeing the keymap). */ void sparse_keymap_free(struct input_dev *dev) { + unsigned long flags; + + /* + * Take event lock to prevent racing with input_get_keycode() + * and input_set_keycode() if we are called while input device + * is still registered. + */ + spin_lock_irqsave(&dev->event_lock, flags); + kfree(dev->keycode); dev->keycode = NULL; dev->keycodemax = 0; - dev->getkeycode = NULL; - dev->setkeycode = NULL; + + spin_unlock_irqrestore(&dev->event_lock, flags); } EXPORT_SYMBOL(sparse_keymap_free); diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 8b5d2873f0c..f46502589e4 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -673,13 +673,15 @@ static int wacom_resume(struct usb_interface *intf) int rv; mutex_lock(&wacom->lock); - if (wacom->open) { + + /* switch to wacom mode first */ + wacom_query_tablet_data(intf, features); + + if (wacom->open) rv = usb_submit_urb(wacom->irq, GFP_NOIO); - /* switch to wacom mode if needed */ - if (!wacom_retrieve_hid_descriptor(intf, features)) - wacom_query_tablet_data(intf, features); - } else + else rv = 0; + + mutex_unlock(&wacom->lock); return rv; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index b3ba3437a2e..4a852d815c6 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -155,19 +155,19 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; - int x, y, prox; - int rw = 0; - int retval = 0; + int x, y, rw; + static int penData = 0; if (data[0] != WACOM_REPORT_PENABLED) { dbg("wacom_graphire_irq: received unknown report #%d", data[0]); - goto exit; + return 0; } - prox = data[1] & 0x80; - if (prox || wacom->id[0]) { - if (prox) { - switch ((data[1] >> 5) & 3) { + if (data[1] & 0x80) { + /* in prox and not pad data */ + penData = 1; + + switch ((data[1] >> 5) & 3) { case 0: /* Pen */ wacom->tool[0] = BTN_TOOL_PEN; @@ -181,13 +181,23 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) case 2: /* Mouse with wheel */ wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04); + if (features->type == WACOM_G4 || features->type == WACOM_MO) { + rw = data[7] & 0x04 ?
(data[7] & 0x03)-4 : (data[7] & 0x03); + wacom_report_rel(wcombo, REL_WHEEL, -rw); + } else + wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]); /* fall through */ case 3: /* Mouse without wheel */ wacom->tool[0] = BTN_TOOL_MOUSE; wacom->id[0] = CURSOR_DEVICE_ID; + wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01); + wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02); + if (features->type == WACOM_G4 || features->type == WACOM_MO) + wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f); + else + wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f); break; - } } x = wacom_le16_to_cpu(&data[2]); y = wacom_le16_to_cpu(&data[4]); @@ -198,32 +208,36 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01); wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04); - } else { - wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01); - wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02); - if (features->type == WACOM_G4 || - features->type == WACOM_MO) { - wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f); - rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); - } else { - wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f); - rw = -(signed)data[6]; - } - wacom_report_rel(wcombo, REL_WHEEL, rw); } - - if (!prox) - wacom->id[0] = 0; wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */ - wacom_report_key(wcombo, wacom->tool[0], prox); - wacom_input_sync(wcombo); /* sync last event */ + wacom_report_key(wcombo, wacom->tool[0], 1); + } else if (wacom->id[0]) { + wacom_report_abs(wcombo, ABS_X, 0); + wacom_report_abs(wcombo, ABS_Y, 0); + if (wacom->tool[0] == BTN_TOOL_MOUSE) { + wacom_report_key(wcombo, BTN_LEFT, 0); + wacom_report_key(wcombo, BTN_RIGHT, 0); + wacom_report_abs(wcombo, ABS_DISTANCE, 0); + } else { + wacom_report_abs(wcombo, ABS_PRESSURE, 0); + wacom_report_key(wcombo, BTN_TOUCH, 0); + wacom_report_key(wcombo, BTN_STYLUS, 0); + wacom_report_key(wcombo, BTN_STYLUS2, 0); + } + wacom->id[0] = 0; + wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */ + wacom_report_key(wcombo, wacom->tool[0], 0); } /* send pad data */ switch (features->type) { case WACOM_G4: - prox = data[7] & 0xf8; - if (prox || wacom->id[1]) { + if (data[7] & 0xf8) { + if (penData) { + wacom_input_sync(wcombo); /* sync last event */ + if (!wacom->id[0]) + penData = 0; + } wacom->id[1] = PAD_DEVICE_ID; wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); @@ -231,16 +245,29 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) wacom_report_rel(wcombo, REL_WHEEL, rw); wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); - if (!prox) - wacom->id[1] = 0; - wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); + wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); + } else if (wacom->id[1]) { + if (penData) { + wacom_input_sync(wcombo); /* sync last event */ + if (!wacom->id[0]) + penData = 0; + } + wacom->id[1] = 0; + wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); + wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); + wacom_report_rel(wcombo, REL_WHEEL, 0); + wacom_report_key(wcombo, BTN_TOOL_FINGER, 0); + wacom_report_abs(wcombo, ABS_MISC, 0); wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); } - retval = 1; break; case WACOM_MO: - prox = (data[7] & 0xf8) || data[8]; - if (prox || wacom->id[1]) { + if ((data[7] & 0xf8) || (data[8] & 0xff)) { + if (penData) { 
+ wacom_input_sync(wcombo); /* sync last event */ + if (!wacom->id[0]) + penData = 0; + } wacom->id[1] = PAD_DEVICE_ID; wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); @@ -248,16 +275,27 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo) wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); - if (!prox) - wacom->id[1] = 0; wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); + } else if (wacom->id[1]) { + if (penData) { + wacom_input_sync(wcombo); /* sync last event */ + if (!wacom->id[0]) + penData = 0; + } + wacom->id[1] = 0; + wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); + wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); + wacom_report_key(wcombo, BTN_4, (data[7] & 0x10)); + wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); + wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); + wacom_report_key(wcombo, BTN_TOOL_FINGER, 0); + wacom_report_abs(wcombo, ABS_MISC, 0); + wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); } - retval = 1; break; } -exit: - return retval; + return 1; } static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo) @@ -598,9 +636,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo) static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx) { wacom_report_abs(wcombo, ABS_X, - data[2 + idx * 2] | ((data[3 + idx * 2] & 0x7f) << 8)); + (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8)); wacom_report_abs(wcombo, ABS_Y, - data[6 + idx * 2] | ((data[7 + idx * 2] & 0x7f) << 8)); + (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8)); wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); wacom_report_key(wcombo, wacom->tool[idx], 1); if (idx) @@ -744,24 +782,31 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo) touchInProx = 0; - if (!wacom->id[0]) { /* first in prox */ - /* Going into proximity select tool */ - wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; - if (wacom->tool[0] == BTN_TOOL_PEN) - wacom->id[0] = STYLUS_DEVICE_ID; - else - wacom->id[0] = ERASER_DEVICE_ID; - } - wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); - wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); - wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2])); - wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4])); - pressure = ((data[7] & 0x01) << 8) | data[6]; - if (pressure < 0) - pressure = features->pressure_max + pressure + 1; - wacom_report_abs(wcombo, ABS_PRESSURE, pressure); - wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05); - if (!prox) { /* out-prox */ + if (prox) { /* in prox */ + if (!wacom->id[0]) { + /* Going into proximity select tool */ + wacom->tool[0] = (data[1] & 0x0c) ? 
BTN_TOOL_RUBBER : BTN_TOOL_PEN; + if (wacom->tool[0] == BTN_TOOL_PEN) + wacom->id[0] = STYLUS_DEVICE_ID; + else + wacom->id[0] = ERASER_DEVICE_ID; + } + wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); + wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); + wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2])); + wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4])); + pressure = ((data[7] & 0x01) << 8) | data[6]; + if (pressure < 0) + pressure = features->pressure_max + pressure + 1; + wacom_report_abs(wcombo, ABS_PRESSURE, pressure); + wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05); + } else { + wacom_report_abs(wcombo, ABS_X, 0); + wacom_report_abs(wcombo, ABS_Y, 0); + wacom_report_abs(wcombo, ABS_PRESSURE, 0); + wacom_report_key(wcombo, BTN_STYLUS, 0); + wacom_report_key(wcombo, BTN_STYLUS2, 0); + wacom_report_key(wcombo, BTN_TOUCH, 0); wacom->id[0] = 0; /* pen is out so touch can be enabled now */ touchInProx = 1; diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 0be15c70c16..47a5ffec55a 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -14,11 +14,6 @@ */ #include "gigaset.h" - -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/timer.h> #include <linux/usb.h> #include <linux/module.h> #include <linux/moduleparam.h> diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index eb7e27105a8..964a55fb148 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c @@ -12,8 +12,6 @@ */ #include "gigaset.h" -#include <linux/slab.h> -#include <linux/ctype.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/isdn/capilli.h> diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index 0b39b387c12..f6f45f22192 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c @@ -14,10 +14,8 @@ */ #include "gigaset.h" -#include <linux/ctype.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/slab.h> /* Version Information */ #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h index 9ef5b0463fd..05947f9c184 100644 --- a/drivers/isdn/gigaset/gigaset.h +++ b/drivers/isdn/gigaset/gigaset.h @@ -20,11 +20,12 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> +#include <linux/sched.h> #include <linux/compiler.h> #include <linux/types.h> +#include <linux/ctype.h> #include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/usb.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/ppp_defs.h> diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c index c99fb9790a1..c22e5ace827 100644 --- a/drivers/isdn/gigaset/i4l.c +++ b/drivers/isdn/gigaset/i4l.c @@ -15,7 +15,6 @@ #include "gigaset.h" #include <linux/isdnif.h> -#include <linux/slab.h> #define HW_HDR_LEN 2 /* Header size used to store ack info */ diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c index f0dc6c9cc28..c9f28dd40d5 100644 --- a/drivers/isdn/gigaset/interface.c +++ b/drivers/isdn/gigaset/interface.c @@ -13,7 +13,6 @@ #include "gigaset.h" #include <linux/gigaset_dev.h> -#include <linux/tty.h> #include <linux/tty_flip.h> /*** our ioctls ***/ diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c index b69f73a0668..b943efbff44 100644 --- 
a/drivers/isdn/gigaset/proc.c +++ b/drivers/isdn/gigaset/proc.c @@ -14,7 +14,6 @@ */ #include "gigaset.h" -#include <linux/ctype.h> static ssize_t show_cidmode(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 8b0afd203a0..e96c0586886 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -11,13 +11,10 @@ */ #include "gigaset.h" - #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> -#include <linux/tty.h> #include <linux/completion.h> -#include <linux/slab.h> /* Version Information */ #define DRIVER_AUTHOR "Tilman Schmidt" diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index 9430a2bbb52..76dbb20f306 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -16,10 +16,6 @@ */ #include "gigaset.h" - -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/slab.h> #include <linux/usb.h> #include <linux/module.h> #include <linux/moduleparam.h> diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 07090f379c6..69c84a1d88e 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c @@ -178,7 +178,7 @@ static void set_status(struct virtio_device *vdev, u8 status) /* We set the status. */ to_lgdev(vdev)->desc->status = status; - kvm_hypercall1(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset); + hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0); } static void lg_set_status(struct virtio_device *vdev, u8 status) @@ -229,7 +229,7 @@ static void lg_notify(struct virtqueue *vq) */ struct lguest_vq_info *lvq = vq->priv; - kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); + hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0); } /* An extern declaration inside a C file is bad form. Don't do it. */ diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index fb2b7ef7868..b4eb675a807 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -288,6 +288,18 @@ static int emulate_insn(struct lg_cpu *cpu) insn = lgread(cpu, physaddr, u8); /* + * Around 2.6.33, the kernel started using an emulation for the + * cmpxchg8b instruction in early boot on many configurations. This + * code isn't paravirtualized, and it tries to disable interrupts. + * Ignore it, which will Mostly Work. + */ + if (insn == 0xfa) { + /* "cli", or Clear Interrupt Enable instruction. Skip it. */ + cpu->regs->eip++; + return 1; + } + + /* * 0x66 is an "operand prefix". It means it's using the upper 16 bits * of the eax register. 
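The emulate_insn() addition above deals with the cmpxchg8b emulation that newer guest kernels run during early boot: the unparavirtualized code issues a cli, and rather than faulting, the host recognizes the single-byte opcode (0xfa) and advances eip past it ("which will Mostly Work", per the comment). A minimal sketch of that decode-and-skip shape, with the structures reduced to stand-ins:

    /* Simplified guest register file; the real lg_cpu carries much more. */
    struct guest_regs {
    	unsigned long eip;
    };

    /*
     * Returns 1 if the instruction was handled by skipping it,
     * 0 to fall back to the full emulation/fault path.
     */
    static int skip_cli(struct guest_regs *regs, unsigned char insn)
    {
    	if (insn == 0xfa) {	/* cli: clear interrupt enable, one byte long */
    		regs->eip += 1;	/* just step over it; interrupts stay host-managed */
    		return 1;
    	}
    	return 0;
    }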
*/ diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c index c092354591b..ce8897933a8 100644 --- a/drivers/macintosh/windfarm_core.c +++ b/drivers/macintosh/windfarm_core.c @@ -210,6 +210,7 @@ int wf_register_control(struct wf_control *new_ct) kref_init(&new_ct->ref); list_add(&new_ct->link, &wf_controls); + sysfs_attr_init(&new_ct->attr.attr); new_ct->attr.attr.name = new_ct->name; new_ct->attr.attr.mode = 0644; new_ct->attr.show = wf_show_control; diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c index 6c23456e0bd..71f50565f63 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c +++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c @@ -423,10 +423,12 @@ static void pvr2_sysfs_add_debugifc(struct pvr2_sysfs *sfp) dip = kzalloc(sizeof(*dip),GFP_KERNEL); if (!dip) return; + sysfs_attr_init(&dip->attr_debugcmd.attr); dip->attr_debugcmd.attr.name = "debugcmd"; dip->attr_debugcmd.attr.mode = S_IRUGO|S_IWUSR|S_IWGRP; dip->attr_debugcmd.show = debugcmd_show; dip->attr_debugcmd.store = debugcmd_store; + sysfs_attr_init(&dip->attr_debuginfo.attr); dip->attr_debuginfo.attr.name = "debuginfo"; dip->attr_debuginfo.attr.mode = S_IRUGO; dip->attr_debuginfo.show = debuginfo_show; @@ -644,6 +646,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, return; } + sysfs_attr_init(&sfp->attr_v4l_minor_number.attr); sfp->attr_v4l_minor_number.attr.name = "v4l_minor_number"; sfp->attr_v4l_minor_number.attr.mode = S_IRUGO; sfp->attr_v4l_minor_number.show = v4l_minor_number_show; @@ -658,6 +661,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, sfp->v4l_minor_number_created_ok = !0; } + sysfs_attr_init(&sfp->attr_v4l_radio_minor_number.attr); sfp->attr_v4l_radio_minor_number.attr.name = "v4l_radio_minor_number"; sfp->attr_v4l_radio_minor_number.attr.mode = S_IRUGO; sfp->attr_v4l_radio_minor_number.show = v4l_radio_minor_number_show; @@ -672,6 +676,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, sfp->v4l_radio_minor_number_created_ok = !0; } + sysfs_attr_init(&sfp->attr_unit_number.attr); sfp->attr_unit_number.attr.name = "unit_number"; sfp->attr_unit_number.attr.mode = S_IRUGO; sfp->attr_unit_number.show = unit_number_show; @@ -685,6 +690,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, sfp->unit_number_created_ok = !0; } + sysfs_attr_init(&sfp->attr_bus_info.attr); sfp->attr_bus_info.attr.name = "bus_info_str"; sfp->attr_bus_info.attr.mode = S_IRUGO; sfp->attr_bus_info.show = bus_info_show; @@ -699,6 +705,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, sfp->bus_info_created_ok = !0; } + sysfs_attr_init(&sfp->attr_hdw_name.attr); sfp->attr_hdw_name.attr.name = "device_hardware_type"; sfp->attr_hdw_name.attr.mode = S_IRUGO; sfp->attr_hdw_name.show = hdw_name_show; @@ -713,6 +720,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp, sfp->hdw_name_created_ok = !0; } + sysfs_attr_init(&sfp->attr_hdw_desc.attr); sfp->attr_hdw_desc.attr.name = "device_hardware_description"; sfp->attr_hdw_desc.attr.mode = S_IRUGO; sfp->attr_hdw_desc.show = hdw_desc_show; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 83f0affadca..e9caf694c59 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1179,15 +1179,10 @@ static void omap_hsmmc_detect(struct work_struct *work) carddetect = -ENOSYS; } - if (carddetect) { + if (carddetect) mmc_detect_change(host->mmc, (HZ * 200) / 1000); - } else { - mmc_host_enable(host->mmc); - 
omap_hsmmc_reset_controller_fsm(host, SRD); - mmc_host_lazy_disable(host->mmc); - + else mmc_detect_change(host->mmc, (HZ * 50) / 1000); - } } /* diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 0ba5b8e50a7..7b832c727f8 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2582,6 +2582,31 @@ config CHELSIO_T3 To compile this driver as a module, choose M here: the module will be called cxgb3. +config CHELSIO_T4_DEPENDS + tristate + depends on PCI && INET + default y + +config CHELSIO_T4 + tristate "Chelsio Communications T4 Ethernet support" + depends on CHELSIO_T4_DEPENDS + select FW_LOADER + select MDIO + help + This driver supports Chelsio T4-based gigabit and 10Gb Ethernet + adapters. + + For general information about Chelsio and our products, visit + our website at <http://www.chelsio.com>. + + For customer support, please visit our customer support page at + <http://www.chelsio.com/support.htm>. + + Please send feedback to <linux-bugs@chelsio.com>. + + To compile this driver as a module choose M here; the module + will be called cxgb4. + config EHEA tristate "eHEA Ethernet support" depends on IBMEBUS && INET && SPARSEMEM diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 478886234c2..a583b50d9de 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_IP1000) += ipg.o obj-$(CONFIG_CHELSIO_T1) += chelsio/ obj-$(CONFIG_CHELSIO_T3) += cxgb3/ +obj-$(CONFIG_CHELSIO_T4) += cxgb4/ obj-$(CONFIG_EHEA) += ehea/ obj-$(CONFIG_CAN) += can/ obj-$(CONFIG_BONDING) += bonding/ diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 50e6259b50e..d0ef4ac987c 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -1464,8 +1464,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); - req->params.offset = offset; - req->params.data_buf_size = 0x4; + req->params.offset = cpu_to_le32(offset); + req->params.data_buf_size = cpu_to_le32(0x4); status = be_mcc_notify_wait(adapter); if (!status) diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 43e8032f923..ec6ace80225 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -807,7 +807,7 @@ static void be_rx_compl_process(struct be_adapter *adapter, return; } vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); - vid = be16_to_cpu(vid); + vid = swab16(vid); vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); } else { netif_receive_skb(skb); @@ -884,7 +884,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, napi_gro_frags(&eq_obj->napi); } else { vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); - vid = be16_to_cpu(vid); + vid = swab16(vid); if (!adapter->vlan_grp || adapter->vlans_added == 0) return; @@ -1855,7 +1855,7 @@ static bool be_flash_redboot(struct be_adapter *adapter, p += crc_offset; status = be_cmd_get_flash_crc(adapter, flashed_crc, - (img_start + image_size - 4)); + (image_size - 4)); if (status) { dev_err(&adapter->pdev->dev, "could not get crc from flash, not flashing redboot\n"); @@ -1991,7 +1991,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) struct flash_file_hdr_g3 *fhdr3; struct image_hdr *img_hdr_ptr = NULL; struct be_dma_mem flash_cmd; - int status, i = 0; + int status, i = 0, num_imgs = 0; const u8 *p; strcpy(fw_file, func); @@ -2017,15 +2017,14 @@ int be_load_fw(struct 
be_adapter *adapter, u8 *func) if ((adapter->generation == BE_GEN3) && (get_ufigen_type(fhdr) == BE_GEN3)) { fhdr3 = (struct flash_file_hdr_g3 *) fw->data; - for (i = 0; i < fhdr3->num_imgs; i++) { + num_imgs = le32_to_cpu(fhdr3->num_imgs); + for (i = 0; i < num_imgs; i++) { img_hdr_ptr = (struct image_hdr *) (fw->data + (sizeof(struct flash_file_hdr_g3) + - i * sizeof(struct image_hdr))); - if (img_hdr_ptr->imageid == 1) { - status = be_flash_data(adapter, fw, - &flash_cmd, fhdr3->num_imgs); - } - + i * sizeof(struct image_hdr))); + if (le32_to_cpu(img_hdr_ptr->imageid) == 1) + status = be_flash_data(adapter, fw, &flash_cmd, + num_imgs); } } else if ((adapter->generation == BE_GEN2) && (get_ufigen_type(fhdr) == BE_GEN2)) { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5b92fbff431..0075514bf32 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4156,7 +4156,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev * send the join/membership reports. The curr_active_slave found * will send all of this type of traffic. */ - if ((iph->protocol == htons(IPPROTO_IGMP)) && + if ((iph->protocol == IPPROTO_IGMP) && (skb->protocol == htons(ETH_P_IP))) { read_lock(&bond->curr_slave_lock); @@ -4450,6 +4450,14 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, }; +static void bond_destructor(struct net_device *bond_dev) +{ + struct bonding *bond = netdev_priv(bond_dev); + if (bond->wq) + destroy_workqueue(bond->wq); + free_netdev(bond_dev); +} + static void bond_setup(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); @@ -4470,7 +4478,7 @@ static void bond_setup(struct net_device *bond_dev) bond_dev->ethtool_ops = &bond_ethtool_ops; bond_set_mode_ops(bond, bond->params.mode); - bond_dev->destructor = free_netdev; + bond_dev->destructor = bond_destructor; /* Initialize the device options */ bond_dev->tx_queue_len = 0; @@ -4542,9 +4550,6 @@ static void bond_uninit(struct net_device *bond_dev) bond_remove_proc_entry(bond); - if (bond->wq) - destroy_workqueue(bond->wq); - netif_addr_lock_bh(bond_dev); bond_mc_list_destroy(bond); netif_addr_unlock_bh(bond_dev); @@ -4956,8 +4961,8 @@ int bond_create(struct net *net, const char *name) bond_setup); if (!bond_dev) { pr_err("%s: eek! 
can't alloc netdev!\n", name); - res = -ENOMEM; - goto out; + rtnl_unlock(); + return -ENOMEM; } dev_net_set(bond_dev, net); @@ -4966,19 +4971,16 @@ int bond_create(struct net *net, const char *name) if (!name) { res = dev_alloc_name(bond_dev, "bond%d"); if (res < 0) - goto out_netdev; + goto out; } res = register_netdevice(bond_dev); - if (res < 0) - goto out_netdev; out: rtnl_unlock(); + if (res < 0) + bond_destructor(bond_dev); return res; -out_netdev: - free_netdev(bond_dev); - goto out; } static int __net_init bond_net_init(struct net *net) diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 9781942992e..4b451a7c03e 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -2334,13 +2334,13 @@ static int cnic_service_bnx2x(void *data, void *status_blk) struct cnic_local *cp = dev->cnic_priv; u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; - prefetch(cp->status_blk.bnx2x); - prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); + if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { + prefetch(cp->status_blk.bnx2x); + prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); - if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) tasklet_schedule(&cp->cnic_irq_task); - - cnic_chk_pkt_rings(cp); + cnic_chk_pkt_rings(cp); + } return 0; } diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile new file mode 100644 index 00000000000..498667487f5 --- /dev/null +++ b/drivers/net/cxgb4/Makefile @@ -0,0 +1,7 @@ +# +# Chelsio T4 driver +# + +obj-$(CONFIG_CHELSIO_T4) += cxgb4.o + +cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h new file mode 100644 index 00000000000..3d8ff4889b5 --- /dev/null +++ b/drivers/net/cxgb4/cxgb4.h @@ -0,0 +1,741 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_H__ +#define __CXGB4_H__ + +#include <linux/bitops.h> +#include <linux/cache.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <asm/io.h> +#include "cxgb4_uld.h" +#include "t4_hw.h" + +#define FW_VERSION_MAJOR 1 +#define FW_VERSION_MINOR 1 +#define FW_VERSION_MICRO 0 + +enum { + MAX_NPORTS = 4, /* max # of ports */ + SERNUM_LEN = 16, /* Serial # length */ + EC_LEN = 16, /* E/C length */ + ID_LEN = 16, /* ID length */ +}; + +enum { + MEM_EDC0, + MEM_EDC1, + MEM_MC +}; + +enum dev_master { + MASTER_CANT, + MASTER_MAY, + MASTER_MUST +}; + +enum dev_state { + DEV_STATE_UNINIT, + DEV_STATE_INIT, + DEV_STATE_ERR +}; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +struct port_stats { + u64 tx_octets; /* total # of octets in good frames */ + u64 tx_frames; /* all good frames */ + u64 tx_bcast_frames; /* all broadcast frames */ + u64 tx_mcast_frames; /* all multicast frames */ + u64 tx_ucast_frames; /* all unicast frames */ + u64 tx_error_frames; /* all error frames */ + + u64 tx_frames_64; /* # of Tx frames in a particular range */ + u64 tx_frames_65_127; + u64 tx_frames_128_255; + u64 tx_frames_256_511; + u64 tx_frames_512_1023; + u64 tx_frames_1024_1518; + u64 tx_frames_1519_max; + + u64 tx_drop; /* # of dropped Tx frames */ + u64 tx_pause; /* # of transmitted pause frames */ + u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ + u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ + u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ + u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ + u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ + u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ + u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ + u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ + + u64 rx_octets; /* total # of octets in good frames */ + u64 rx_frames; /* all good frames */ + u64 rx_bcast_frames; /* all broadcast frames */ + u64 rx_mcast_frames; /* all multicast frames */ + u64 rx_ucast_frames; /* all unicast frames */ + u64 rx_too_long; /* # of frames exceeding MTU */ + u64 rx_jabber; /* # of jabber frames */ + u64 rx_fcs_err; /* # of received frames with bad FCS */ + u64 rx_len_err; /* # of received frames with length error */ + u64 rx_symbol_err; /* symbol errors */ + u64 rx_runt; /* # of short frames */ + + u64 rx_frames_64; /* # of Rx frames in a particular range */ + u64 rx_frames_65_127; + u64 rx_frames_128_255; + u64 rx_frames_256_511; + u64 rx_frames_512_1023; + u64 rx_frames_1024_1518; + u64 rx_frames_1519_max; + + u64 rx_pause; /* # of received pause frames */ + u64 rx_ppp0; /* # of received PPP prio 0 frames */ + u64 rx_ppp1; /* # of received PPP prio 1 frames */ + u64 rx_ppp2; /* # of received PPP prio 2 frames */ + u64 rx_ppp3; /* # of received PPP prio 3 frames */ + u64 rx_ppp4; /* # of received PPP prio 4 frames */ + u64 rx_ppp5; /* # of received PPP prio 5 frames */ + u64 rx_ppp6; /* # of received PPP prio 6 frames */ + u64 rx_ppp7; /* # of received PPP prio 7 frames */ + + u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ + u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */ + u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ + u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ + u64 rx_trunc0; /* buffer-group 0 truncated packets */ + u64 rx_trunc1; /* buffer-group 1 truncated packets */ + u64 rx_trunc2; /* buffer-group 2 
truncated packets */ + u64 rx_trunc3; /* buffer-group 3 truncated packets */ +}; + +struct lb_port_stats { + u64 octets; + u64 frames; + u64 bcast_frames; + u64 mcast_frames; + u64 ucast_frames; + u64 error_frames; + + u64 frames_64; + u64 frames_65_127; + u64 frames_128_255; + u64 frames_256_511; + u64 frames_512_1023; + u64 frames_1024_1518; + u64 frames_1519_max; + + u64 drop; + + u64 ovflow0; + u64 ovflow1; + u64 ovflow2; + u64 ovflow3; + u64 trunc0; + u64 trunc1; + u64 trunc2; + u64 trunc3; +}; + +struct tp_tcp_stats { + u32 tcpOutRsts; + u64 tcpInSegs; + u64 tcpOutSegs; + u64 tcpRetransSegs; +}; + +struct tp_err_stats { + u32 macInErrs[4]; + u32 hdrInErrs[4]; + u32 tcpInErrs[4]; + u32 tnlCongDrops[4]; + u32 ofldChanDrops[4]; + u32 tnlTxDrops[4]; + u32 ofldVlanDrops[4]; + u32 tcp6InErrs[4]; + u32 ofldNoNeigh; + u32 ofldCongDefer; +}; + +struct tp_params { + unsigned int ntxchan; /* # of Tx channels */ + unsigned int tre; /* log2 of core clocks per TP tick */ +}; + +struct vpd_params { + unsigned int cclk; + u8 ec[EC_LEN + 1]; + u8 sn[SERNUM_LEN + 1]; + u8 id[ID_LEN + 1]; +}; + +struct pci_params { + unsigned char speed; + unsigned char width; +}; + +struct adapter_params { + struct tp_params tp; + struct vpd_params vpd; + struct pci_params pci; + + unsigned int fw_vers; + unsigned int tp_vers; + u8 api_vers[7]; + + unsigned short mtus[NMTUS]; + unsigned short a_wnd[NCCTRL_WIN]; + unsigned short b_wnd[NCCTRL_WIN]; + + unsigned char nports; /* # of ethernet ports */ + unsigned char portvec; + unsigned char rev; /* chip revision */ + unsigned char offload; + + unsigned int ofldq_wr_cred; +}; + +struct trace_params { + u32 data[TRACE_LEN / 4]; + u32 mask[TRACE_LEN / 4]; + unsigned short snap_len; + unsigned short min_len; + unsigned char skip_ofst; + unsigned char skip_len; + unsigned char invert; + unsigned char port; +}; + +struct link_config { + unsigned short supported; /* link capabilities */ + unsigned short advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? 
*/ +}; + +#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) + +enum { + MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ + MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ + MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ + MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ +}; + +enum { + MAX_EGRQ = 128, /* max # of egress queues, including FLs */ + MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */ +}; + +struct adapter; +struct vlan_group; +struct sge_rspq; + +struct port_info { + struct adapter *adapter; + struct vlan_group *vlan_grp; + u16 viid; + s16 xact_addr_filt; /* index of exact MAC address filter */ + u16 rss_size; /* size of VI's RSS table slice */ + s8 mdio_addr; + u8 port_type; + u8 mod_type; + u8 port_id; + u8 tx_chan; + u8 lport; /* associated offload logical port */ + u8 rx_offload; /* CSO, etc */ + u8 nqsets; /* # of qsets */ + u8 first_qset; /* index of first qset */ + struct link_config link_cfg; +}; + +/* port_info.rx_offload flags */ +enum { + RX_CSO = 1 << 0, +}; + +struct dentry; +struct work_struct; + +enum { /* adapter flags */ + FULL_INIT_DONE = (1 << 0), + USING_MSI = (1 << 1), + USING_MSIX = (1 << 2), + QUEUES_BOUND = (1 << 3), + FW_OK = (1 << 4), +}; + +struct rx_sw_desc; + +struct sge_fl { /* SGE free-buffer queue state */ + unsigned int avail; /* # of available Rx buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned long alloc_failed; /* # of times buffer allocation failed */ + unsigned long large_alloc_failed; + unsigned long starving; + /* RO fields */ + unsigned int cntxt_id; /* SGE context id for the free list */ + unsigned int size; /* capacity of free list */ + struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ + __be64 *desc; /* address of HW Rx descriptor ring */ + dma_addr_t addr; /* bus address of HW ring start */ +}; + +/* A packet gather list */ +struct pkt_gl { + skb_frag_t frags[MAX_SKB_FRAGS]; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); + +struct sge_rspq { /* state for an SGE response queue */ + struct napi_struct napi; + const __be64 *cur_desc; /* current descriptor in queue */ + unsigned int cidx; /* consumer index */ + u8 gen; /* current generation bit */ + u8 intr_params; /* interrupt holdoff parameters */ + u8 next_intr_params; /* holdoff params for next interrupt */ + u8 pktcnt_idx; /* interrupt packet threshold */ + u8 uld; /* ULD handling this queue */ + u8 idx; /* queue index within its group */ + int offset; /* offset into current Rx buffer */ + u16 cntxt_id; /* SGE context id for the response q */ + u16 abs_id; /* absolute SGE id for the response q */ + __be64 *desc; /* address of HW response ring */ + dma_addr_t phys_addr; /* physical address of the ring */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capacity of response queue */ + struct adapter *adap; + struct net_device *netdev; /* associated net device */ + rspq_handler_t handler; +}; + +struct sge_eth_stats { /* Ethernet queue statistics */ + unsigned long pkts; /* # of ethernet packets */ + unsigned long lro_pkts; /* # of LRO super packets */ + unsigned long lro_merged; /* # of wire packets merged by LRO */ + unsigned long rx_cso; /* # of Rx checksum offloads */ 
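/*
 * (Illustrative aside, not part of the patch: counters such as rx_cso,
 * vlan_ex and rx_drops in this struct are kept per Rx queue and are
 * summed into per-port totals by collect_sge_port_stats() later in
 * cxgb4_main.c, so each queue's handler only ever updates its own
 * statistics.)
 */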
+ unsigned long vlan_ex; /* # of Rx VLAN extractions */ + unsigned long rx_drops; /* # of packets dropped due to no mem */ +}; + +struct sge_eth_rxq { /* SW Ethernet Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_eth_stats stats; +} ____cacheline_aligned_in_smp; + +struct sge_ofld_stats { /* offload queue statistics */ + unsigned long pkts; /* # of packets */ + unsigned long imm; /* # of immediate-data packets */ + unsigned long an; /* # of asynchronous notifications */ + unsigned long nomem; /* # of responses deferred due to no mem */ +}; + +struct sge_ofld_rxq { /* SW offload Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_ofld_stats stats; +} ____cacheline_aligned_in_smp; + +struct tx_desc { + __be64 flit[8]; +}; + +struct tx_sw_desc; + +struct sge_txq { + unsigned int in_use; /* # of in-use Tx descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned long stops; /* # of times q has been stopped */ + unsigned long restarts; /* # of queue restarts */ + unsigned int cntxt_id; /* SGE context id for the Tx q */ + struct tx_desc *desc; /* address of HW Tx descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + dma_addr_t phys_addr; /* physical address of the ring */ +}; + +struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ + struct sge_txq q; + struct netdev_queue *txq; /* associated netdev TX queue */ + unsigned long tso; /* # of TSO requests */ + unsigned long tx_cso; /* # of Tx checksum offloads */ + unsigned long vlan_ins; /* # of Tx VLAN insertions */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +} ____cacheline_aligned_in_smp; + +struct sge_ofld_txq { /* state for an SGE offload Tx queue */ + struct sge_txq q; + struct adapter *adap; + struct sk_buff_head sendq; /* list of backpressured packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + u8 full; /* the Tx ring is full */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +} ____cacheline_aligned_in_smp; + +struct sge_ctrl_txq { /* state for an SGE control Tx queue */ + struct sge_txq q; + struct adapter *adap; + struct sk_buff_head sendq; /* list of backpressured packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + u8 full; /* the Tx ring is full */ +} ____cacheline_aligned_in_smp; + +struct sge { + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; + struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; + + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; + struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; + struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + + struct sge_rspq intrq ____cacheline_aligned_in_smp; + spinlock_t intrq_lock; + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u16 ethqsets; /* # of active Ethernet queue sets */ + u16 ethtxq_rover; /* Tx queue to clean up next */ + u16 ofldqsets; /* # of active offload queue sets */ + u16 rdmaqs; /* # of available RDMA Rx queues */ + u16 ofld_rxq[MAX_OFLD_QSETS]; + u16 rdma_rxq[NCHAN]; + u16 timer_val[SGE_NTIMERS]; + u8 counter_val[SGE_NCOUNTERS]; + unsigned int starve_thres; + u8 idma_state[2]; + void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ + struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ + DECLARE_BITMAP(starving_fl, MAX_EGRQ); + 
DECLARE_BITMAP(txq_maperr, MAX_EGRQ); + struct timer_list rx_timer; /* refills starving FLs */ + struct timer_list tx_timer; /* checks Tx queues */ +}; + +#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) +#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) +#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) + +struct l2t_data; + +struct adapter { + void __iomem *regs; + struct pci_dev *pdev; + struct device *pdev_dev; + unsigned long registered_device_map; + unsigned long open_device_map; + unsigned long flags; + + const char *name; + int msg_enable; + + struct adapter_params params; + struct cxgb4_virt_res vres; + unsigned int swintr; + + unsigned int wol; + + struct { + unsigned short vec; + char desc[14]; + } msix_info[MAX_INGQ + 1]; + + struct sge sge; + + struct net_device *port[MAX_NPORTS]; + u8 chan_map[NCHAN]; /* channel -> port map */ + + struct l2t_data *l2t; + void *uld_handle[CXGB4_ULD_MAX]; + struct list_head list_node; + + struct tid_info tids; + void **tid_release_head; + spinlock_t tid_release_lock; + struct work_struct tid_release_task; + bool tid_release_task_busy; + + struct dentry *debugfs_root; + + spinlock_t stats_lock; +}; + +static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) +{ + return readl(adap->regs + reg_addr); +} + +static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val) +{ + writel(val, adap->regs + reg_addr); +} + +#ifndef readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr) +{ + return readq(adap->regs + reg_addr); +} + +static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) +{ + writeq(val, adap->regs + reg_addr); +} + +/** + * netdev2pinfo - return the port_info structure associated with a net_device + * @dev: the netdev + * + * Return the struct port_info associated with a net_device + */ +static inline struct port_info *netdev2pinfo(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @idx: the port index + * + * Return the port_info structure for the port of the given index. 
+ */ +static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) +{ + return netdev_priv(adap->port[idx]); +} + +/** + * netdev2adap - return the adapter structure associated with a net_device + * @dev: the netdev + * + * Return the struct adapter associated with a net_device + */ +static inline struct adapter *netdev2adap(const struct net_device *dev) +{ + return netdev2pinfo(dev)->adapter; +} + +void t4_os_portmod_changed(const struct adapter *adap, int port_id); +void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); + +void *t4_alloc_mem(size_t size); +void t4_free_mem(void *addr); + +void t4_free_sge_resources(struct adapter *adap); +irq_handler_t t4_intr_handler(struct adapter *adap); +netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); +int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); +int t4_ofld_send(struct adapter *adap, struct sk_buff *skb); +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct net_device *dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd); +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *netdevq, + unsigned int iqid); +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int cmplqid); +int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, + struct net_device *dev, unsigned int iqid); +irqreturn_t t4_sge_intr_msix(int irq, void *cookie); +void t4_sge_init(struct adapter *adap); +void t4_sge_start(struct adapter *adap); +void t4_sge_stop(struct adapter *adap); + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +static inline unsigned int core_ticks_per_usec(const struct adapter *adap) +{ + return adap->params.vpd.cclk / 1000; +} + +static inline unsigned int us_to_core_ticks(const struct adapter *adap, + unsigned int us) +{ + return (us * adap->params.vpd.cclk) / 1000; +} + +void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, + u32 val); + +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok); + +static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); +} + +static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); +} + +void t4_intr_enable(struct adapter *adapter); +void t4_intr_disable(struct adapter *adapter); +void t4_intr_clear(struct adapter *adapter); +int t4_slow_intr_handler(struct adapter *adapter); + +int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc); +int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); +int t4_seeprom_wp(struct adapter *adapter, bool enable); +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented); +int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); +int t4_check_fw_version(struct adapter *adapter); +int t4_prep_adapter(struct adapter *adapter); +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +void t4_fatal_err(struct adapter *adapter); +void 
t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on); +int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp, + int filter_index, int enable); +void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, + int filter_index, int *enabled); +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq); +int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, + unsigned int flags); +int t4_read_rss(struct adapter *adapter, u16 *entries); +int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity); +int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, + u64 *parity); + +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p); + +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st); +void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6); +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta); + +void t4_wol_magic_enable(struct adapter *adap, unsigned int port, + const u8 *addr); +int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, + u64 mask0, u64 mask1, unsigned int crc, bool enable); + +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state); +int t4_fw_bye(struct adapter *adap, unsigned int mbox); +int t4_early_init(struct adapter *adap, unsigned int mbox); +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val); +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val); +int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, + unsigned int rxqi, unsigned int rxq, unsigned int tc, + unsigned int vi, unsigned int cmask, unsigned int pmask, + unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size); +int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int viid); +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, bool sleep_ok); +int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool free, unsigned int naddr, + const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt); +int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok); +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en); +int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, + unsigned int nblinks); +int t4_mdio_rd(struct 
adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 *valp); +int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 val); +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); +#endif /* __CXGB4_H__ */ diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c new file mode 100644 index 00000000000..a7e30a23d32 --- /dev/null +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -0,0 +1,3388 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bitmap.h> +#include <linux/crc32.h> +#include <linux/ctype.h> +#include <linux/debugfs.h> +#include <linux/err.h> +#include <linux/etherdevice.h> +#include <linux/firmware.h> +#include <linux/if_vlan.h> +#include <linux/init.h> +#include <linux/log2.h> +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/aer.h> +#include <linux/rtnetlink.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/sockios.h> +#include <linux/vmalloc.h> +#include <linux/workqueue.h> +#include <net/neighbour.h> +#include <net/netevent.h> +#include <asm/uaccess.h> + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "t4fw_api.h" +#include "l2t.h" + +#define DRV_VERSION "1.0.0-ko" +#define DRV_DESC "Chelsio T4 Network Driver" + +/* + * Max interrupt hold-off timer value in us. Queues fall back to this value + * under extreme memory pressure so it's largish to give the system time to + * recover. + */ +#define MAX_SGE_TIMERVAL 200U + +enum { + MEMWIN0_APERTURE = 65536, + MEMWIN0_BASE = 0x30000, + MEMWIN1_APERTURE = 32768, + MEMWIN1_BASE = 0x28000, + MEMWIN2_APERTURE = 2048, + MEMWIN2_BASE = 0x1b800, +}; + +enum { + MAX_TXQ_ENTRIES = 16384, + MAX_CTRL_TXQ_ENTRIES = 1024, + MAX_RSPQ_ENTRIES = 16384, + MAX_RX_BUFFERS = 16384, + MIN_TXQ_ENTRIES = 32, + MIN_CTRL_TXQ_ENTRIES = 32, + MIN_RSPQ_ENTRIES = 128, + MIN_FL_ENTRIES = 16 +}; + +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 } + +static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { + CH_DEVICE(0xa000), /* PE10K */ + { 0, } +}; + +#define FW_FNAME "cxgb4/t4fw.bin" + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); +MODULE_FIRMWARE(FW_FNAME); + +static int dflt_msg_enable = DFLT_MSG_ENABLE; + +module_param(dflt_msg_enable, int, 0644); +MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); + +/* + * The driver uses the best interrupt scheme available on a platform in the + * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which + * of these schemes the driver may consider as follows: + * + * msi = 2: choose from among all three options + * msi = 1: only consider MSI and INTx interrupts + * msi = 0: force INTx interrupts + */ +static int msi = 2; + +module_param(msi, int, 0644); +MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)"); + +/* + * Queue interrupt hold-off timer values. Queues default to the first of these + * upon creation. 
+ */ +static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 }; + +module_param_array(intr_holdoff, uint, NULL, 0644); +MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " + "0..4 in microseconds"); + +static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; + +module_param_array(intr_cnt, uint, NULL, 0644); +MODULE_PARM_DESC(intr_cnt, + "thresholds 1..3 for queue interrupt packet counters"); + +static int vf_acls; + +#ifdef CONFIG_PCI_IOV +module_param(vf_acls, bool, 0644); +MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); + +static unsigned int num_vf[4]; + +module_param_array(num_vf, uint, NULL, 0644); +MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); +#endif + +static struct dentry *cxgb4_debugfs_root; + +static LIST_HEAD(adapter_list); +static DEFINE_MUTEX(uld_mutex); +static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX]; +static const char *uld_str[] = { "RDMA", "iSCSI" }; + +static void link_report(struct net_device *dev) +{ + if (!netif_carrier_ok(dev)) + netdev_info(dev, "link down\n"); + else { + static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" }; + + const char *s = "10Mbps"; + const struct port_info *p = netdev_priv(dev); + + switch (p->link_cfg.speed) { + case SPEED_10000: + s = "10Gbps"; + break; + case SPEED_1000: + s = "1000Mbps"; + break; + case SPEED_100: + s = "100Mbps"; + break; + } + + netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, + fc[p->link_cfg.fc]); + } +} + +void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) +{ + struct net_device *dev = adapter->port[port_id]; + + /* Skip changes from disabled ports. */ + if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) { + if (link_stat) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + link_report(dev); + } +} + +void t4_os_portmod_changed(const struct adapter *adap, int port_id) +{ + static const char *mod_str[] = { + NULL, "LR", "SR", "ER", "passive DA", "active DA" + }; + + const struct net_device *dev = adap->port[port_id]; + const struct port_info *pi = netdev_priv(dev); + + if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) + netdev_info(dev, "port module unplugged\n"); + else + netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); +} + +/* + * Configure the exact and hash address filters to handle a port's multicast + * and secondary unicast MAC addresses. 
+ */ +static int set_addr_filters(const struct net_device *dev, bool sleep) +{ + u64 mhash = 0; + u64 uhash = 0; + bool free = true; + u16 filt_idx[7]; + const u8 *addr[7]; + int ret, naddr = 0; + const struct dev_addr_list *d; + const struct netdev_hw_addr *ha; + int uc_cnt = netdev_uc_count(dev); + const struct port_info *pi = netdev_priv(dev); + + /* first do the secondary unicast addresses */ + netdev_for_each_uc_addr(ha, dev) { + addr[naddr++] = ha->addr; + if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { + ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, + naddr, addr, filt_idx, &uhash, sleep); + if (ret < 0) + return ret; + + free = false; + naddr = 0; + } + } + + /* next set up the multicast addresses */ + netdev_for_each_mc_addr(d, dev) { + addr[naddr++] = d->dmi_addr; + if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) { + ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, + naddr, addr, filt_idx, &mhash, sleep); + if (ret < 0) + return ret; + + free = false; + naddr = 0; + } + } + + return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0, + uhash | mhash, sleep); +} + +/* + * Set Rx properties of a port, such as promiscuity, address filters, and MTU. + * If @mtu is -1 it is left unchanged. + */ +static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + ret = set_addr_filters(dev, sleep_ok); + if (ret == 0) + ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu, + (dev->flags & IFF_PROMISC) ? 1 : 0, + (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, + sleep_ok); + return ret; +} + +/** + * link_start - enable a port + * @dev: the port to enable + * + * Performs the MAC and PHY actions needed to enable a port. + */ +static int link_start(struct net_device *dev) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + /* + * We do not set address filters and promiscuity here, the stack does + * that step explicitly. + */ + ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1, + true); + if (ret == 0) { + ret = t4_change_mac(pi->adapter, 0, pi->viid, + pi->xact_addr_filt, dev->dev_addr, true, + false); + if (ret >= 0) { + pi->xact_addr_filt = ret; + ret = 0; + } + } + if (ret == 0) + ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg); + if (ret == 0) + ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true); + return ret; +} + +/* + * Response queue handler for the FW event queue.
+ */ +static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + u8 opcode = ((const struct rss_header *)rsp)->opcode; + + rsp++; /* skip RSS header */ + if (likely(opcode == CPL_SGE_EGR_UPDATE)) { + const struct cpl_sge_egr_update *p = (void *)rsp; + unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); + struct sge_txq *txq = q->adap->sge.egr_map[qid]; + + txq->restarts++; + if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) { + struct sge_eth_txq *eq; + + eq = container_of(txq, struct sge_eth_txq, q); + netif_tx_wake_queue(eq->txq); + } else { + struct sge_ofld_txq *oq; + + oq = container_of(txq, struct sge_ofld_txq, q); + tasklet_schedule(&oq->qresume_tsk); + } + } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { + const struct cpl_fw6_msg *p = (void *)rsp; + + if (p->type == 0) + t4_handle_fw_rpl(q->adap, p->data); + } else if (opcode == CPL_L2T_WRITE_RPL) { + const struct cpl_l2t_write_rpl *p = (void *)rsp; + + do_l2t_write_rpl(q->adap, p); + } else + dev_err(q->adap->pdev_dev, + "unexpected CPL %#x on FW event queue\n", opcode); + return 0; +} + +/** + * uldrx_handler - response queue handler for ULD queues + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the offload message + * @gl: the gather list of packet fragments + * + * Deliver an ingress offload packet to a ULD. All processing is done by + * the ULD, we just maintain statistics. + */ +static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); + + if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { + rxq->stats.nomem++; + return -1; + } + if (gl == NULL) + rxq->stats.imm++; + else if (gl == CXGB4_MSG_AN) + rxq->stats.an++; + else + rxq->stats.pkts++; + return 0; +} + +static void disable_msi(struct adapter *adapter) +{ + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; + } +} + +/* + * Interrupt handler for non-data events used with MSI-X. + */ +static irqreturn_t t4_nondata_intr(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE)); + if (v & PFSW) { + adap->swintr = 1; + t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v); + } + t4_slow_intr_handler(adap); + return IRQ_HANDLED; +} + +/* + * Name the MSI-X interrupts. 
+ */ +static void name_msix_vecs(struct adapter *adap) +{ + int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1; + + /* non-data interrupts */ + snprintf(adap->msix_info[0].desc, n, "%s", adap->name); + adap->msix_info[0].desc[n] = 0; + + /* FW events */ + snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name); + adap->msix_info[1].desc[n] = 0; + + /* Ethernet queues */ + for_each_port(adap, j) { + struct net_device *d = adap->port[j]; + const struct port_info *pi = netdev_priv(d); + + for (i = 0; i < pi->nqsets; i++, msi_idx++) { + snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", + d->name, i); + adap->msix_info[msi_idx].desc[n] = 0; + } + } + + /* offload queues */ + for_each_ofldrxq(&adap->sge, i) { + snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d", + adap->name, i); + adap->msix_info[msi_idx++].desc[n] = 0; + } + for_each_rdmarxq(&adap->sge, i) { + snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d", + adap->name, i); + adap->msix_info[msi_idx++].desc[n] = 0; + } +} + +static int request_msix_queue_irqs(struct adapter *adap) +{ + struct sge *s = &adap->sge; + int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; + + err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, + adap->msix_info[1].desc, &s->fw_evtq); + if (err) + return err; + + for_each_ethrxq(s, ethqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->ethrxq[ethqidx].rspq); + if (err) + goto unwind; + msi++; + } + for_each_ofldrxq(s, ofldqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->ofldrxq[ofldqidx].rspq); + if (err) + goto unwind; + msi++; + } + for_each_rdmarxq(s, rdmaqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->rdmarxq[rdmaqidx].rspq); + if (err) + goto unwind; + msi++; + } + return 0; + +unwind: + while (--rdmaqidx >= 0) + free_irq(adap->msix_info[--msi].vec, + &s->rdmarxq[rdmaqidx].rspq); + while (--ofldqidx >= 0) + free_irq(adap->msix_info[--msi].vec, + &s->ofldrxq[ofldqidx].rspq); + while (--ethqidx >= 0) + free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); + free_irq(adap->msix_info[1].vec, &s->fw_evtq); + return err; +} + +static void free_msix_queue_irqs(struct adapter *adap) +{ + int i, msi = 2; + struct sge *s = &adap->sge; + + free_irq(adap->msix_info[1].vec, &s->fw_evtq); + for_each_ethrxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); + for_each_ofldrxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); + for_each_rdmarxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); +} + +/** + * setup_rss - configure RSS + * @adap: the adapter + * + * Sets up RSS to distribute packets to multiple receive queues. We + * configure the RSS CPU lookup table to distribute to the number of HW + * receive queues, and the response queue lookup table to narrow that + * down to the response queues actually configured for each port. + * We always configure the RSS mapping for all ports since the mapping + * table has plenty of entries. 
+ */ +static int setup_rss(struct adapter *adap) +{ + int i, j, err; + u16 rss[MAX_ETH_QSETS]; + + for_each_port(adap, i) { + const struct port_info *pi = adap2pinfo(adap, i); + const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + for (j = 0; j < pi->nqsets; j++) + rss[j] = q[j].rspq.abs_id; + + err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size, + rss, pi->nqsets); + if (err) + return err; + } + return 0; +} + +/* + * Wait until all NAPI handlers are descheduled. + */ +static void quiesce_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { + struct sge_rspq *q = adap->sge.ingr_map[i]; + + if (q && q->handler) + napi_disable(&q->napi); + } +} + +/* + * Enable NAPI scheduling and interrupt generation for all Rx queues. + */ +static void enable_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { + struct sge_rspq *q = adap->sge.ingr_map[i]; + + if (!q) + continue; + if (q->handler) + napi_enable(&q->napi); + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), + SEINTARM(q->intr_params) | + INGRESSQID(q->cntxt_id)); + } +} + +/** + * setup_sge_queues - configure SGE Tx/Rx/response queues + * @adap: the adapter + * + * Determines how many sets of SGE queues to use and initializes them. + * We support multiple queue sets per port if we have MSI-X, otherwise + * just one queue set per port. + */ +static int setup_sge_queues(struct adapter *adap) +{ + int err, msi_idx, i, j; + struct sge *s = &adap->sge; + + bitmap_zero(s->starving_fl, MAX_EGRQ); + bitmap_zero(s->txq_maperr, MAX_EGRQ); + + if (adap->flags & USING_MSIX) + msi_idx = 1; /* vector 0 is for non-queue interrupts */ + else { + err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, + NULL, NULL); + if (err) + return err; + msi_idx = -((int)s->intrq.abs_id + 1); + } + + err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], + msi_idx, NULL, fwevtq_handler); + if (err) { +freeout: t4_free_sge_resources(adap); + return err; + } + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; + + for (j = 0; j < pi->nqsets; j++, q++) { + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, + msi_idx, &q->fl, + t4_ethrx_handler); + if (err) + goto freeout; + q->rspq.idx = j; + memset(&q->stats, 0, sizeof(q->stats)); + } + for (j = 0; j < pi->nqsets; j++, t++) { + err = t4_sge_alloc_eth_txq(adap, t, dev, + netdev_get_tx_queue(dev, j), + s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + } + + j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ + for_each_ofldrxq(s, i) { + struct sge_ofld_rxq *q = &s->ofldrxq[i]; + struct net_device *dev = adap->port[i / j]; + + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, + &q->fl, uldrx_handler); + if (err) + goto freeout; + memset(&q->stats, 0, sizeof(q->stats)); + s->ofld_rxq[i] = q->rspq.abs_id; + err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev, + s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + + for_each_rdmarxq(s, i) { + struct sge_ofld_rxq *q = &s->rdmarxq[i]; + + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], + msi_idx, &q->fl, uldrx_handler); + if (err) + goto freeout; + memset(&q->stats, 0, sizeof(q->stats)); 
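/*
 * (Illustrative aside, not part of the patch: the msi_idx accounting in
 * this function mirrors name_msix_vecs() and request_msix_queue_irqs()
 * above -- MSI-X vector 0 carries non-queue interrupts, vector 1 the FW
 * event queue, and vectors 2 onward are assigned in order to the
 * Ethernet, offload and RDMA Rx queues, which is why each queue
 * allocation increments msi_idx before using it.)
 */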
+ s->rdma_rxq[i] = q->rspq.abs_id; + } + + for_each_port(adap, i) { + /* + * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't + * have RDMA queues, and that's the right value. + */ + err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], + s->fw_evtq.cntxt_id, + s->rdmarxq[i].rspq.cntxt_id); + if (err) + goto freeout; + } + + t4_write_reg(adap, MPS_TRC_RSS_CONTROL, + RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | + QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); + return 0; +} + +/* + * Returns 0 if new FW was successfully loaded, a positive errno if a load was + * started but failed, and a negative errno if flash load couldn't start. + */ +static int upgrade_fw(struct adapter *adap) +{ + int ret; + u32 vers; + const struct fw_hdr *hdr; + const struct firmware *fw; + struct device *dev = adap->pdev_dev; + + ret = request_firmware(&fw, FW_FNAME, dev); + if (ret < 0) { + dev_err(dev, "unable to load firmware image " FW_FNAME + ", error %d\n", ret); + return ret; + } + + hdr = (const struct fw_hdr *)fw->data; + vers = ntohl(hdr->fw_ver); + if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { + ret = -EINVAL; /* wrong major version, won't do */ + goto out; + } + + /* + * If the flash FW is unusable or we found something newer, load it. + */ + if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || + vers > adap->params.fw_vers) { + ret = -t4_load_fw(adap, fw->data, fw->size); + if (!ret) + dev_info(dev, "firmware upgraded to version %pI4 from " + FW_FNAME "\n", &hdr->fw_ver); + } +out: release_firmware(fw); + return ret; +} + +/* + * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. + * The allocated memory is cleared. + */ +void *t4_alloc_mem(size_t size) +{ + void *p = kmalloc(size, GFP_KERNEL); + + if (!p) + p = vmalloc(size); + if (p) + memset(p, 0, size); + return p; +} + +/* + * Free memory allocated through t4_alloc_mem(). + */ +void t4_free_mem(void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +static inline int is_offload(const struct adapter *adap) +{ + return adap->params.offload; +} + +/* + * Implementation of ethtool operations.
+ */ + +static u32 get_msglevel(struct net_device *dev) +{ + return netdev2adap(dev)->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + netdev2adap(dev)->msg_enable = val; +} + +static char stats_strings[][ETH_GSTRING_LEN] = { + "TxOctetsOK ", + "TxFramesOK ", + "TxBroadcastFrames ", + "TxMulticastFrames ", + "TxUnicastFrames ", + "TxErrorFrames ", + + "TxFrames64 ", + "TxFrames65To127 ", + "TxFrames128To255 ", + "TxFrames256To511 ", + "TxFrames512To1023 ", + "TxFrames1024To1518 ", + "TxFrames1519ToMax ", + + "TxFramesDropped ", + "TxPauseFrames ", + "TxPPP0Frames ", + "TxPPP1Frames ", + "TxPPP2Frames ", + "TxPPP3Frames ", + "TxPPP4Frames ", + "TxPPP5Frames ", + "TxPPP6Frames ", + "TxPPP7Frames ", + + "RxOctetsOK ", + "RxFramesOK ", + "RxBroadcastFrames ", + "RxMulticastFrames ", + "RxUnicastFrames ", + + "RxFramesTooLong ", + "RxJabberErrors ", + "RxFCSErrors ", + "RxLengthErrors ", + "RxSymbolErrors ", + "RxRuntFrames ", + + "RxFrames64 ", + "RxFrames65To127 ", + "RxFrames128To255 ", + "RxFrames256To511 ", + "RxFrames512To1023 ", + "RxFrames1024To1518 ", + "RxFrames1519ToMax ", + + "RxPauseFrames ", + "RxPPP0Frames ", + "RxPPP1Frames ", + "RxPPP2Frames ", + "RxPPP3Frames ", + "RxPPP4Frames ", + "RxPPP5Frames ", + "RxPPP6Frames ", + "RxPPP7Frames ", + + "RxBG0FramesDropped ", + "RxBG1FramesDropped ", + "RxBG2FramesDropped ", + "RxBG3FramesDropped ", + "RxBG0FramesTrunc ", + "RxBG1FramesTrunc ", + "RxBG2FramesTrunc ", + "RxBG3FramesTrunc ", + + "TSO ", + "TxCsumOffload ", + "RxCsumGood ", + "VLANextractions ", + "VLANinsertions ", +}; + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } +} + +#define T4_REGMAP_SIZE (160 * 1024) + +static int get_regs_len(struct net_device *dev) +{ + return T4_REGMAP_SIZE; +} + +static int get_eeprom_len(struct net_device *dev) +{ + return EEPROMSIZE; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct adapter *adapter = netdev2adap(dev); + + strcpy(info->driver, KBUILD_MODNAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(adapter->pdev)); + + if (!adapter->params.fw_vers) + strcpy(info->fw_version, "N/A"); + else + snprintf(info->fw_version, sizeof(info->fw_version), + "%u.%u.%u.%u, TP %u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers)); +} + +static void get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, stats_strings, sizeof(stats_strings)); +} + +/* + * port stats maintained per queue of the port. They should be in the same + * order as in stats_strings above. 
+ */ +struct queue_port_stats { + u64 tso; + u64 tx_csum; + u64 rx_csum; + u64 vlan_ex; + u64 vlan_ins; +}; + +static void collect_sge_port_stats(const struct adapter *adap, + const struct port_info *p, struct queue_port_stats *s) +{ + int i; + const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; + const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; + + memset(s, 0, sizeof(*s)); + for (i = 0; i < p->nqsets; i++, rx++, tx++) { + s->tso += tx->tso; + s->tx_csum += tx->tx_cso; + s->rx_csum += rx->stats.rx_cso; + s->vlan_ex += rx->stats.vlan_ex; + s->vlan_ins += tx->vlan_ins; + } +} + +static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); + + data += sizeof(struct port_stats) / sizeof(u64); + collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); +} + +/* + * Return a version number to identify the type of adapter. The scheme is: + * - bits 0..9: chip version + * - bits 10..15: chip revision + */ +static inline unsigned int mk_adap_vers(const struct adapter *ap) +{ + return 4 | (ap->params.rev << 10); +} + +static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, + unsigned int end) +{ + u32 *p = buf + start; + + for ( ; start <= end; start += sizeof(u32)) + *p++ = t4_read_reg(ap, start); +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + static const unsigned int reg_ranges[] = { + 0x1008, 0x1108, + 0x1180, 0x11b4, + 0x11fc, 0x123c, + 0x1300, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x30d8, + 0x30e0, 0x5924, + 0x5960, 0x59d4, + 0x5a00, 0x5af8, + 0x6000, 0x6098, + 0x6100, 0x6150, + 0x6200, 0x6208, + 0x6240, 0x6248, + 0x6280, 0x6338, + 0x6370, 0x638c, + 0x6400, 0x643c, + 0x6500, 0x6524, + 0x6a00, 0x6a38, + 0x6a60, 0x6a78, + 0x6b00, 0x6b84, + 0x6bf0, 0x6c84, + 0x6cf0, 0x6d84, + 0x6df0, 0x6e84, + 0x6ef0, 0x6f84, + 0x6ff0, 0x7084, + 0x70f0, 0x7184, + 0x71f0, 0x7284, + 0x72f0, 0x7384, + 0x73f0, 0x7450, + 0x7500, 0x7530, + 0x7600, 0x761c, + 0x7680, 0x76cc, + 0x7700, 0x7798, + 0x77c0, 0x77fc, + 0x7900, 0x79fc, + 0x7b00, 0x7c38, + 0x7d00, 0x7efc, + 0x8dc0, 0x8e1c, + 0x8e30, 0x8e78, + 0x8ea0, 0x8f6c, + 0x8fc0, 0x9074, + 0x90fc, 0x90fc, + 0x9400, 0x9458, + 0x9600, 0x96bc, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0x9fec, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0xea7c, + 0xf000, 0x11190, + 0x19040, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x1924c, + 0x193f8, 0x19474, + 0x19490, 0x194f8, + 0x19800, 0x19f30, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e040, 0x1e04c, + 0x1e240, 0x1e28c, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e440, 0x1e44c, + 0x1e640, 0x1e68c, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e840, 0x1e84c, + 0x1ea40, 0x1ea8c, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec40, 0x1ec4c, + 0x1ee40, 0x1ee8c, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f040, 0x1f04c, + 0x1f240, 0x1f28c, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f440, 0x1f44c, + 0x1f640, 0x1f68c, + 0x1f6c0, 
0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f840, 0x1f84c, + 0x1fa40, 0x1fa8c, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc40, 0x1fc4c, + 0x1fe40, 0x1fe8c, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x20000, 0x2002c, + 0x20100, 0x2013c, + 0x20190, 0x201c8, + 0x20200, 0x20318, + 0x20400, 0x20528, + 0x20540, 0x20614, + 0x21000, 0x21040, + 0x2104c, 0x21060, + 0x210c0, 0x210ec, + 0x21200, 0x21268, + 0x21270, 0x21284, + 0x212fc, 0x21388, + 0x21400, 0x21404, + 0x21500, 0x21518, + 0x2152c, 0x2153c, + 0x21550, 0x21554, + 0x21600, 0x21600, + 0x21608, 0x21628, + 0x21630, 0x2163c, + 0x21700, 0x2171c, + 0x21780, 0x2178c, + 0x21800, 0x21c38, + 0x21c80, 0x21d7c, + 0x21e00, 0x21e04, + 0x22000, 0x2202c, + 0x22100, 0x2213c, + 0x22190, 0x221c8, + 0x22200, 0x22318, + 0x22400, 0x22528, + 0x22540, 0x22614, + 0x23000, 0x23040, + 0x2304c, 0x23060, + 0x230c0, 0x230ec, + 0x23200, 0x23268, + 0x23270, 0x23284, + 0x232fc, 0x23388, + 0x23400, 0x23404, + 0x23500, 0x23518, + 0x2352c, 0x2353c, + 0x23550, 0x23554, + 0x23600, 0x23600, + 0x23608, 0x23628, + 0x23630, 0x2363c, + 0x23700, 0x2371c, + 0x23780, 0x2378c, + 0x23800, 0x23c38, + 0x23c80, 0x23d7c, + 0x23e00, 0x23e04, + 0x24000, 0x2402c, + 0x24100, 0x2413c, + 0x24190, 0x241c8, + 0x24200, 0x24318, + 0x24400, 0x24528, + 0x24540, 0x24614, + 0x25000, 0x25040, + 0x2504c, 0x25060, + 0x250c0, 0x250ec, + 0x25200, 0x25268, + 0x25270, 0x25284, + 0x252fc, 0x25388, + 0x25400, 0x25404, + 0x25500, 0x25518, + 0x2552c, 0x2553c, + 0x25550, 0x25554, + 0x25600, 0x25600, + 0x25608, 0x25628, + 0x25630, 0x2563c, + 0x25700, 0x2571c, + 0x25780, 0x2578c, + 0x25800, 0x25c38, + 0x25c80, 0x25d7c, + 0x25e00, 0x25e04, + 0x26000, 0x2602c, + 0x26100, 0x2613c, + 0x26190, 0x261c8, + 0x26200, 0x26318, + 0x26400, 0x26528, + 0x26540, 0x26614, + 0x27000, 0x27040, + 0x2704c, 0x27060, + 0x270c0, 0x270ec, + 0x27200, 0x27268, + 0x27270, 0x27284, + 0x272fc, 0x27388, + 0x27400, 0x27404, + 0x27500, 0x27518, + 0x2752c, 0x2753c, + 0x27550, 0x27554, + 0x27600, 0x27600, + 0x27608, 0x27628, + 0x27630, 0x2763c, + 0x27700, 0x2771c, + 0x27780, 0x2778c, + 0x27800, 0x27c38, + 0x27c80, 0x27d7c, + 0x27e00, 0x27e04 + }; + + int i; + struct adapter *ap = netdev2adap(dev); + + regs->version = mk_adap_vers(ap); + + memset(buf, 0, T4_REGMAP_SIZE); + for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) + reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); +} + +static int restart_autoneg(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + if (p->link_cfg.autoneg != AUTONEG_ENABLE) + return -EINVAL; + t4_restart_aneg(p->adapter, 0, p->tx_chan); + return 0; +} + +static int identify_port(struct net_device *dev, u32 data) +{ + if (data == 0) + data = 2; /* default to 2 seconds */ + + return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid, + data * 5); +} + +static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) +{ + unsigned int v = 0; + + if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) { + v |= SUPPORTED_TP; + if (caps & FW_PORT_CAP_SPEED_100M) + v |= SUPPORTED_100baseT_Full; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { + v |= SUPPORTED_Backplane; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseKX_Full; + if (caps & 
FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseKX4_Full; + } else if (type == FW_PORT_TYPE_KR) + v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; + else if (type == FW_PORT_TYPE_FIBER) + v |= SUPPORTED_FIBRE; + + if (caps & FW_PORT_CAP_ANEG) + v |= SUPPORTED_Autoneg; + return v; +} + +static unsigned int to_fw_linkcaps(unsigned int caps) +{ + unsigned int v = 0; + + if (caps & ADVERTISED_100baseT_Full) + v |= FW_PORT_CAP_SPEED_100M; + if (caps & ADVERTISED_1000baseT_Full) + v |= FW_PORT_CAP_SPEED_1G; + if (caps & ADVERTISED_10000baseT_Full) + v |= FW_PORT_CAP_SPEED_10G; + return v; +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + const struct port_info *p = netdev_priv(dev); + + if (p->port_type == FW_PORT_TYPE_BT_SGMII || + p->port_type == FW_PORT_TYPE_BT_XAUI) + cmd->port = PORT_TP; + else if (p->port_type == FW_PORT_TYPE_FIBER) + cmd->port = PORT_FIBRE; + else if (p->port_type == FW_PORT_TYPE_TWINAX) + cmd->port = PORT_DA; + else + cmd->port = PORT_OTHER; + + if (p->mdio_addr >= 0) { + cmd->phy_address = p->mdio_addr; + cmd->transceiver = XCVR_EXTERNAL; + cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? + MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + } else { + cmd->phy_address = 0; /* not really, but no better option */ + cmd->transceiver = XCVR_INTERNAL; + cmd->mdio_support = 0; + } + + cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); + cmd->advertising = from_fw_linkcaps(p->port_type, + p->link_cfg.advertising); + cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0; + cmd->duplex = DUPLEX_FULL; + cmd->autoneg = p->link_cfg.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static unsigned int speed_to_caps(int speed) +{ + if (speed == SPEED_100) + return FW_PORT_CAP_SPEED_100M; + if (speed == SPEED_1000) + return FW_PORT_CAP_SPEED_1G; + if (speed == SPEED_10000) + return FW_PORT_CAP_SPEED_10G; + return 0; +} + +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + unsigned int cap; + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + + if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ + return -EINVAL; + + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + /* + * PHY offers a single speed. See if that's what's + * being requested. 
+ */ + if (cmd->autoneg == AUTONEG_DISABLE && + (lc->supported & speed_to_caps(cmd->speed))) + return 0; + return -EINVAL; + } + + if (cmd->autoneg == AUTONEG_DISABLE) { + cap = speed_to_caps(cmd->speed); + + if (!(lc->supported & cap) || cmd->speed == SPEED_1000 || + cmd->speed == SPEED_10000) + return -EINVAL; + lc->requested_speed = cap; + lc->advertising = 0; + } else { + cap = to_fw_linkcaps(cmd->advertising); + if (!(lc->supported & cap)) + return -EINVAL; + lc->requested_speed = 0; + lc->advertising = cap | FW_PORT_CAP_ANEG; + } + lc->autoneg = cmd->autoneg; + + if (netif_running(dev)) + return t4_link_start(p->adapter, 0, p->tx_chan, lc); + return 0; +} + +static void get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + + epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; + epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; +} + +static int set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + + if (epause->autoneg == AUTONEG_DISABLE) + lc->requested_fc = 0; + else if (lc->supported & FW_PORT_CAP_ANEG) + lc->requested_fc = PAUSE_AUTONEG; + else + return -EINVAL; + + if (epause->rx_pause) + lc->requested_fc |= PAUSE_RX; + if (epause->tx_pause) + lc->requested_fc |= PAUSE_TX; + if (netif_running(dev)) + return t4_link_start(p->adapter, 0, p->tx_chan, lc); + return 0; +} + +static u32 get_rx_csum(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + return p->rx_offload & RX_CSO; +} + +static int set_rx_csum(struct net_device *dev, u32 data) +{ + struct port_info *p = netdev_priv(dev); + + if (data) + p->rx_offload |= RX_CSO; + else + p->rx_offload &= ~RX_CSO; + return 0; +} + +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + const struct port_info *pi = netdev_priv(dev); + const struct sge *s = &pi->adapter->sge; + + e->rx_max_pending = MAX_RX_BUFFERS; + e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; + e->rx_jumbo_max_pending = 0; + e->tx_max_pending = MAX_TXQ_ENTRIES; + + e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; + e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; + e->rx_jumbo_pending = 0; + e->tx_pending = s->ethtxq[pi->first_qset].q.size; +} + +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + int i; + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + + if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || + e->tx_pending > MAX_TXQ_ENTRIES || + e->rx_mini_pending > MAX_RSPQ_ENTRIES || + e->rx_mini_pending < MIN_RSPQ_ENTRIES || + e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + for (i = 0; i < pi->nqsets; ++i) { + s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; + s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; + s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; + } + return 0; +} + +static int closest_timer(const struct sge *s, int time) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + delta = time - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static int closest_thres(const 
struct sge *s, int thres) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = thres - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +/* + * Return a queue's interrupt hold-off time in us. 0 means no timer. + */ +static unsigned int qtimer_val(const struct adapter *adap, + const struct sge_rspq *q) +{ + unsigned int idx = q->intr_params >> 1; + + return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; +} + +/** + * set_rxq_intr_params - set a queue's interrupt holdoff parameters + * @adap: the adapter + * @q: the Rx queue + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Sets an Rx queue's interrupt hold-off time and packet count. At least + * one of the two needs to be enabled for the queue to generate interrupts. + */ +static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt) +{ + if ((us | cnt) == 0) + cnt = 1; + + if (cnt) { + int err; + u32 v, new_idx; + + new_idx = closest_thres(&adap->sge, cnt); + if (q->desc && q->pktcnt_idx != new_idx) { + /* the queue has already been created, update it */ + v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + FW_PARAMS_PARAM_YZ(q->cntxt_id); + err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx); + if (err) + return err; + } + q->pktcnt_idx = new_idx; + } + + us = us == 0 ? 6 : closest_timer(&adap->sge, us); + q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0); + return 0; +} + +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, + c->rx_coalesce_usecs, c->rx_max_coalesced_frames); +} + +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adap = pi->adapter; + const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; + + c->rx_coalesce_usecs = qtimer_val(adap, rq); + c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? + adap->sge.counter_val[rq->pktcnt_idx] : 0; + return 0; +} + +/* + * Translate a physical EEPROM address to virtual. The first 1K is accessed + * through virtual addresses starting at 31K, the rest is accessed through + * virtual addresses starting at 0. This mapping is correct only for PF0. + */ +static int eeprom_ptov(unsigned int phys_addr) +{ + if (phys_addr < 1024) + return phys_addr + (31 << 10); + if (phys_addr < EEPROMSIZE) + return phys_addr - 1024; + return -EINVAL; +} + +/* + * The next two routines implement eeprom read/write from physical addresses. + * The physical->virtual translation is correct only for PF0. + */ +static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) +{ + int vaddr = eeprom_ptov(phys_addr); + + if (vaddr >= 0) + vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); + return vaddr < 0 ? vaddr : 0; +} + +static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) +{ + int vaddr = eeprom_ptov(phys_addr); + + if (vaddr >= 0) + vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); + return vaddr < 0 ? 
vaddr : 0; +} + +#define EEPROM_MAGIC 0x38E2F10C + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, + u8 *data) +{ + int i, err = 0; + struct adapter *adapter = netdev2adap(dev); + + u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + e->magic = EEPROM_MAGIC; + for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) + err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); + + if (!err) + memcpy(data, buf + e->offset, e->len); + kfree(buf); + return err; +} + +static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + u8 *buf; + int err = 0; + u32 aligned_offset, aligned_len, *p; + struct adapter *adapter = netdev2adap(dev); + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + aligned_offset = eeprom->offset & ~3; + aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; + + if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { + /* + * RMW possibly needed for first or last words. + */ + buf = kmalloc(aligned_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); + if (!err && aligned_len > 4) + err = eeprom_rd_phys(adapter, + aligned_offset + aligned_len - 4, + (u32 *)&buf[aligned_len - 4]); + if (err) + goto out; + memcpy(buf + (eeprom->offset & 3), data, eeprom->len); + } else + buf = data; + + err = t4_seeprom_wp(adapter, false); + if (err) + goto out; + + for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { + err = eeprom_wr_phys(adapter, aligned_offset, *p); + aligned_offset += 4; + } + + if (!err) + err = t4_seeprom_wp(adapter, true); +out: + if (buf != data) + kfree(buf); + return err; +} + +static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct adapter *adap = netdev2adap(netdev); + + ef->data[sizeof(ef->data) - 1] = '\0'; + ret = request_firmware(&fw, ef->data, adap->pdev_dev); + if (ret < 0) + return ret; + + ret = t4_load_fw(adap, fw->data, fw->size); + release_firmware(fw); + if (!ret) + dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); + return ret; +} + +#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) +#define BCAST_CRC 0xa0ccc1a6 + +static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + wol->supported = WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = netdev2adap(dev)->wol; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + int err = 0; + struct port_info *pi = netdev_priv(dev); + + if (wol->wolopts & ~WOL_SUPPORTED) + return -EINVAL; + t4_wol_magic_enable(pi->adapter, pi->tx_chan, + (wol->wolopts & WAKE_MAGIC) ? 
dev->dev_addr : NULL); + if (wol->wolopts & WAKE_BCAST) { + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, + ~0ULL, 0, false); + if (!err) + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, + ~6ULL, ~0ULL, BCAST_CRC, true); + } else + t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); + return err; +} + +static int set_tso(struct net_device *dev, u32 value) +{ + if (value) + dev->features |= NETIF_F_TSO | NETIF_F_TSO6; + else + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + return 0; +} + +static struct ethtool_ops cxgb_ethtool_ops = { + .get_settings = get_settings, + .set_settings = set_settings, + .get_drvinfo = get_drvinfo, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_ringparam = get_sge_param, + .set_ringparam = set_sge_param, + .get_coalesce = get_coalesce, + .set_coalesce = set_coalesce, + .get_eeprom_len = get_eeprom_len, + .get_eeprom = get_eeprom, + .set_eeprom = set_eeprom, + .get_pauseparam = get_pauseparam, + .set_pauseparam = set_pauseparam, + .get_rx_csum = get_rx_csum, + .set_rx_csum = set_rx_csum, + .set_tx_csum = ethtool_op_set_tx_ipv6_csum, + .set_sg = ethtool_op_set_sg, + .get_link = ethtool_op_get_link, + .get_strings = get_strings, + .phys_id = identify_port, + .nway_reset = restart_autoneg, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_stats, + .get_regs_len = get_regs_len, + .get_regs = get_regs, + .get_wol = get_wol, + .set_wol = set_wol, + .set_tso = set_tso, + .flash_device = set_flash, +}; + +/* + * debugfs support + */ + +static int mem_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t mem_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file->f_path.dentry->d_inode->i_size; + unsigned int mem = (uintptr_t)file->private_data & 3; + struct adapter *adap = file->private_data - mem; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + while (count) { + size_t len; + int ret, ofst; + __be32 data[16]; + + if (mem == MEM_MC) + ret = t4_mc_read(adap, pos, data, NULL); + else + ret = t4_edc_read(adap, mem, pos, data, NULL); + if (ret) + return ret; + + ofst = pos % sizeof(data); + len = min(count, sizeof(data) - ofst); + if (copy_to_user(buf, (u8 *)data + ofst, len)) + return -EFAULT; + + buf += len; + pos += len; + count -= len; + } + count = pos - *ppos; + *ppos = pos; + return count; +} + +static const struct file_operations mem_debugfs_fops = { + .owner = THIS_MODULE, + .open = mem_open, + .read = mem_read, +}; + +static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, + unsigned int idx, unsigned int size_mb) +{ + struct dentry *de; + + de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, + (void *)adap + idx, &mem_debugfs_fops); + if (de && de->d_inode) + de->d_inode->i_size = size_mb << 20; +} + +static int __devinit setup_debugfs(struct adapter *adap) +{ + int i; + + if (IS_ERR_OR_NULL(adap->debugfs_root)) + return -1; + + i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); + if (i & EDRAM0_ENABLE) + add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); + if (i & EDRAM1_ENABLE) + add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); + if (i & EXT_MEM_ENABLE) + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); + if (adap->l2t) + debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, + &t4_l2t_fops); + return 0; +} + +/* + * 
upper-layer driver support + */ + +/* + * Allocate an active-open TID and set it to the supplied value. + */ +int cxgb4_alloc_atid(struct tid_info *t, void *data) +{ + int atid = -1; + + spin_lock_bh(&t->atid_lock); + if (t->afree) { + union aopen_entry *p = t->afree; + + atid = p - t->atid_tab; + t->afree = p->next; + p->data = data; + t->atids_in_use++; + } + spin_unlock_bh(&t->atid_lock); + return atid; +} +EXPORT_SYMBOL(cxgb4_alloc_atid); + +/* + * Release an active-open TID. + */ +void cxgb4_free_atid(struct tid_info *t, unsigned int atid) +{ + union aopen_entry *p = &t->atid_tab[atid]; + + spin_lock_bh(&t->atid_lock); + p->next = t->afree; + t->afree = p; + t->atids_in_use--; + spin_unlock_bh(&t->atid_lock); +} +EXPORT_SYMBOL(cxgb4_free_atid); + +/* + * Allocate a server TID and set it to the supplied value. + */ +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) +{ + int stid; + + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) { + stid = find_first_zero_bit(t->stid_bmap, t->nstids); + if (stid < t->nstids) + __set_bit(stid, t->stid_bmap); + else + stid = -1; + } else { + stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); + if (stid < 0) + stid = -1; + } + if (stid >= 0) { + t->stid_tab[stid].data = data; + stid += t->stid_base; + t->stids_in_use++; + } + spin_unlock_bh(&t->stid_lock); + return stid; +} +EXPORT_SYMBOL(cxgb4_alloc_stid); + +/* + * Release a server TID. + */ +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) +{ + stid -= t->stid_base; + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) + __clear_bit(stid, t->stid_bmap); + else + bitmap_release_region(t->stid_bmap, stid, 2); + t->stid_tab[stid].data = NULL; + t->stids_in_use--; + spin_unlock_bh(&t->stid_lock); +} +EXPORT_SYMBOL(cxgb4_free_stid); + +/* + * Populate a TID_RELEASE WR. Caller must properly size the skb. + */ +static void mk_tid_release(struct sk_buff *skb, unsigned int chan, + unsigned int tid) +{ + struct cpl_tid_release *req; + + set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); + req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); +} + +/* + * Queue a TID release request and if necessary schedule a work queue to + * process it. + */ +void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, + unsigned int tid) +{ + void **p = &t->tid_tab[tid]; + struct adapter *adap = container_of(t, struct adapter, tids); + + spin_lock_bh(&adap->tid_release_lock); + *p = adap->tid_release_head; + /* Low 2 bits encode the Tx channel number */ + adap->tid_release_head = (void **)((uintptr_t)p | chan); + if (!adap->tid_release_task_busy) { + adap->tid_release_task_busy = true; + schedule_work(&adap->tid_release_task); + } + spin_unlock_bh(&adap->tid_release_lock); +} +EXPORT_SYMBOL(cxgb4_queue_tid_release); + +/* + * Process the list of pending TID release requests. 
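cxgb4_queue_tid_release() above links pending entries through the tid_tab slots themselves and, because those slots are pointer-aligned, borrows the low two bits of each link to carry the Tx channel number. A stand-alone sketch of that pointer-tagging idiom, just the arithmetic rather than driver code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* tag a pointer-aligned address with a 2-bit channel and recover both */
static void *tag_ptr(void **p, unsigned int chan)
{
	assert(chan < 4 && ((uintptr_t)p & 3) == 0);
	return (void *)((uintptr_t)p | chan);
}

int main(void)
{
	void *slot;			/* stands in for &t->tid_tab[tid] */
	void *tagged = tag_ptr(&slot, 2);

	unsigned int chan = (uintptr_t)tagged & 3;
	void **p = (void **)((uintptr_t)tagged - chan);

	printf("chan=%u, ptr ok=%d\n", chan, p == &slot);
	return 0;
}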
+ */ +static void process_tid_release_list(struct work_struct *work) +{ + struct sk_buff *skb; + struct adapter *adap; + + adap = container_of(work, struct adapter, tid_release_task); + + spin_lock_bh(&adap->tid_release_lock); + while (adap->tid_release_head) { + void **p = adap->tid_release_head; + unsigned int chan = (uintptr_t)p & 3; + p = (void *)p - chan; + + adap->tid_release_head = *p; + *p = NULL; + spin_unlock_bh(&adap->tid_release_lock); + + while (!(skb = alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL))) + schedule_timeout_uninterruptible(1); + + mk_tid_release(skb, chan, p - adap->tids.tid_tab); + t4_ofld_send(adap, skb); + spin_lock_bh(&adap->tid_release_lock); + } + adap->tid_release_task_busy = false; + spin_unlock_bh(&adap->tid_release_lock); +} + +/* + * Release a TID and inform HW. If we are unable to allocate the release + * message we defer to a work queue. + */ +void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) +{ + void *old; + struct sk_buff *skb; + struct adapter *adap = container_of(t, struct adapter, tids); + + old = t->tid_tab[tid]; + skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); + if (likely(skb)) { + t->tid_tab[tid] = NULL; + mk_tid_release(skb, chan, tid); + t4_ofld_send(adap, skb); + } else + cxgb4_queue_tid_release(t, chan, tid); + if (old) + atomic_dec(&t->tids_in_use); +} +EXPORT_SYMBOL(cxgb4_remove_tid); + +/* + * Allocate and initialize the TID tables. Returns 0 on success. + */ +static int tid_init(struct tid_info *t) +{ + size_t size; + unsigned int natids = t->natids; + + size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + + t->nstids * sizeof(*t->stid_tab) + + BITS_TO_LONGS(t->nstids) * sizeof(long); + t->tid_tab = t4_alloc_mem(size); + if (!t->tid_tab) + return -ENOMEM; + + t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; + t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; + t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; + spin_lock_init(&t->stid_lock); + spin_lock_init(&t->atid_lock); + + t->stids_in_use = 0; + t->afree = NULL; + t->atids_in_use = 0; + atomic_set(&t->tids_in_use, 0); + + /* Setup the free list for atid_tab and clear the stid bitmap. */ + if (natids) { + while (--natids) + t->atid_tab[natids - 1].next = &t->atid_tab[natids]; + t->afree = t->atid_tab; + } + bitmap_zero(t->stid_bmap, t->nstids); + return 0; +} + +/** + * cxgb4_create_server - create an IP server + * @dev: the device + * @stid: the server TID + * @sip: local IP address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IP server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. 
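tid_init() above makes one allocation and carves the TID, active-open TID, and server TID tables (plus the stid bitmap) out of it, then threads the atid entries into a free list, relying on the zeroed allocation to terminate it. A simplified user-space sketch of the same layout and threading, with tiny counts and the stid bitmap omitted:

#include <stdio.h>
#include <stdlib.h>

union aopen_entry {
	void *data;
	union aopen_entry *next;
};

int main(void)
{
	unsigned int ntids = 8, natids = 4, nstids = 4, n;
	size_t size = ntids * sizeof(void *) +
		      natids * sizeof(union aopen_entry) +
		      nstids * sizeof(void *);	/* one pointer per server entry */
	void **tid_tab = calloc(1, size);	/* zeroed, like t4_alloc_mem() */
	union aopen_entry *atid_tab = (union aopen_entry *)&tid_tab[ntids];
	union aopen_entry *afree;

	/* thread the atid free list exactly as tid_init() does; the last
	 * entry's ->next stays NULL courtesy of the zeroed memory */
	for (n = natids; --n; )
		atid_tab[n - 1].next = &atid_tab[n];
	afree = atid_tab;

	for (n = 0; afree; afree = afree->next)
		n++;
	printf("free atids: %u\n", n);		/* prints 4 with these counts */
	free(tid_tab);
	return 0;
}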
+ */ +int cxgb4_create_server(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip = sip; + req->peer_ip = htonl(0); + chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; + req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_ASK | + SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); + return t4_mgmt_tx(adap, skb); +} +EXPORT_SYMBOL(cxgb4_create_server); + +/** + * cxgb4_create_server6 - create an IPv6 server + * @dev: the device + * @stid: the server TID + * @sip: local IPv6 address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IPv6 server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. + */ +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req6 *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip_hi = *(__be64 *)(sip->s6_addr); + req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); + req->peer_ip_hi = cpu_to_be64(0); + req->peer_ip_lo = cpu_to_be64(0); + chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; + req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_ASK | + SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); + return t4_mgmt_tx(adap, skb); +} +EXPORT_SYMBOL(cxgb4_create_server6); + +/** + * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU + * @mtus: the HW MTU table + * @mtu: the target MTU + * @idx: index of selected entry in the MTU table + * + * Returns the index and the value in the HW MTU table that is closest to + * but does not exceed @mtu, unless @mtu is smaller than any value in the + * table, in which case that smallest available value is selected. + */ +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx) +{ + unsigned int i = 0; + + while (i < NMTUS - 1 && mtus[i + 1] <= mtu) + ++i; + if (idx) + *idx = i; + return mtus[i]; +} +EXPORT_SYMBOL(cxgb4_best_mtu); + +/** + * cxgb4_port_chan - get the HW channel of a port + * @dev: the net device for the port + * + * Return the HW Tx channel of the given port. + */ +unsigned int cxgb4_port_chan(const struct net_device *dev) +{ + return netdev2pinfo(dev)->tx_chan; +} +EXPORT_SYMBOL(cxgb4_port_chan); + +/** + * cxgb4_port_viid - get the VI id of a port + * @dev: the net device for the port + * + * Return the VI id of the given port. 
+ */ +unsigned int cxgb4_port_viid(const struct net_device *dev) +{ + return netdev2pinfo(dev)->viid; +} +EXPORT_SYMBOL(cxgb4_port_viid); + +/** + * cxgb4_port_idx - get the index of a port + * @dev: the net device for the port + * + * Return the index of the given port. + */ +unsigned int cxgb4_port_idx(const struct net_device *dev) +{ + return netdev2pinfo(dev)->port_id; +} +EXPORT_SYMBOL(cxgb4_port_idx); + +/** + * cxgb4_netdev_by_hwid - return the net device of a HW port + * @pdev: identifies the adapter + * @id: the HW port id + * + * Return the net device associated with the interface with the given HW + * id. + */ +struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id) +{ + const struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap || id >= NCHAN) + return NULL; + id = adap->chan_map[id]; + return id < MAX_NPORTS ? adap->port[id] : NULL; +} +EXPORT_SYMBOL(cxgb4_netdev_by_hwid); + +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6) +{ + struct adapter *adap = pci_get_drvdata(pdev); + + spin_lock(&adap->stats_lock); + t4_tp_get_tcp_stats(adap, v4, v6); + spin_unlock(&adap->stats_lock); +} +EXPORT_SYMBOL(cxgb4_get_tcp_stats); + +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, + const unsigned int *pgsz_order) +{ + struct adapter *adap = netdev2adap(dev); + + t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); + t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | + HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | + HPZ3(pgsz_order[3])); +} +EXPORT_SYMBOL(cxgb4_iscsi_init); + +static struct pci_driver cxgb4_driver; + +static void check_neigh_update(struct neighbour *neigh) +{ + const struct device *parent; + const struct net_device *netdev = neigh->dev; + + if (netdev->priv_flags & IFF_802_1Q_VLAN) + netdev = vlan_dev_real_dev(netdev); + parent = netdev->dev.parent; + if (parent && parent->driver == &cxgb4_driver.driver) + t4_l2t_update(dev_get_drvdata(parent), neigh); +} + +static int netevent_cb(struct notifier_block *nb, unsigned long event, + void *data) +{ + switch (event) { + case NETEVENT_NEIGH_UPDATE: + check_neigh_update(data); + break; + case NETEVENT_PMTU_UPDATE: + case NETEVENT_REDIRECT: + default: + break; + } + return 0; +} + +static bool netevent_registered; +static struct notifier_block cxgb4_netevent_nb = { + .notifier_call = netevent_cb +}; + +static void uld_attach(struct adapter *adap, unsigned int uld) +{ + void *handle; + struct cxgb4_lld_info lli; + + lli.pdev = adap->pdev; + lli.l2t = adap->l2t; + lli.tids = &adap->tids; + lli.ports = adap->port; + lli.vr = &adap->vres; + lli.mtus = adap->params.mtus; + if (uld == CXGB4_ULD_RDMA) { + lli.rxq_ids = adap->sge.rdma_rxq; + lli.nrxq = adap->sge.rdmaqs; + } else if (uld == CXGB4_ULD_ISCSI) { + lli.rxq_ids = adap->sge.ofld_rxq; + lli.nrxq = adap->sge.ofldqsets; + } + lli.ntxq = adap->sge.ofldqsets; + lli.nchan = adap->params.nports; + lli.nports = adap->params.nports; + lli.wr_cred = adap->params.ofldq_wr_cred; + lli.adapter_type = adap->params.rev; + lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); + lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( + t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); + lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( + t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); + lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); + lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); + lli.fw_vers = adap->params.fw_vers; + + handle = ulds[uld].add(&lli); + if (IS_ERR(handle)) { + 
dev_warn(adap->pdev_dev, + "could not attach to the %s driver, error %ld\n", + uld_str[uld], PTR_ERR(handle)); + return; + } + + adap->uld_handle[uld] = handle; + + if (!netevent_registered) { + register_netevent_notifier(&cxgb4_netevent_nb); + netevent_registered = true; + } +} + +static void attach_ulds(struct adapter *adap) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + list_add_tail(&adap->list_node, &adapter_list); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (ulds[i].add) + uld_attach(adap, i); + mutex_unlock(&uld_mutex); +} + +static void detach_ulds(struct adapter *adap) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + list_del(&adap->list_node); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (adap->uld_handle[i]) { + ulds[i].state_change(adap->uld_handle[i], + CXGB4_STATE_DETACH); + adap->uld_handle[i] = NULL; + } + if (netevent_registered && list_empty(&adapter_list)) { + unregister_netevent_notifier(&cxgb4_netevent_nb); + netevent_registered = false; + } + mutex_unlock(&uld_mutex); +} + +static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (adap->uld_handle[i]) + ulds[i].state_change(adap->uld_handle[i], new_state); + mutex_unlock(&uld_mutex); +} + +/** + * cxgb4_register_uld - register an upper-layer driver + * @type: the ULD type + * @p: the ULD methods + * + * Registers an upper-layer driver with this driver and notifies the ULD + * about any presently available devices that support its type. Returns + * %-EBUSY if a ULD of the same type is already registered. + */ +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) +{ + int ret = 0; + struct adapter *adap; + + if (type >= CXGB4_ULD_MAX) + return -EINVAL; + mutex_lock(&uld_mutex); + if (ulds[type].add) { + ret = -EBUSY; + goto out; + } + ulds[type] = *p; + list_for_each_entry(adap, &adapter_list, list_node) + uld_attach(adap, type); +out: mutex_unlock(&uld_mutex); + return ret; +} +EXPORT_SYMBOL(cxgb4_register_uld); + +/** + * cxgb4_unregister_uld - unregister an upper-layer driver + * @type: the ULD type + * + * Unregisters an existing upper-layer driver. + */ +int cxgb4_unregister_uld(enum cxgb4_uld type) +{ + struct adapter *adap; + + if (type >= CXGB4_ULD_MAX) + return -EINVAL; + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) + adap->uld_handle[type] = NULL; + ulds[type].add = NULL; + mutex_unlock(&uld_mutex); + return 0; +} +EXPORT_SYMBOL(cxgb4_unregister_uld); + +/** + * cxgb_up - enable the adapter + * @adap: adapter being enabled + * + * Called when the first port is enabled, this function performs the + * actions necessary to make an adapter operational, such as completing + * the initialization of HW modules, and enabling interrupts. + * + * Must be called with the rtnl lock held. 
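For orientation, a skeletal ULD built against cxgb4_register_uld() and the struct cxgb4_uld_info interface declared later in this patch (cxgb4_uld.h). All "demo" names are hypothetical placeholders; a real ULD would return per-adapter state from ->add() and use it in the other callbacks rather than the dummy handle shown here:

#include <linux/module.h>

#include "cxgb4_uld.h"

static void *demo_add(const struct cxgb4_lld_info *lli)
{
	pr_info("demo ULD: attached, %d port(s), fw %#x\n",
		lli->nports, lli->fw_vers);
	return (void *)1;	/* stands in for real per-adapter state */
}

static int demo_rx_handler(void *handle, const __be64 *rsp,
			   const struct pkt_gl *gl)
{
	return 0;		/* consume ingress CPL messages */
}

static int demo_state_change(void *handle, enum cxgb4_state new_state)
{
	return 0;		/* react to UP/DOWN/DETACH notifications */
}

static const struct cxgb4_uld_info demo_uld_info = {
	.name		= "demo",
	.add		= demo_add,
	.rx_handler	= demo_rx_handler,
	.state_change	= demo_state_change,
};

static int __init demo_init(void)
{
	return cxgb4_register_uld(CXGB4_ULD_ISCSI, &demo_uld_info);
}

static void __exit demo_exit(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");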
+ */ +static int cxgb_up(struct adapter *adap) +{ + int err = 0; + + if (!(adap->flags & FULL_INIT_DONE)) { + err = setup_sge_queues(adap); + if (err) + goto out; + err = setup_rss(adap); + if (err) { + t4_free_sge_resources(adap); + goto out; + } + if (adap->flags & USING_MSIX) + name_msix_vecs(adap); + adap->flags |= FULL_INIT_DONE; + } + + if (adap->flags & USING_MSIX) { + err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, + adap->msix_info[0].desc, adap); + if (err) + goto irq_err; + + err = request_msix_queue_irqs(adap); + if (err) { + free_irq(adap->msix_info[0].vec, adap); + goto irq_err; + } + } else { + err = request_irq(adap->pdev->irq, t4_intr_handler(adap), + (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, + adap->name, adap); + if (err) + goto irq_err; + } + enable_rx(adap); + t4_sge_start(adap); + t4_intr_enable(adap); + notify_ulds(adap, CXGB4_STATE_UP); + out: + return err; + irq_err: + dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); + goto out; +} + +static void cxgb_down(struct adapter *adapter) +{ + t4_intr_disable(adapter); + cancel_work_sync(&adapter->tid_release_task); + adapter->tid_release_task_busy = false; + + if (adapter->flags & USING_MSIX) { + free_msix_queue_irqs(adapter); + free_irq(adapter->msix_info[0].vec, adapter); + } else + free_irq(adapter->pdev->irq, adapter); + quiesce_rx(adapter); +} + +/* + * net_device operations + */ +static int cxgb_open(struct net_device *dev) +{ + int err; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) + return err; + + dev->real_num_tx_queues = pi->nqsets; + set_bit(pi->tx_chan, &adapter->open_device_map); + link_start(dev); + netif_tx_start_all_queues(dev); + return 0; +} + +static int cxgb_close(struct net_device *dev) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + ret = t4_enable_vi(adapter, 0, pi->viid, false, false); + + clear_bit(pi->tx_chan, &adapter->open_device_map); + + if (!adapter->open_device_map) + cxgb_down(adapter); + return 0; +} + +static struct net_device_stats *cxgb_get_stats(struct net_device *dev) +{ + struct port_stats stats; + struct port_info *p = netdev_priv(dev); + struct adapter *adapter = p->adapter; + struct net_device_stats *ns = &dev->stats; + + spin_lock(&adapter->stats_lock); + t4_get_port_stats(adapter, p->tx_chan, &stats); + spin_unlock(&adapter->stats_lock); + + ns->tx_bytes = stats.tx_octets; + ns->tx_packets = stats.tx_frames; + ns->rx_bytes = stats.rx_octets; + ns->rx_packets = stats.rx_frames; + ns->multicast = stats.rx_mcast_frames; + + /* detailed rx_errors */ + ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + + stats.rx_runt; + ns->rx_over_errors = 0; + ns->rx_crc_errors = stats.rx_fcs_err; + ns->rx_frame_errors = stats.rx_symbol_err; + ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + + stats.rx_ovflow2 + stats.rx_ovflow3 + + stats.rx_trunc0 + stats.rx_trunc1 + + stats.rx_trunc2 + stats.rx_trunc3; + ns->rx_missed_errors = 0; + + /* detailed tx_errors */ + ns->tx_aborted_errors = 0; + ns->tx_carrier_errors = 0; + ns->tx_fifo_errors = 0; + ns->tx_heartbeat_errors = 0; + ns->tx_window_errors = 0; + + ns->tx_errors = stats.tx_error_frames; + ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + + ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; + return ns; +} + +static int cxgb_ioctl(struct net_device *dev, 
struct ifreq *req, int cmd) +{ + int ret = 0, prtad, devad; + struct port_info *pi = netdev_priv(dev); + struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; + + switch (cmd) { + case SIOCGMIIPHY: + if (pi->mdio_addr < 0) + return -EOPNOTSUPP; + data->phy_id = pi->mdio_addr; + break; + case SIOCGMIIREG: + case SIOCSMIIREG: + if (mdio_phy_id_is_c45(data->phy_id)) { + prtad = mdio_phy_id_prtad(data->phy_id); + devad = mdio_phy_id_devad(data->phy_id); + } else if (data->phy_id < 32) { + prtad = data->phy_id; + devad = 0; + data->reg_num &= 0x1f; + } else + return -EINVAL; + + if (cmd == SIOCGMIIREG) + ret = t4_mdio_rd(pi->adapter, 0, prtad, devad, + data->reg_num, &data->val_out); + else + ret = t4_mdio_wr(pi->adapter, 0, prtad, devad, + data->reg_num, data->val_in); + break; + default: + return -EOPNOTSUPP; + } + return ret; +} + +static void cxgb_set_rxmode(struct net_device *dev) +{ + /* unfortunately we can't return errors to the stack */ + set_rxmode(dev, -1, false); +} + +static int cxgb_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ + return -EINVAL; + ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, + true); + if (!ret) + dev->mtu = new_mtu; + return ret; +} + +static int cxgb_set_mac_addr(struct net_device *dev, void *p) +{ + int ret; + struct sockaddr *addr = p; + struct port_info *pi = netdev_priv(dev); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt, + addr->sa_data, true, true); + if (ret < 0) + return ret; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + pi->xact_addr_filt = ret; + return 0; +} + +static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct port_info *pi = netdev_priv(dev); + + pi->vlan_grp = grp; + t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cxgb_netpoll(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + if (adap->flags & USING_MSIX) { + int i; + struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; + + for (i = pi->nqsets; i; i--, rx++) + t4_sge_intr_msix(0, &rx->rspq); + } else + t4_intr_handler(adap)(0, adap); +} +#endif + +static const struct net_device_ops cxgb4_netdev_ops = { + .ndo_open = cxgb_open, + .ndo_stop = cxgb_close, + .ndo_start_xmit = t4_eth_xmit, + .ndo_get_stats = cxgb_get_stats, + .ndo_set_rx_mode = cxgb_set_rxmode, + .ndo_set_mac_address = cxgb_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = cxgb_ioctl, + .ndo_change_mtu = cxgb_change_mtu, + .ndo_vlan_rx_register = vlan_rx_register, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cxgb_netpoll, +#endif +}; + +void t4_fatal_err(struct adapter *adap) +{ + t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); + t4_intr_disable(adap); + dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); +} + +static void setup_memwin(struct adapter *adap) +{ + u32 bar0; + + bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), + (bar0 + MEMWIN0_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), + (bar0 + MEMWIN1_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); + 
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), + (bar0 + MEMWIN2_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); +} + +/* + * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. + */ +#define MAX_ATIDS 8192U + +/* + * Phase 0 of initialization: contact FW, obtain config, perform basic init. + */ +static int adap_init0(struct adapter *adap) +{ + int ret; + u32 v, port_vec; + enum dev_state state; + u32 params[7], val[7]; + struct fw_caps_config_cmd c; + + ret = t4_check_fw_version(adap); + if (ret == -EINVAL || ret > 0) { + if (upgrade_fw(adap) >= 0) /* recache FW version */ + ret = t4_check_fw_version(adap); + } + if (ret < 0) + return ret; + + /* contact FW, request master */ + ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state); + if (ret < 0) { + dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", + ret); + return ret; + } + + /* reset device */ + ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST); + if (ret < 0) + goto bye; + + /* get device capabilities */ + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + c.retval_len16 = htonl(FW_LEN16(c)); + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret < 0) + goto bye; + + /* select capabilities we'll be using */ + if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { + if (!vf_acls) + c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); + else + c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM); + } else if (vf_acls) { + dev_err(adap->pdev_dev, "virtualization ACLs not supported\n"); + ret = -EINVAL; + goto bye; + } + c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL); + if (ret < 0) + goto bye; + + ret = t4_config_glbl_rss(adap, 0, + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); + if (ret < 0) + goto bye; + + ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16, + FW_CMD_CAP_PF, FW_CMD_CAP_PF); + if (ret < 0) + goto bye; + + for (v = 0; v < SGE_NTIMERS - 1; v++) + adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); + adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; + adap->sge.counter_val[0] = 1; + for (v = 1; v < SGE_NCOUNTERS; v++) + adap->sge.counter_val[v] = min(intr_cnt[v - 1], + THRESHOLD_3_MASK); + t4_sge_init(adap); + + /* get basic stuff going */ + ret = t4_early_init(adap, 0); + if (ret < 0) + goto bye; + +#define FW_PARAM_DEV(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + +#define FW_PARAM_PFVF(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) + + params[0] = FW_PARAM_DEV(PORTVEC); + params[1] = FW_PARAM_PFVF(L2T_START); + params[2] = FW_PARAM_PFVF(L2T_END); + params[3] = FW_PARAM_PFVF(FILTER_START); + params[4] = FW_PARAM_PFVF(FILTER_END); + ret = t4_query_params(adap, 0, 0, 0, 5, params, val); + if (ret < 0) + goto bye; + port_vec = val[0]; + adap->tids.ftid_base = val[3]; + adap->tids.nftids = val[4] - val[3] + 1; + + if (c.ofldcaps) { + /* query offload-related parameters */ + params[0] = FW_PARAM_DEV(NTID); + params[1] = FW_PARAM_PFVF(SERVER_START); + params[2] = FW_PARAM_PFVF(SERVER_END); + params[3] = FW_PARAM_PFVF(TDDP_START); + params[4] = FW_PARAM_PFVF(TDDP_END); + params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); + ret = t4_query_params(adap, 0, 0, 0, 6, params, val); + if (ret < 0) + goto bye; + adap->tids.ntids = val[0]; + adap->tids.natids =
min(adap->tids.ntids / 2, MAX_ATIDS); + adap->tids.stid_base = val[1]; + adap->tids.nstids = val[2] - val[1] + 1; + adap->vres.ddp.start = val[3]; + adap->vres.ddp.size = val[4] - val[3] + 1; + adap->params.ofldq_wr_cred = val[5]; + adap->params.offload = 1; + } + if (c.rdmacaps) { + params[0] = FW_PARAM_PFVF(STAG_START); + params[1] = FW_PARAM_PFVF(STAG_END); + params[2] = FW_PARAM_PFVF(RQ_START); + params[3] = FW_PARAM_PFVF(RQ_END); + params[4] = FW_PARAM_PFVF(PBL_START); + params[5] = FW_PARAM_PFVF(PBL_END); + ret = t4_query_params(adap, 0, 0, 0, 6, params, val); + if (ret < 0) + goto bye; + adap->vres.stag.start = val[0]; + adap->vres.stag.size = val[1] - val[0] + 1; + adap->vres.rq.start = val[2]; + adap->vres.rq.size = val[3] - val[2] + 1; + adap->vres.pbl.start = val[4]; + adap->vres.pbl.size = val[5] - val[4] + 1; + } + if (c.iscsicaps) { + params[0] = FW_PARAM_PFVF(ISCSI_START); + params[1] = FW_PARAM_PFVF(ISCSI_END); + ret = t4_query_params(adap, 0, 0, 0, 2, params, val); + if (ret < 0) + goto bye; + adap->vres.iscsi.start = val[0]; + adap->vres.iscsi.size = val[1] - val[0] + 1; + } +#undef FW_PARAM_PFVF +#undef FW_PARAM_DEV + + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + adap->flags |= FW_OK; + + /* These are finalized by FW initialization, load their values now */ + v = t4_read_reg(adap, TP_TIMER_RESOLUTION); + adap->params.tp.tre = TIMERRESOLUTION_GET(v); + t4_read_mtu_tbl(adap, adap->params.mtus, NULL); + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + + /* tweak some settings */ + t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); + t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); + t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); + v = t4_read_reg(adap, TP_PIO_DATA); + t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); + setup_memwin(adap); + return 0; + + /* + * If a command timed out or failed with EIO FW does not operate within + * its spec or something catastrophic happened to HW/FW, stop issuing + * commands. + */ +bye: if (ret != -ETIMEDOUT && ret != -EIO) + t4_fw_bye(adap, 0); + return ret; +} + +static inline bool is_10g_port(const struct link_config *lc) +{ + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; +} + +static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, + unsigned int size, unsigned int iqe_size) +{ + q->intr_params = QINTR_TIMER_IDX(timer_idx) | + (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); + q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0; + q->iqe_len = iqe_size; + q->size = size; +} + +/* + * Perform default configuration of DMA queues depending on the number and type + * of ports we found and the number of available CPUs. Most settings can be + * modified by the admin prior to actual use. + */ +static void __devinit cfg_queues(struct adapter *adap) +{ + struct sge *s = &adap->sge; + int i, q10g = 0, n10g = 0, qidx = 0; + + for_each_port(adap, i) + n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); + + /* + * We default to 1 queue per non-10G port and up to # of cores queues + * per 10G port. + */ + if (n10g) + q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; + if (q10g > num_online_cpus()) + q10g = num_online_cpus(); + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = qidx; + pi->nqsets = is_10g_port(&pi->link_cfg) ? 
q10g : 1; + qidx += pi->nqsets; + } + + s->ethqsets = qidx; + s->max_ethqsets = qidx; /* MSI-X may lower it later */ + + if (is_offload(adap)) { + /* + * For offload we use 1 queue/channel if all ports are up to 1G, + * otherwise we divide all available queues amongst the channels + * capped by the number of available cores. + */ + if (n10g) { + i = min_t(int, ARRAY_SIZE(s->ofldrxq), + num_online_cpus()); + s->ofldqsets = roundup(i, adap->params.nports); + } else + s->ofldqsets = adap->params.nports; + /* For RDMA one Rx queue per channel suffices */ + s->rdmaqs = adap->params.nports; + } + + for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { + struct sge_eth_rxq *r = &s->ethrxq[i]; + + init_rspq(&r->rspq, 0, 0, 1024, 64); + r->fl.size = 72; + } + + for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) + s->ethtxq[i].q.size = 1024; + + for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) + s->ctrlq[i].q.size = 512; + + for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) + s->ofldtxq[i].q.size = 1024; + + for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { + struct sge_ofld_rxq *r = &s->ofldrxq[i]; + + init_rspq(&r->rspq, 0, 0, 1024, 64); + r->rspq.uld = CXGB4_ULD_ISCSI; + r->fl.size = 72; + } + + for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { + struct sge_ofld_rxq *r = &s->rdmarxq[i]; + + init_rspq(&r->rspq, 0, 0, 511, 64); + r->rspq.uld = CXGB4_ULD_RDMA; + r->fl.size = 72; + } + + init_rspq(&s->fw_evtq, 6, 0, 512, 64); + init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); +} + +/* + * Reduce the number of Ethernet queues across all ports to at most n. + * n provides at least one queue per port. + */ +static void __devinit reduce_ethqs(struct adapter *adap, int n) +{ + int i; + struct port_info *pi; + + while (n < adap->sge.ethqsets) + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + if (pi->nqsets > 1) { + pi->nqsets--; + adap->sge.ethqsets--; + if (adap->sge.ethqsets <= n) + break; + } + } + + n = 0; + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + pi->first_qset = n; + n += pi->nqsets; + } +} + +/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ +#define EXTRA_VECS 2 + +static int __devinit enable_msix(struct adapter *adap) +{ + int ofld_need = 0; + int i, err, want, need; + struct sge *s = &adap->sge; + unsigned int nchan = adap->params.nports; + struct msix_entry entries[MAX_INGQ + 1]; + + for (i = 0; i < ARRAY_SIZE(entries); ++i) + entries[i].entry = i; + + want = s->max_ethqsets + EXTRA_VECS; + if (is_offload(adap)) { + want += s->rdmaqs + s->ofldqsets; + /* need nchan for each possible ULD */ + ofld_need = 2 * nchan; + } + need = adap->params.nports + EXTRA_VECS + ofld_need; + + while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need) + want = err; + + if (!err) { + /* + * Distribute available vectors to the various queue groups. + * Every group gets its minimum requirement and NIC gets top + * priority for leftovers. 
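reduce_ethqs() above trims queue sets round-robin, one at a time, from ports that still have more than one, so multi-queue (10G) ports shed queues evenly while single-queue ports are untouched. A stand-alone sketch of that loop, mirroring reduce_ethqs() with the first_qset renumbering pass omitted; as in the driver, the cap must leave at least one queue per port:

#include <stdio.h>

#define NPORTS 3

static void reduce_queues(int nqsets[NPORTS], int *total, int n)
{
	int i;

	while (n < *total)
		for (i = 0; i < NPORTS; i++)
			if (nqsets[i] > 1) {
				nqsets[i]--;
				(*total)--;
				if (*total <= n)
					break;
			}
}

int main(void)
{
	int q[NPORTS] = { 8, 8, 1 }, total = 17;

	reduce_queues(q, &total, 10);
	printf("%d %d %d (total %d)\n", q[0], q[1], q[2], total);
	/* -> 4 5 1 (total 10): the 10G ports shed queues almost evenly,
	 * the single-queue port keeps its one */
	return 0;
}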
+ */ + i = want - EXTRA_VECS - ofld_need; + if (i < s->max_ethqsets) { + s->max_ethqsets = i; + if (i < s->ethqsets) + reduce_ethqs(adap, i); + } + if (is_offload(adap)) { + i = want - EXTRA_VECS - s->max_ethqsets; + i -= ofld_need - nchan; + s->ofldqsets = (i / nchan) * nchan; /* round down */ + } + for (i = 0; i < want; ++i) + adap->msix_info[i].vec = entries[i].vector; + } else if (err > 0) + dev_info(adap->pdev_dev, + "only %d MSI-X vectors left, not using MSI-X\n", err); + return err; +} + +#undef EXTRA_VECS + +static void __devinit print_port_info(struct adapter *adap) +{ + static const char *base[] = { + "R", "KX4", "T", "KX", "T", "KR", "CX4" + }; + + int i; + char buf[80]; + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + const struct port_info *pi = netdev_priv(dev); + char *bufp = buf; + + if (!test_bit(i, &adap->registered_device_map)) + continue; + + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) + bufp += sprintf(bufp, "100/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) + bufp += sprintf(bufp, "1000/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) + bufp += sprintf(bufp, "10G/"); + if (bufp != buf) + --bufp; + sprintf(bufp, "BASE-%s", base[pi->port_type]); + + netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n", + adap->params.vpd.id, adap->params.rev, + buf, is_offload(adap) ? "R" : "", + adap->params.pci.width, + (adap->flags & USING_MSIX) ? " MSI-X" : + (adap->flags & USING_MSI) ? " MSI" : ""); + if (adap->name == dev->name) + netdev_info(dev, "S/N: %s, E/C: %s\n", + adap->params.vpd.sn, adap->params.vpd.ec); + } +} + +#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\ + NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) + +static int __devinit init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int func, i, err; + struct port_info *pi; + unsigned int highdma = 0; + struct adapter *adapter = NULL; + + printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); + + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + /* Just info, some other driver may have claimed the device. 
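The want/need negotiation in enable_msix() above leans on the pci_enable_msix() return convention of this era: 0 means MSI-X was enabled, a positive return is the number of vectors actually available (MSI-X was not enabled, so the caller may retry with fewer), and a negative return is a hard error. A sketch of just that loop, factored out; negotiate_msix() is not a driver function, only an illustration of the pattern:

#include <linux/pci.h>

static int negotiate_msix(struct pci_dev *pdev, struct msix_entry *entries,
			  int want, int need)
{
	int err;

	/* shrink the request to what the platform offers, as long as it
	 * still covers the minimum we need */
	while ((err = pci_enable_msix(pdev, entries, want)) >= need)
		want = err;

	if (err)
		return err;	/* <0 hard error, >0 fewer than "need" left */
	return want;		/* MSI-X enabled with this many vectors */
}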
*/ + dev_info(&pdev->dev, "cannot obtain PCI resources\n"); + return err; + } + + /* We control everything through PF 0 */ + func = PCI_FUNC(pdev->devfn); + if (func > 0) + goto sriov; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto out_release_regions; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + highdma = NETIF_F_HIGHDMA; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " + "coherent allocations\n"); + goto out_disable_device; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto out_disable_device; + } + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + pci_save_state(pdev); + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto out_disable_device; + } + + adapter->regs = pci_ioremap_bar(pdev, 0); + if (!adapter->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto out_free_adapter; + } + + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + adapter->name = pci_name(pdev); + adapter->msg_enable = dflt_msg_enable; + memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); + + spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->tid_release_lock); + + INIT_WORK(&adapter->tid_release_task, process_tid_release_list); + + err = t4_prep_adapter(adapter); + if (err) + goto out_unmap_bar; + err = adap_init0(adapter); + if (err) + goto out_unmap_bar; + + for_each_port(adapter, i) { + struct net_device *netdev; + + netdev = alloc_etherdev_mq(sizeof(struct port_info), + MAX_ETH_QSETS); + if (!netdev) { + err = -ENOMEM; + goto out_free_dev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter->port[i] = netdev; + pi = netdev_priv(netdev); + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->rx_offload = RX_CSO; + pi->port_id = i; + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netdev->irq = pdev->irq; + + netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->features |= NETIF_F_GRO | highdma; + netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + netdev->vlan_features = netdev->features & VLAN_FEAT; + + netdev->netdev_ops = &cxgb4_netdev_ops; + SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); + } + + pci_set_drvdata(pdev, adapter); + + if (adapter->flags & FW_OK) { + err = t4_port_init(adapter, 0, 0, 0); + if (err) + goto out_free_dev; + } + + /* + * Configure queues and allocate tables now, they can be needed as + * soon as the first register_netdev completes. + */ + cfg_queues(adapter); + + adapter->l2t = t4_init_l2t(); + if (!adapter->l2t) { + /* We tolerate a lack of L2T, giving up some functionality */ + dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); + adapter->params.offload = 0; + } + + if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { + dev_warn(&pdev->dev, "could not allocate TID table, " + "continuing\n"); + adapter->params.offload = 0; + } + + /* + * The card is now ready to go. If any errors occur during device + * registration we do not fail the whole card but rather proceed only + * with the ports we manage to register successfully. However we must + * register at least one net device. 
+ */ + for_each_port(adapter, i) { + err = register_netdev(adapter->port[i]); + if (err) + dev_warn(&pdev->dev, + "cannot register net device %s, skipping\n", + adapter->port[i]->name); + else { + /* + * Change the name we use for messages to the name of + * the first successfully registered interface. + */ + if (!adapter->registered_device_map) + adapter->name = adapter->port[i]->name; + + __set_bit(i, &adapter->registered_device_map); + adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i; + } + } + if (!adapter->registered_device_map) { + dev_err(&pdev->dev, "could not register any net devices\n"); + goto out_free_dev; + } + + if (cxgb4_debugfs_root) { + adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), + cxgb4_debugfs_root); + setup_debugfs(adapter); + } + + /* See what interrupts we'll be using */ + if (msi > 1 && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else if (msi > 0 && pci_enable_msi(pdev) == 0) + adapter->flags |= USING_MSI; + + if (is_offload(adapter)) + attach_ulds(adapter); + + print_port_info(adapter); + +sriov: +#ifdef CONFIG_PCI_IOV + if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) + if (pci_enable_sriov(pdev, num_vf[func]) == 0) + dev_info(&pdev->dev, + "instantiated %u virtual functions\n", + num_vf[func]); +#endif + return 0; + + out_free_dev: + t4_free_mem(adapter->tids.tid_tab); + t4_free_mem(adapter->l2t); + for_each_port(adapter, i) + if (adapter->port[i]) + free_netdev(adapter->port[i]); + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, 0); + out_unmap_bar: + iounmap(adapter->regs); + out_free_adapter: + kfree(adapter); + out_disable_device: + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + out_release_regions: + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void __devexit remove_one(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + pci_disable_sriov(pdev); + + if (adapter) { + int i; + + if (is_offload(adapter)) + detach_ulds(adapter); + + for_each_port(adapter, i) + if (test_bit(i, &adapter->registered_device_map)) + unregister_netdev(adapter->port[i]); + + if (adapter->debugfs_root) + debugfs_remove_recursive(adapter->debugfs_root); + + t4_sge_stop(adapter); + t4_free_sge_resources(adapter); + t4_free_mem(adapter->l2t); + t4_free_mem(adapter->tids.tid_tab); + disable_msi(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]) + free_netdev(adapter->port[i]); + + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, 0); + iounmap(adapter->regs); + kfree(adapter); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + } else if (PCI_FUNC(pdev->devfn) > 0) + pci_release_regions(pdev); +} + +static struct pci_driver cxgb4_driver = { + .name = KBUILD_MODNAME, + .id_table = cxgb4_pci_tbl, + .probe = init_one, + .remove = __devexit_p(remove_one), +}; + +static int __init cxgb4_init_module(void) +{ + int ret; + + /* Debugfs support is optional, just warn if this fails */ + cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!cxgb4_debugfs_root) + pr_warning("could not create debugfs entry, continuing\n"); + + ret = pci_register_driver(&cxgb4_driver); + if (ret < 0) + debugfs_remove(cxgb4_debugfs_root); + return ret; +} + +static void __exit cxgb4_cleanup_module(void) +{ + pci_unregister_driver(&cxgb4_driver); + debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ +} + +module_init(cxgb4_init_module); +module_exit(cxgb4_cleanup_module); diff --git 
a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h new file mode 100644 index 00000000000..5b98546ac92 --- /dev/null +++ b/drivers/net/cxgb4/cxgb4_uld.h @@ -0,0 +1,239 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_OFLD_H +#define __CXGB4_OFLD_H + +#include <linux/cache.h> +#include <linux/spinlock.h> +#include <linux/skbuff.h> +#include <asm/atomic.h> + +/* CPL message priority levels */ +enum { + CPL_PRIORITY_DATA = 0, /* data messages */ + CPL_PRIORITY_SETUP = 1, /* connection setup messages */ + CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */ + CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */ + CPL_PRIORITY_ACK = 1, /* RX ACK messages */ + CPL_PRIORITY_CONTROL = 1 /* control messages */ +}; + +#define INIT_TP_WR(w, tid) do { \ + (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \ + FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ + FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +#define INIT_TP_WR_CPL(w, cpl, tid) do { \ + INIT_TP_WR(w, tid); \ + OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \ +} while (0) + +#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ + (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ + FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +/* Special asynchronous notification message */ +#define CXGB4_MSG_AN ((void *)1) + +struct serv_entry { + void *data; +}; + +union aopen_entry { + void *data; + union aopen_entry *next; +}; + +/* + * Holds the size, base address, free list start, etc of the TID, server TID, + * and active-open TID tables. The tables themselves are allocated dynamically. 
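The INIT_TP_WR*() macros above are the building blocks for CPL messages such as the mk_tid_release() helper earlier in this patch. As an illustration, a hypothetical helper composing a different CPL the same way; struct cpl_abort_req and CPL_ABORT_SEND_RST come from t4_msg.h elsewhere in this patch set, and a real caller would size and allocate the skb as mk_tid_release()'s callers do:

#include <linux/skbuff.h>

#include "cxgb4_uld.h"
#include "t4_msg.h"

static void mk_abort_req(struct sk_buff *skb, unsigned int chan,
			 unsigned int tid)
{
	struct cpl_abort_req *req;

	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
	req = (struct cpl_abort_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_ABORT_REQ, tid);	/* WR header + opcode/tid */
	req->cmd = CPL_ABORT_SEND_RST;
}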
+ */ +struct tid_info { + void **tid_tab; + unsigned int ntids; + + struct serv_entry *stid_tab; + unsigned long *stid_bmap; + unsigned int nstids; + unsigned int stid_base; + + union aopen_entry *atid_tab; + unsigned int natids; + + unsigned int nftids; + unsigned int ftid_base; + + spinlock_t atid_lock ____cacheline_aligned_in_smp; + union aopen_entry *afree; + unsigned int atids_in_use; + + spinlock_t stid_lock; + unsigned int stids_in_use; + + atomic_t tids_in_use; +}; + +static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) +{ + return tid < t->ntids ? t->tid_tab[tid] : NULL; +} + +static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) +{ + return atid < t->natids ? t->atid_tab[atid].data : NULL; +} + +static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) +{ + stid -= t->stid_base; + return stid < t->nstids ? t->stid_tab[stid].data : NULL; +} + +static inline void cxgb4_insert_tid(struct tid_info *t, void *data, + unsigned int tid) +{ + t->tid_tab[tid] = data; + atomic_inc(&t->tids_in_use); +} + +int cxgb4_alloc_atid(struct tid_info *t, void *data); +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); +void cxgb4_free_atid(struct tid_info *t, unsigned int atid); +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); +void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); +void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, + unsigned int tid); + +struct in6_addr; + +int cxgb4_create_server(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, unsigned int queue); +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue); + +static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) +{ + skb_set_queue_mapping(skb, (queue << 1) | prio); +} + +enum cxgb4_uld { + CXGB4_ULD_RDMA, + CXGB4_ULD_ISCSI, + CXGB4_ULD_MAX +}; + +enum cxgb4_state { + CXGB4_STATE_UP, + CXGB4_STATE_START_RECOVERY, + CXGB4_STATE_DOWN, + CXGB4_STATE_DETACH +}; + +struct pci_dev; +struct l2t_data; +struct net_device; +struct pkt_gl; +struct tp_tcp_stats; + +struct cxgb4_range { + unsigned int start; + unsigned int size; +}; + +struct cxgb4_virt_res { /* virtualized HW resources */ + struct cxgb4_range ddp; + struct cxgb4_range iscsi; + struct cxgb4_range stag; + struct cxgb4_range rq; + struct cxgb4_range pbl; +}; + +/* + * Block of information the LLD provides to ULDs attaching to a device. 
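The ULD interface declared here follows a plain table-of-callbacks pattern: the low-level driver keeps one ops slot per upper-layer protocol type and dispatches received messages through it. A toy sketch of that registration/dispatch shape, with made-up names and only a rx_handler callback (no locking, no state_change):

#include <stdio.h>

/* Toy version of the cxgb4 ULD registration pattern: one callback
 * slot per upper-layer driver type, filled in at register time. */
enum uld_type { ULD_RDMA, ULD_ISCSI, ULD_MAX };

struct uld_info {
	const char *name;
	int (*rx_handler)(void *handle, const void *msg);
};

static const struct uld_info *ulds[ULD_MAX];

static int register_uld(enum uld_type t, const struct uld_info *p)
{
	if (t >= ULD_MAX || ulds[t])
		return -1;              /* bad type or already registered */
	ulds[t] = p;
	return 0;
}

/* the lower-level driver calls this when a message arrives for a ULD */
static int deliver(enum uld_type t, void *handle, const void *msg)
{
	if (!ulds[t])
		return -1;              /* nobody attached */
	return ulds[t]->rx_handler(handle, msg);
}

static int iscsi_rx(void *handle, const void *msg)
{
	(void)handle;
	printf("iscsi got: %s\n", (const char *)msg);
	return 0;
}

int main(void)
{
	static const struct uld_info iscsi = { "iscsi", iscsi_rx };

	register_uld(ULD_ISCSI, &iscsi);
	deliver(ULD_ISCSI, NULL, "hello");
	return 0;
}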
+ */ +struct cxgb4_lld_info { + struct pci_dev *pdev; /* associated PCI device */ + struct l2t_data *l2t; /* L2 table */ + struct tid_info *tids; /* TID table */ + struct net_device **ports; /* device ports */ + const struct cxgb4_virt_res *vr; /* assorted HW resources */ + const unsigned short *mtus; /* MTU table */ + const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ + unsigned short nrxq; /* # of Rx queues */ + unsigned short ntxq; /* # of Tx queues */ + unsigned char nchan:4; /* # of channels */ + unsigned char nports:4; /* # of ports */ + unsigned char wr_cred; /* WR 16-byte credits */ + unsigned char adapter_type; /* type of adapter */ + unsigned char fw_api_ver; /* FW API version */ + unsigned int fw_vers; /* FW version */ + unsigned int iscsi_iolen; /* iSCSI max I/O length */ + unsigned short udb_density; /* # of user DB/page */ + unsigned short ucq_density; /* # of user CQs/page */ + void __iomem *gts_reg; /* address of GTS register */ + void __iomem *db_reg; /* address of kernel doorbell */ +}; + +struct cxgb4_uld_info { + const char *name; + void *(*add)(const struct cxgb4_lld_info *p); + int (*rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl); + int (*state_change)(void *handle, enum cxgb4_state new_state); +}; + +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); +int cxgb4_unregister_uld(enum cxgb4_uld type); +int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); +unsigned int cxgb4_port_chan(const struct net_device *dev); +unsigned int cxgb4_port_viid(const struct net_device *dev); +unsigned int cxgb4_port_idx(const struct net_device *dev); +struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id); +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx); +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6); +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, + const unsigned int *pgsz_order); +struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len); +#endif /* !__CXGB4_OFLD_H */ diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c new file mode 100644 index 00000000000..9f96724a133 --- /dev/null +++ b/drivers/net/cxgb4/l2t.c @@ -0,0 +1,624 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/if.h> +#include <linux/if_vlan.h> +#include <linux/jhash.h> +#include <net/neighbour.h> +#include "cxgb4.h" +#include "l2t.h" +#include "t4_msg.h" +#include "t4fw_api.h" + +#define VLAN_NONE 0xfff + +/* identifies sync vs async L2T_WRITE_REQs */ +#define F_SYNC_WR (1 << 12) + +enum { + L2T_STATE_VALID, /* entry is up to date */ + L2T_STATE_STALE, /* entry may be used but needs revalidation */ + L2T_STATE_RESOLVING, /* entry needs address resolution */ + L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ + + /* when state is one of the below the entry is not hashed */ + L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ + L2T_STATE_UNUSED /* entry not in use */ +}; + +struct l2t_data { + rwlock_t lock; + atomic_t nfree; /* number of free entries */ + struct l2t_entry *rover; /* starting point for next allocation */ + struct l2t_entry l2tab[L2T_SIZE]; +}; + +static inline unsigned int vlan_prio(const struct l2t_entry *e) +{ + return e->vlan >> 13; +} + +static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) +{ + if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ + atomic_dec(&d->nfree); +} + +/* + * To avoid having to check address families we do not allow v4 and v6 + * neighbors to be on the same hash chain. We keep v4 entries in the first + * half of available hash buckets and v6 in the second. + */ +enum { + L2T_SZ_HALF = L2T_SIZE / 2, + L2T_HASH_MASK = L2T_SZ_HALF - 1 +}; + +static inline unsigned int arp_hash(const u32 *key, int ifindex) +{ + return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK; +} + +static inline unsigned int ipv6_hash(const u32 *key, int ifindex) +{ + u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3]; + + return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK); +} + +static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex) +{ + return addr_len == 4 ? arp_hash(addr, ifindex) : + ipv6_hash(addr, ifindex); +} + +/* + * Checks if an L2T entry is for the given IP/IPv6 address. It does not check + * whether the L2T entry and the address are of the same address family. + * Callers ensure an address is only checked against L2T entries of the same + * family, something made trivial by the separation of IP and IPv6 hash chains + * mentioned above. Returns 0 if there's a match. + */ +static int addreq(const struct l2t_entry *e, const u32 *addr) +{ + if (e->v6) + return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) | + (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]); + return e->addr[0] ^ addr[0]; +} + +static void neigh_replace(struct l2t_entry *e, struct neighbour *n) +{ + neigh_hold(n); + if (e->neigh) + neigh_release(e->neigh); + e->neigh = n; +} + +/* + * Write an L2T entry. Must be called with the entry locked. + * The write may be synchronous or asynchronous.
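The family split performed by arp_hash()/ipv6_hash() above is easy to demonstrate in isolation: masking with L2T_HASH_MASK keeps v4 keys in buckets [0, L2T_SIZE/2) while the L2T_SZ_HALF offset pushes v6 keys into the upper half, so a hash chain never mixes address families. A standalone sketch (mix2() is an arbitrary stand-in for the kernel's jhash_2words(), and the sample values are made up):

#include <stdio.h>
#include <stdint.h>

#define L2T_SIZE      256
#define L2T_SZ_HALF   (L2T_SIZE / 2)
#define L2T_HASH_MASK (L2T_SZ_HALF - 1)

/* stand-in for the kernel's jhash_2words(); any decent mixer will do
 * for illustration */
static uint32_t mix2(uint32_t a, uint32_t b)
{
	uint32_t h = a * 2654435761u ^ b * 2246822519u;

	h ^= h >> 15;
	return h * 2654435761u;
}

static unsigned int arp_hash(const uint32_t *key, int ifindex)
{
	return mix2(key[0], (uint32_t)ifindex) & L2T_HASH_MASK;
}

static unsigned int ipv6_hash(const uint32_t *key, int ifindex)
{
	uint32_t x = key[0] ^ key[1] ^ key[2] ^ key[3];

	return L2T_SZ_HALF + (mix2(x, (uint32_t)ifindex) & L2T_HASH_MASK);
}

int main(void)
{
	uint32_t v4 = 0x0100007f;           /* 127.0.0.1, little-endian */
	uint32_t v6[4] = { 0, 0, 0, 1 };    /* ::1 */

	printf("v4 bucket %u (< %u)\n", arp_hash(&v4, 2), (unsigned)L2T_SZ_HALF);
	printf("v6 bucket %u (>= %u)\n", ipv6_hash(v6, 2), (unsigned)L2T_SZ_HALF);
	return 0;
}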
+ */ +static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) +{ + struct sk_buff *skb; + struct cpl_l2t_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, + e->idx | (sync ? F_SYNC_WR : 0) | + TID_QID(adap->sge.fw_evtq.abs_id))); + req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); + req->l2t_idx = htons(e->idx); + req->vlan = htons(e->vlan); + if (e->neigh) + memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); + memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); + + set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); + t4_ofld_send(adap, skb); + + if (sync && e->state != L2T_STATE_SWITCHING) + e->state = L2T_STATE_SYNC_WRITE; + return 0; +} + +/* + * Send packets waiting in an L2T entry's ARP queue. Must be called with the + * entry locked. + */ +static void send_pending(struct adapter *adap, struct l2t_entry *e) +{ + while (e->arpq_head) { + struct sk_buff *skb = e->arpq_head; + + e->arpq_head = skb->next; + skb->next = NULL; + t4_ofld_send(adap, skb); + } + e->arpq_tail = NULL; +} + +/* + * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a + * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T + * index it refers to. + */ +void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) +{ + unsigned int tid = GET_TID(rpl); + unsigned int idx = tid & (L2T_SIZE - 1); + + if (unlikely(rpl->status != CPL_ERR_NONE)) { + dev_err(adap->pdev_dev, + "Unexpected L2T_WRITE_RPL status %u for entry %u\n", + rpl->status, idx); + return; + } + + if (tid & F_SYNC_WR) { + struct l2t_entry *e = &adap->l2t->l2tab[idx]; + + spin_lock(&e->lock); + if (e->state != L2T_STATE_SWITCHING) { + send_pending(adap, e); + e->state = (e->neigh->nud_state & NUD_STALE) ? + L2T_STATE_STALE : L2T_STATE_VALID; + } + spin_unlock(&e->lock); + } +} + +/* + * Add a packet to an L2T entry's queue of packets awaiting resolution. + * Must be called with the entry's lock held. + */ +static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) +{ + skb->next = NULL; + if (e->arpq_head) + e->arpq_tail->next = skb; + else + e->arpq_head = skb; + e->arpq_tail = skb; +} + +int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct adapter *adap = netdev2adap(dev); + +again: + switch (e->state) { + case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ + neigh_event_send(e->neigh, NULL); + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_STALE) + e->state = L2T_STATE_VALID; + spin_unlock_bh(&e->lock); + case L2T_STATE_VALID: /* fast-path, send the packet on */ + return t4_ofld_send(adap, skb); + case L2T_STATE_RESOLVING: + case L2T_STATE_SYNC_WRITE: + spin_lock_bh(&e->lock); + if (e->state != L2T_STATE_SYNC_WRITE && + e->state != L2T_STATE_RESOLVING) { + spin_unlock_bh(&e->lock); + goto again; + } + arpq_enqueue(e, skb); + spin_unlock_bh(&e->lock); + + if (e->state == L2T_STATE_RESOLVING && + !neigh_event_send(e->neigh, NULL)) { + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_RESOLVING && e->arpq_head) + write_l2e(adap, e, 1); + spin_unlock_bh(&e->lock); + } + } + return 0; +} +EXPORT_SYMBOL(cxgb4_l2t_send); + +/* + * Allocate a free L2T entry. Must be called with l2t_data.lock held. 
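The per-entry ARP queue used by arpq_enqueue() and send_pending() above is a bare head/tail singly linked FIFO threaded through the packets' own next pointers, giving O(1) append and drain with no extra allocation. The same shape with a toy node type standing in for sk_buff:

#include <stdio.h>
#include <stddef.h>

/* toy stand-in for sk_buff: only the intrusive next pointer matters */
struct pkt {
	int id;
	struct pkt *next;
};

static struct pkt *arpq_head, *arpq_tail;

/* mirror of arpq_enqueue(): O(1) append via the tail pointer */
static void arpq_enqueue(struct pkt *p)
{
	p->next = NULL;
	if (arpq_head)
		arpq_tail->next = p;
	else
		arpq_head = p;
	arpq_tail = p;
}

/* mirror of send_pending(): drain in FIFO order, then clear the tail */
static void send_pending(void)
{
	while (arpq_head) {
		struct pkt *p = arpq_head;

		arpq_head = p->next;
		p->next = NULL;
		printf("sending pkt %d\n", p->id);
	}
	arpq_tail = NULL;
}

int main(void)
{
	struct pkt a = { 1, NULL }, b = { 2, NULL };

	arpq_enqueue(&a);
	arpq_enqueue(&b);
	send_pending();
	return 0;
}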
+ */ +static struct l2t_entry *alloc_l2e(struct l2t_data *d) +{ + struct l2t_entry *end, *e, **p; + + if (!atomic_read(&d->nfree)) + return NULL; + + /* there's definitely a free entry */ + for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e) + if (atomic_read(&e->refcnt) == 0) + goto found; + + for (e = d->l2tab; atomic_read(&e->refcnt); ++e) + ; +found: + d->rover = e + 1; + atomic_dec(&d->nfree); + + /* + * The entry we found may be an inactive entry that is + * presently in the hash table. We need to remove it. + */ + if (e->state < L2T_STATE_SWITCHING) + for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) + if (*p == e) { + *p = e->next; + e->next = NULL; + break; + } + + e->state = L2T_STATE_UNUSED; + return e; +} + +/* + * Called when an L2T entry has no more users. + */ +static void t4_l2e_free(struct l2t_entry *e) +{ + struct l2t_data *d; + + spin_lock_bh(&e->lock); + if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + if (e->neigh) { + neigh_release(e->neigh); + e->neigh = NULL; + } + } + spin_unlock_bh(&e->lock); + + d = container_of(e, struct l2t_data, l2tab[e->idx]); + atomic_inc(&d->nfree); +} + +void cxgb4_l2t_release(struct l2t_entry *e) +{ + if (atomic_dec_and_test(&e->refcnt)) + t4_l2e_free(e); +} +EXPORT_SYMBOL(cxgb4_l2t_release); + +/* + * Update an L2T entry that was previously used for the same next hop as neigh. + * Must be called with softirqs disabled. + */ +static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) +{ + unsigned int nud_state; + + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + if (neigh != e->neigh) + neigh_replace(e, neigh); + nud_state = neigh->nud_state; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || + !(nud_state & NUD_VALID)) + e->state = L2T_STATE_RESOLVING; + else if (nud_state & NUD_CONNECTED) + e->state = L2T_STATE_VALID; + else + e->state = L2T_STATE_STALE; + spin_unlock(&e->lock); +} + +struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, + const struct net_device *physdev, + unsigned int priority) +{ + u8 lport; + u16 vlan; + struct l2t_entry *e; + int addr_len = neigh->tbl->key_len; + u32 *addr = (u32 *)neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = addr_hash(addr, addr_len, ifidx); + + if (neigh->dev->flags & IFF_LOOPBACK) + lport = netdev2pinfo(physdev)->tx_chan + 4; + else + lport = netdev2pinfo(physdev)->lport; + + if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + vlan = vlan_dev_vlan_id(neigh->dev); + else + vlan = VLAN_NONE; + + write_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (!addreq(e, addr) && e->ifindex == ifidx && + e->vlan == vlan && e->lport == lport) { + l2t_hold(d, e); + if (atomic_read(&e->refcnt) == 1) + reuse_entry(e, neigh); + goto done; + } + + /* Need to allocate a new entry */ + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + e->state = L2T_STATE_RESOLVING; + memcpy(e->addr, addr, addr_len); + e->ifindex = ifidx; + e->hash = hash; + e->lport = lport; + e->v6 = addr_len == 16; + atomic_set(&e->refcnt, 1); + neigh_replace(e, neigh); + e->vlan = vlan; + e->next = d->l2tab[hash].first; + d->l2tab[hash].first = e; + spin_unlock(&e->lock); + } +done: + write_unlock_bh(&d->lock); + return e; +} +EXPORT_SYMBOL(cxgb4_l2t_get); + +/* + * Called when address resolution fails for an L2T entry to handle packets + * on the arpq head. If a packet specifies a failure handler it is invoked, + * otherwise the packet is sent to the device. 
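alloc_l2e() above relies on an invariant instead of a free list: nfree > 0 promises that some entry has refcnt == 0, so a rover scan from the last allocation point, wrapping once to the table start, must terminate. A compact sketch of that search, with plain ints standing in for the driver's atomics and locks:

#include <stdio.h>

#define L2T_SIZE 8

struct ent { int refcnt; };

static struct ent l2tab[L2T_SIZE];
static struct ent *rover = l2tab;
static int nfree = L2T_SIZE;

static struct ent *alloc_l2e(void)
{
	struct ent *e, *end;

	if (!nfree)
		return NULL;

	/* nfree > 0 guarantees one of the two scans finds a free slot */
	for (e = rover, end = &l2tab[L2T_SIZE]; e != end; ++e)
		if (e->refcnt == 0)
			goto found;
	for (e = l2tab; e->refcnt; ++e)
		;
found:
	rover = e + 1;
	nfree--;
	e->refcnt = 1;
	return e;
}

int main(void)
{
	int i;

	for (i = 0; i < L2T_SIZE; i++)
		alloc_l2e();                /* exhaust the table */
	l2tab[3].refcnt = 0;                /* entry 3 loses its last user */
	nfree++;
	printf("next allocation lands in slot %td\n", alloc_l2e() - l2tab);
	printf("table now full again: %s\n", alloc_l2e() ? "no" : "yes");
	return 0;
}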
+ */ +static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) +{ + while (arpq) { + struct sk_buff *skb = arpq; + const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + arpq = skb->next; + skb->next = NULL; + if (cb->arp_err_handler) + cb->arp_err_handler(cb->handle, skb); + else + t4_ofld_send(adap, skb); + } +} + +/* + * Called when the host's neighbor layer makes a change to some entry that is + * loaded into the HW L2 table. + */ +void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) +{ + struct l2t_entry *e; + struct sk_buff *arpq = NULL; + struct l2t_data *d = adap->l2t; + int addr_len = neigh->tbl->key_len; + u32 *addr = (u32 *) neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = addr_hash(addr, addr_len, ifidx); + + read_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (!addreq(e, addr) && e->ifindex == ifidx) { + spin_lock(&e->lock); + if (atomic_read(&e->refcnt)) + goto found; + spin_unlock(&e->lock); + break; + } + read_unlock_bh(&d->lock); + return; + + found: + read_unlock(&d->lock); + + if (neigh != e->neigh) + neigh_replace(e, neigh); + + if (e->state == L2T_STATE_RESOLVING) { + if (neigh->nud_state & NUD_FAILED) { + arpq = e->arpq_head; + e->arpq_head = e->arpq_tail = NULL; + } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) && + e->arpq_head) { + write_l2e(adap, e, 1); + } + } else { + e->state = neigh->nud_state & NUD_CONNECTED ? + L2T_STATE_VALID : L2T_STATE_STALE; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac))) + write_l2e(adap, e, 0); + } + + spin_unlock_bh(&e->lock); + + if (arpq) + handle_failed_resolution(adap, arpq); +} + +/* + * Allocate an L2T entry for use by a switching rule. Such entries need to be + * explicitly freed and while busy they are not on any hash chain, so normal + * address resolution updates do not see them. + */ +struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d) +{ + struct l2t_entry *e; + + write_lock_bh(&d->lock); + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + e->state = L2T_STATE_SWITCHING; + atomic_set(&e->refcnt, 1); + spin_unlock(&e->lock); + } + write_unlock_bh(&d->lock); + return e; +} + +/* + * Sets/updates the contents of a switching L2T entry that has been allocated + * with an earlier call to @t4_l2t_alloc_switching. + */ +int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, + u8 port, u8 *eth_addr) +{ + e->vlan = vlan; + e->lport = port; + memcpy(e->dmac, eth_addr, ETH_ALEN); + return write_l2e(adap, e, 0); +} + +struct l2t_data *t4_init_l2t(void) +{ + int i; + struct l2t_data *d; + + d = t4_alloc_mem(sizeof(*d)); + if (!d) + return NULL; + + d->rover = d->l2tab; + atomic_set(&d->nfree, L2T_SIZE); + rwlock_init(&d->lock); + + for (i = 0; i < L2T_SIZE; ++i) { + d->l2tab[i].idx = i; + d->l2tab[i].state = L2T_STATE_UNUSED; + spin_lock_init(&d->l2tab[i].lock); + atomic_set(&d->l2tab[i].refcnt, 0); + } + return d; +} + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) +{ + struct l2t_entry *l2tab = seq->private; + + return pos >= L2T_SIZE ? NULL : &l2tab[pos]; +} + +static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? 
l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; +} + +static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + v = l2t_get_idx(seq, *pos); + if (v) + ++*pos; + return v; +} + +static void l2t_seq_stop(struct seq_file *seq, void *v) +{ +} + +static char l2e_state(const struct l2t_entry *e) +{ + switch (e->state) { + case L2T_STATE_VALID: return 'V'; + case L2T_STATE_STALE: return 'S'; + case L2T_STATE_SYNC_WRITE: return 'W'; + case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R'; + case L2T_STATE_SWITCHING: return 'X'; + default: + return 'U'; + } +} + +static int l2t_seq_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) + seq_puts(seq, " Idx IP address " + "Ethernet address VLAN/P LP State Users Port\n"); + else { + char ip[60]; + struct l2t_entry *e = v; + + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_SWITCHING) + ip[0] = '\0'; + else + sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr); + seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", + e->idx, ip, e->dmac, + e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, + l2e_state(e), atomic_read(&e->refcnt), + e->neigh ? e->neigh->dev->name : ""); + spin_unlock_bh(&e->lock); + } + return 0; +} + +static const struct seq_operations l2t_seq_ops = { + .start = l2t_seq_start, + .next = l2t_seq_next, + .stop = l2t_seq_stop, + .show = l2t_seq_show +}; + +static int l2t_seq_open(struct inode *inode, struct file *file) +{ + int rc = seq_open(file, &l2t_seq_ops); + + if (!rc) { + struct adapter *adap = inode->i_private; + struct seq_file *seq = file->private_data; + + seq->private = adap->l2t->l2tab; + } + return rc; +} + +const struct file_operations t4_l2t_fops = { + .owner = THIS_MODULE, + .open = l2t_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h new file mode 100644 index 00000000000..643f27ed3cf --- /dev/null +++ b/drivers/net/cxgb4/l2t.h @@ -0,0 +1,110 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_L2T_H +#define __CXGB4_L2T_H + +#include <linux/spinlock.h> +#include <linux/if_ether.h> +#include <asm/atomic.h> + +struct adapter; +struct l2t_data; +struct neighbour; +struct net_device; +struct file_operations; +struct cpl_l2t_write_rpl; + +/* + * Each L2T entry plays multiple roles. First of all, it keeps state for the + * corresponding entry of the HW L2 table and maintains a queue of offload + * packets awaiting address resolution. Second, it is a node of a hash table + * chain, where the nodes of the chain are linked together through their next + * pointer. Finally, each node is a bucket of a hash table, pointing to the + * first element in its chain through its first pointer. + */ +struct l2t_entry { + u16 state; /* entry state */ + u16 idx; /* entry index */ + u32 addr[4]; /* next hop IP or IPv6 address */ + int ifindex; /* neighbor's net_device's ifindex */ + struct neighbour *neigh; /* associated neighbour */ + struct l2t_entry *first; /* start of hash chain */ + struct l2t_entry *next; /* next l2t_entry on chain */ + struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ + struct sk_buff *arpq_tail; + spinlock_t lock; + atomic_t refcnt; /* entry reference count */ + u16 hash; /* hash bucket the entry is on */ + u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */ + u8 v6; /* whether entry is for IPv6 */ + u8 lport; /* associated offload logical interface */ + u8 dmac[ETH_ALEN]; /* neighbour's MAC address */ +}; + +typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb); + +/* + * Callback stored in an skb to handle address resolution failure. + */ +struct l2t_skb_cb { + void *handle; + arp_err_handler_t arp_err_handler; +}; + +#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) + +static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle, + arp_err_handler_t handler) +{ + L2T_SKB_CB(skb)->handle = handle; + L2T_SKB_CB(skb)->arp_err_handler = handler; +} + +void cxgb4_l2t_release(struct l2t_entry *e); +int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, + struct l2t_entry *e); +struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, + const struct net_device *physdev, + unsigned int priority); + +void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); +struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); +int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, + u8 port, u8 *eth_addr); +struct l2t_data *t4_init_l2t(void); +void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); + +extern const struct file_operations t4_l2t_fops; +#endif /* __CXGB4_L2T_H */ diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c new file mode 100644 index 00000000000..14adc58e71c --- /dev/null +++ b/drivers/net/cxgb4/sge.c @@ -0,0 +1,2431 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses.
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/dma-mapping.h> +#include <linux/jiffies.h> +#include <net/ipv6.h> +#include <net/tcp.h> +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "t4fw_api.h" + +/* + * Rx buffer size. We use largish buffers if possible but settle for single + * pages under memory shortage. + */ +#if PAGE_SHIFT >= 16 +# define FL_PG_ORDER 0 +#else +# define FL_PG_ORDER (16 - PAGE_SHIFT) +#endif + +/* RX_PULL_LEN should be <= RX_COPY_THRES */ +#define RX_COPY_THRES 256 +#define RX_PULL_LEN 128 + +/* + * Main body length for sk_buffs used for Rx Ethernet packets with fragments. + * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. + */ +#define RX_PKT_SKB_LEN 512 + +/* Ethernet header padding prepended to RX_PKTs */ +#define RX_PKT_PAD 2 + +/* + * Max number of Tx descriptors we clean up at a time. Should be modest as + * freeing skbs isn't cheap and it happens while holding locks. We just need + * to free packets faster than they arrive; we eventually catch up and keep + * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. + */ +#define MAX_TX_RECLAIM 16 + +/* + * Max number of Rx buffers we replenish at a time. Again keep this modest; + * allocating buffers isn't cheap either. + */ +#define MAX_RX_REFILL 16U + +/* + * Period of the Rx queue check timer. This timer is infrequent as it has + * something to do only when the system experiences severe memory shortage. + */ +#define RX_QCHECK_PERIOD (HZ / 2) + +/* + * Period of the Tx queue check timer. + */ +#define TX_QCHECK_PERIOD (HZ / 2) + +/* + * Max number of Tx descriptors to be reclaimed by the Tx timer. + */ +#define MAX_TIMER_TX_RECLAIM 100 + +/* + * Timer index used when backing off due to memory shortage. + */ +#define NOMEM_TMR_IDX (SGE_NTIMERS - 1) + +/* + * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will + * attempt to refill it. + */ +#define FL_STARVE_THRES 4 + +/* + * Suspend an Ethernet Tx queue with fewer available descriptors than this. + * This is the same as calc_tx_descs() for a TSO packet with + * nr_frags == MAX_SKB_FRAGS.
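The equivalence claimed in the comment above can be checked by hand: a worst-case TSO packet needs sgl_len(f + 1) + 6 flits (4 WR/CPL header flits plus 2 for the LSO header, per calc_tx_flits() later in this file), and since sgl_len(f + 1) = 3f/2 + (f & 1) + 2, the descriptor count reduces to exactly 1 + DIV_ROUND_UP(3f/2 + (f & 1), 8), which is the ETHTXQ_STOP_THRES expression defined next. A quick standalone check (MAX_SKB_FRAGS = 18 is an assumed value for 4 KB pages):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX_SKB_FRAGS 18   /* assumed value for 4 KB pages */

/* flits for an SGL holding n entries, as in sge.c */
static unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

int main(void)
{
	unsigned int f;

	for (f = 1; f <= MAX_SKB_FRAGS; f++) {
		/* worst-case TSO: SGL + 4 header flits + 2 LSO flits */
		unsigned int flits = sgl_len(f + 1) + 4 + 2;
		unsigned int descs = DIV_ROUND_UP(flits, 8);
		unsigned int thres = 1 + DIV_ROUND_UP((3 * f) / 2 + (f & 1), 8);

		printf("nr_frags=%2u descs=%u thres=%u%s\n", f, descs, thres,
		       descs == thres ? "" : "  MISMATCH");
	}
	return 0;
}

The loop reports no mismatch for any fragment count, confirming the two expressions agree.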
+ */ +#define ETHTXQ_STOP_THRES \ + (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) + +/* + * Suspension threshold for non-Ethernet Tx queues. We require enough room + * for a full sized WR. + */ +#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc)) + +/* + * Max Tx descriptor space we allow for an Ethernet packet to be inlined + * into a WR. + */ +#define MAX_IMM_TX_PKT_LEN 128 + +/* + * Max size of a WR sent through a control Tx queue. + */ +#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN + +enum { + /* packet alignment in FL buffers */ + FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES, + /* egress status entry size */ + STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64 +}; + +struct tx_sw_desc { /* SW state per Tx descriptor */ + struct sk_buff *skb; + struct ulptx_sgl *sgl; +}; + +struct rx_sw_desc { /* SW state per Rx descriptor */ + struct page *page; + dma_addr_t dma_addr; +}; + +/* + * The low bits of rx_sw_desc.dma_addr have special meaning. + */ +enum { + RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */ + RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ +}; + +static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) +{ + return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); +} + +static inline bool is_buf_mapped(const struct rx_sw_desc *d) +{ + return !(d->dma_addr & RX_UNMAPPED_BUF); +} + +/** + * txq_avail - return the number of available slots in a Tx queue + * @q: the Tx queue + * + * Returns the number of descriptors in a Tx queue available to write new + * packets. + */ +static inline unsigned int txq_avail(const struct sge_txq *q) +{ + return q->size - 1 - q->in_use; +} + +/** + * fl_cap - return the capacity of a free-buffer list + * @fl: the FL + * + * Returns the capacity of a free-buffer list. The capacity is less than + * the size because one descriptor needs to be left unpopulated, otherwise + * HW will think the FL is empty. 
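Stashing RX_LARGE_BUF and RX_UNMAPPED_BUF in the low bits of dma_addr, as the rx_sw_desc helpers above do, works because the buffer addresses are at least page aligned, so bits 0 and 1 of a real address are always zero. The trick in isolation, with uint64_t standing in for dma_addr_t and a made-up sample address:

#include <stdio.h>
#include <stdint.h>

/* flags smuggled into the two low bits of an aligned address,
 * mirroring what rx_sw_desc does with dma_addr */
enum {
	RX_LARGE_BUF    = 1 << 0,
	RX_UNMAPPED_BUF = 1 << 1,
};

static uint64_t pack(uint64_t addr, unsigned int flags)
{
	return addr | flags;
}

static uint64_t get_buf_addr(uint64_t v)
{
	return v & ~(uint64_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
}

static int is_buf_mapped(uint64_t v)
{
	return !(v & RX_UNMAPPED_BUF);
}

int main(void)
{
	uint64_t addr = 0x12345000;     /* page-aligned, so low bits are free */
	uint64_t v = pack(addr, RX_LARGE_BUF);

	printf("addr back: %#llx, large: %d, mapped: %d\n",
	       (unsigned long long)get_buf_addr(v),
	       (int)(v & RX_LARGE_BUF), is_buf_mapped(v));
	return 0;
}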
+ */ +static inline unsigned int fl_cap(const struct sge_fl *fl) +{ + return fl->size - 8; /* 1 descriptor = 8 buffers */ +} + +static inline bool fl_starving(const struct sge_fl *fl) +{ + return fl->avail - fl->pend_cred <= FL_STARVE_THRES; +} + +static int map_skb(struct device *dev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto out_err; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + + for (fp = si->frags; fp < end; fp++) { + *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); + + dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); +out_err: + return -ENOMEM; +} + +#ifdef CONFIG_NEED_DMA_MAP_STATE +static void unmap_skb(struct device *dev, const struct sk_buff *skb, + const dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + for (fp = si->frags; fp < end; fp++) + dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE); +} + +/** + * deferred_unmap_destructor - unmap a packet when it is freed + * @skb: the packet + * + * This is the packet destructor used for Tx packets that need to remain + * mapped until they are freed rather than until their Tx descriptors are + * freed. + */ +static void deferred_unmap_destructor(struct sk_buff *skb) +{ + unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); +} +#endif + +static void unmap_sgl(struct device *dev, const struct sk_buff *skb, + const struct ulptx_sgl *sgl, const struct sge_txq *q) +{ + const struct ulptx_sge_pair *p; + unsigned int nfrags = skb_shinfo(skb)->nr_frags; + + if (likely(skb_headlen(skb))) + dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), + DMA_TO_DEVICE); + else { + dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), + DMA_TO_DEVICE); + nfrags--; + } + + /* + * the complexity below is because of the possibility of a wrap-around + * in the middle of an SGL + */ + for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { + if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { +unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(p->addr[1]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p++; + } else if ((u8 *)p == (u8 *)q->stat) { + p = (const struct ulptx_sge_pair *)q->desc; + goto unmap; + } else if ((u8 *)p + 8 == (u8 *)q->stat) { + const __be64 *addr = (const __be64 *)q->desc; + + dma_unmap_page(dev, be64_to_cpu(addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[1]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[2]; + } else { + const __be64 *addr = (const __be64 *)q->desc; + + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[0]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[1]; + } + } + if (nfrags) { + __be64 addr; + + if ((u8 *)p == (u8 *)q->stat) + p = (const struct ulptx_sge_pair *)q->desc; + addr = (u8 *)p + 16 <= (u8 *)q->stat ? 
p->addr[0] : + *(const __be64 *)q->desc; + dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), + DMA_TO_DEVICE); + } +} + +/** + * free_tx_desc - reclaims Tx descriptors and their buffers + * @adapter: the adapter + * @q: the Tx queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors from an SGE Tx queue and frees the associated + * Tx buffers. Called with the Tx queue lock held. + */ +static void free_tx_desc(struct adapter *adap, struct sge_txq *q, + unsigned int n, bool unmap) +{ + struct tx_sw_desc *d; + unsigned int cidx = q->cidx; + struct device *dev = adap->pdev_dev; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->skb) { /* an SGL is present */ + if (unmap) + unmap_sgl(dev, d->skb, d->sgl, q); + kfree_skb(d->skb); + d->skb = NULL; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + } + q->cidx = cidx; +} + +/* + * Return the number of reclaimable descriptors in a Tx queue. + */ +static inline int reclaimable(const struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + hw_cidx -= q->cidx; + return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; +} + +/** + * reclaim_completed_tx - reclaims completed Tx descriptors + * @adap: the adapter + * @q: the Tx queue to reclaim completed descriptors from + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the Tx + * queue locked. + */ +static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, + bool unmap) +{ + int avail = reclaimable(q); + + if (avail) { + /* + * Limit the amount of clean up work we do at a time to keep + * the Tx lock hold time O(1). + */ + if (avail > MAX_TX_RECLAIM) + avail = MAX_TX_RECLAIM; + + free_tx_desc(adap, q, avail, unmap); + q->in_use -= avail; + } +} + +static inline int get_buf_size(const struct rx_sw_desc *d) +{ +#if FL_PG_ORDER > 0 + return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) : + PAGE_SIZE; +#else + return PAGE_SIZE; +#endif +} + +/** + * free_rx_bufs - free the Rx buffers on an SGE free list + * @adap: the adapter + * @q: the SGE free list to free buffers from + * @n: how many buffers to free + * + * Release the next @n buffers on an SGE free-buffer Rx queue. The + * buffers must be made inaccessible to HW before calling this function. + */ +static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) +{ + while (n--) { + struct rx_sw_desc *d = &q->sdesc[q->cidx]; + + if (is_buf_mapped(d)) + dma_unmap_page(adap->pdev_dev, get_buf_addr(d), + get_buf_size(d), PCI_DMA_FROMDEVICE); + put_page(d->page); + d->page = NULL; + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; + } +} + +/** + * unmap_rx_buf - unmap the current Rx buffer on an SGE free list + * @adap: the adapter + * @q: the SGE free list + * + * Unmap the current buffer on an SGE free-buffer Rx queue. The + * buffer must be made inaccessible to HW before calling this function. + * + * This is similar to @free_rx_bufs above but does not free the buffer. + * Do note that the FL still loses any further access to the buffer. 
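reclaimable() above turns two ring indices into a count with one signed subtraction and a conditional wrap, which works for any ring size, not only powers of two. The arithmetic on its own, with made-up index values:

#include <stdio.h>

/* count entries between the SW consumer index and the HW-reported
 * consumer index, handling wraparound as reclaimable() does */
static int reclaimable(int hw_cidx, int sw_cidx, int size)
{
	int n = hw_cidx - sw_cidx;

	return n < 0 ? n + size : n;
}

int main(void)
{
	/* no wrap: HW is 3 entries ahead of SW */
	printf("%d\n", reclaimable(10, 7, 16));    /* 3 */
	/* wrapped: HW passed the end of the ring and restarted */
	printf("%d\n", reclaimable(2, 14, 16));    /* 4 */
	return 0;
}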
+ */ +static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) +{ + struct rx_sw_desc *d = &q->sdesc[q->cidx]; + + if (is_buf_mapped(d)) + dma_unmap_page(adap->pdev_dev, get_buf_addr(d), + get_buf_size(d), PCI_DMA_FROMDEVICE); + d->page = NULL; + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; +} + +static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) +{ + if (q->pend_cred >= 8) { + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO | + QID(q->cntxt_id) | PIDX(q->pend_cred / 8)); + q->pend_cred &= 7; + } +} + +static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, + dma_addr_t mapping) +{ + sd->page = pg; + sd->dma_addr = mapping; /* includes size low bits */ +} + +/** + * refill_fl - refill an SGE Rx buffer ring + * @adap: the adapter + * @q: the ring to refill + * @n: the number of new buffers to allocate + * @gfp: the gfp flags for the allocations + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity. If afterwards the queue is + * found critically low mark it as starving in the bitmap of starving FLs. + * + * Returns the number of buffers allocated. + */ +static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, + gfp_t gfp) +{ + struct page *pg; + dma_addr_t mapping; + unsigned int cred = q->avail; + __be64 *d = &q->desc[q->pidx]; + struct rx_sw_desc *sd = &q->sdesc[q->pidx]; + + gfp |= __GFP_NOWARN; /* failures are expected */ + +#if FL_PG_ORDER > 0 + /* + * Prefer large buffers + */ + while (n) { + pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER); + if (unlikely(!pg)) { + q->large_alloc_failed++; + break; /* fall back to single pages */ + } + + mapping = dma_map_page(adap->pdev_dev, pg, 0, + PAGE_SIZE << FL_PG_ORDER, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { + __free_pages(pg, FL_PG_ORDER); + goto out; /* do not try small pages for this error */ + } + mapping |= RX_LARGE_BUF; + *d++ = cpu_to_be64(mapping); + + set_rx_sw_desc(sd, pg, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + n--; + } +#endif + + while (n--) { + pg = __netdev_alloc_page(adap->port[0], gfp); + if (unlikely(!pg)) { + q->alloc_failed++; + break; + } + + mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { + netdev_free_page(adap->port[0], pg); + goto out; + } + *d++ = cpu_to_be64(mapping); + + set_rx_sw_desc(sd, pg, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + } + +out: cred = q->avail - cred; + q->pend_cred += cred; + ring_fl_db(adap, q); + + if (unlikely(fl_starving(q))) { + smp_wmb(); + set_bit(q->cntxt_id, adap->sge.starving_fl); + } + + return cred; +} + +static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) +{ + refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), + GFP_ATOMIC); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @elem_size: the size of each descriptor + * @sw_size: the size of the SW state associated with each ring element + * @phys: the physical address of the allocated ring + * @metadata: address of the array holding the SW state for the ring + * @stat_size: extra space in HW 
ring for status information + * + * Allocates resources for an SGE descriptor ring, such as Tx queues, + * free buffer lists, or response queues. Each SGE ring requires + * space for its HW descriptors plus, optionally, space for the SW state + * associated with each HW entry (the metadata). The function returns + * three values: the virtual address for the HW ring (the return value + * of the function), the bus address of the HW ring, and the address + * of the SW ring. + */ +static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t *phys, void *metadata, + size_t stat_size) +{ + size_t len = nelem * elem_size + stat_size; + void *s = NULL; + void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); + + if (!p) + return NULL; + if (sw_size) { + s = kcalloc(nelem, sw_size, GFP_KERNEL); + + if (!s) { + dma_free_coherent(dev, len, p, *phys); + return NULL; + } + } + if (metadata) + *(void **)metadata = s; + memset(p, 0, len); + return p; +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/** + * flits_to_desc - returns the num of Tx descriptors for the given flits + * @n: the number of flits + * + * Returns the number of Tx descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int n) +{ + BUG_ON(n > SGE_MAX_WR_LEN / 8); + return DIV_ROUND_UP(n, 8); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit as + * immediate data. + */ +static inline int is_eth_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt); +} + +/** + * calc_tx_flits - calculate the number of flits for a packet Tx WR + * @skb: the packet + * + * Returns the number of flits needed for a Tx WR for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + if (is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8); + + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; + if (skb_shinfo(skb)->gso_size) + flits += 2; + return flits; +} + +/** + * calc_tx_descs - calculate the number of Tx descriptors for a packet + * @skb: the packet + * + * Returns the number of Tx descriptors needed for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_descs(const struct sk_buff *skb) +{ + return flits_to_desc(calc_tx_flits(skb)); +} + +/** + * write_sgl - populate a scatter/gather list for a packet + * @skb: the packet + * @q: the Tx queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into skb main-body data to include in the SGL + * @addr: the list of bus addresses for the SGL elements + * + * Generates a gather list for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. + * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. 
@sgl must be 16-byte + * aligned and within a Tx descriptor with available space. @end points + * right after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @sgl. + */ +static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + const struct skb_shared_info *si = skb_shinfo(skb); + unsigned int nfrags = si->nr_frags; + struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; + + len = skb_headlen(skb) - start; + if (likely(len)) { + sgl->len0 = htonl(len); + sgl->addr0 = cpu_to_be64(addr[0] + start); + nfrags++; + } else { + sgl->len0 = htonl(si->frags[0].size); + sgl->addr0 = cpu_to_be64(addr[1]); + } + + sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; + + for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(si->frags[++i].size); + to->addr[0] = cpu_to_be64(addr[i]); + to->addr[1] = cpu_to_be64(addr[++i]); + } + if (nfrags) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(0); + to->addr[0] = cpu_to_be64(addr[i + 1]); + } + if (unlikely((u8 *)end > (u8 *)q->stat)) { + unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = (u8 *)end - (u8 *)q->stat; + memcpy(q->desc, (u8 *)buf + part0, part1); + end = (void *)q->desc + part1; + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *(u64 *)end = 0; +} + +/** + * ring_tx_db - check and potentially ring a Tx queue's doorbell + * @adap: the adapter + * @q: the Tx queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbell for a Tx queue. + */ +static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) +{ + wmb(); /* write descriptors before telling HW */ + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(n)); +} + +/** + * inline_tx_skb - inline a packet's data into Tx descriptors + * @skb: the packet + * @q: the Tx queue where the packet will be inlined + * @pos: starting position in the Tx queue where to inline the packet + * + * Inline a packet's contents directly into Tx descriptors, starting at + * the given position within the Tx DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. + */ +static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, + void *pos) +{ + u64 *p; + int left = (void *)q->stat - pos; + + if (likely(skb->len <= left)) { + if (likely(!skb->data_len)) + skb_copy_from_linear_data(skb, pos, skb->len); + else + skb_copy_bits(skb, 0, pos, skb->len); + pos += skb->len; + } else { + skb_copy_bits(skb, 0, pos, left); + skb_copy_bits(skb, left, q->desc, skb->len - left); + pos = (void *)q->desc + (skb->len - left); + } + + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) + *p = 0; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits.
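inline_tx_skb() above deals with the ring seam by splitting one logical copy into at most two copies: everything up to the status page, then the remainder starting again at the ring base. The same split in a self-contained form (the small byte ring and ASCII payload are assumptions for the demo):

#include <stdio.h>
#include <string.h>

#define RING_SIZE 16

static char ring[RING_SIZE];

/* copy len bytes into the ring at offset pos, wrapping to the start
 * when the data would run past the end, as inline_tx_skb() does */
static void ring_copy(unsigned int pos, const void *data, unsigned int len)
{
	unsigned int left = RING_SIZE - pos;

	if (len <= left) {
		memcpy(ring + pos, data, len);
	} else {
		memcpy(ring + pos, data, left);
		memcpy(ring, (const char *)data + left, len - left);
	}
}

int main(void)
{
	unsigned int i;

	ring_copy(12, "ABCDEFGH", 8);   /* 4 bytes at the end, 4 wrapped */
	for (i = 0; i < RING_SIZE; i++)
		putchar(ring[i] ? ring[i] : '.');
	putchar('\n');                  /* prints EFGH........ABCD */
	return 0;
}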
+ */ +static u64 hwcsum(const struct sk_buff *skb) +{ + int csum_type; + const struct iphdr *iph = ip_hdr(skb); + + if (iph->version == 4) { + if (iph->protocol == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP; + else if (iph->protocol == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP; + else { +nocsum: /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return TXPKT_L4CSUM_DIS; + } + } else { + /* + * this doesn't work with extension headers + */ + const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; + + if (ip6h->nexthdr == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP6; + else if (ip6h->nexthdr == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP6; + else + goto nocsum; + } + + if (likely(csum_type >= TX_CSUM_TCPIP)) + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | + TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); + else { + int start = skb_transport_offset(skb); + + return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) | + TXPKT_CSUM_LOC(start + skb->csum_offset); + } +} + +static void eth_txq_stop(struct sge_eth_txq *q) +{ + netif_tx_stop_queue(q->txq); + q->q.stops++; +} + +static inline void txq_advance(struct sge_txq *q, unsigned int n) +{ + q->in_use += n; + q->pidx += n; + if (q->pidx >= q->size) + q->pidx -= q->size; +} + +/** + * t4_eth_xmit - add a packet to an Ethernet Tx queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. + */ +netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u32 wr_mid; + u64 cntrl, *end; + int qidx, credits; + unsigned int flits, ndesc; + struct adapter *adap; + struct sge_eth_txq *q; + const struct port_info *pi; + struct fw_eth_tx_pkt_wr *wr; + struct cpl_tx_pkt_core *cpl; + const struct skb_shared_info *ssi; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + + /* + * The chip min packet length is 10 octets but play safe and reject + * anything shorter than an Ethernet header. 
+ */ + if (unlikely(skb->len < ETH_HLEN)) { +out_free: dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + pi = netdev_priv(dev); + adap = pi->adapter; + qidx = skb_get_queue_mapping(skb); + q = &adap->sge.ethtxq[qidx + pi->first_qset]; + + reclaim_completed_tx(adap, &q->q, true); + + flits = calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&q->q) - ndesc; + + if (unlikely(credits < 0)) { + eth_txq_stop(q); + dev_err(adap->pdev_dev, + "%s: Tx ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!is_eth_imm(skb) && + unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { + q->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + eth_txq_stop(q); + wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + } + + wr = (void *)&q->q.desc[q->q.pidx]; + wr->equiq_to_len16 = htonl(wr_mid); + wr->r3 = cpu_to_be64(0); + end = (u64 *)wr + flits; + + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso *lso = (void *)wr; + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN(sizeof(*lso))); + lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE | LSO_LAST_SLICE | + LSO_IPV6(v6) | + LSO_ETHHDR_LEN(eth_xtra_len / 4) | + LSO_IPHDR_LEN(l3hdr_len / 4) | + LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); + lso->ipid_ofst = htons(0); + lso->mss = htons(ssi->gso_size); + lso->seqno_offset = htonl(0); + lso->len = htonl(skb->len); + cpl = (void *)(lso + 1); + cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN(l3hdr_len) | + TXPKT_ETHHDR_LEN(eth_xtra_len); + q->tso++; + q->tx_cso += ssi->gso_segs; + } else { + int len; + + len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); + wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN(len)); + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; + q->tx_cso++; + } else + cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; + } + + if (vlan_tx_tag_present(skb)) { + q->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); + } + + cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | + TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0)); + cpl->pack = htons(0); + cpl->len = htons(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + if (is_eth_imm(skb)) { + inline_tx_skb(skb, &q->q, cpl + 1); + dev_kfree_skb(skb); + } else { + int last_desc; + + write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, + addr); + skb_orphan(skb); + + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + q->q.sdesc[last_desc].skb = skb; + q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); + } + + txq_advance(&q->q, ndesc); + + ring_tx_db(adap, &q->q, ndesc); + return NETDEV_TX_OK; +} + +/** + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue + * + * This is a variant of reclaim_completed_tx() that is used for Tx queues + * that send only immediate data (presently just the control queues) and + * thus do not have any sk_buffs to release. 
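The control-queue path that follows (ctrl_xmit() and restart_ctrlq()) reduces to a simple backpressure scheme: compute a message's descriptor cost once at submit time, stash it on the packet, and when the ring is full park the packet on a software queue to be replayed verbatim when space returns. A single-threaded toy version, with an explicit ndesc field instead of the skb->priority trick and a plain list instead of the skb queue:

#include <stdio.h>
#include <stdlib.h>

struct msg {
	int ndesc;              /* cost, computed once at submit time */
	struct msg *next;
};

static int avail = 4;       /* free ring descriptors */
static int full;
static struct msg *sendq_head;
static struct msg **sendq_tail = &sendq_head;

static void xmit(struct msg *m)
{
	if (full || m->ndesc > avail) {
		full = 1;
		*sendq_tail = m;            /* park it for later */
		sendq_tail = &m->next;
		printf("queued msg costing %d\n", m->ndesc);
		return;
	}
	avail -= m->ndesc;
	printf("sent msg costing %d, %d descs left\n", m->ndesc, avail);
	free(m);
}

/* called when the hardware has consumed descriptors */
static void restart(int reclaimed)
{
	avail += reclaimed;
	while (sendq_head && sendq_head->ndesc <= avail) {
		struct msg *m = sendq_head;

		sendq_head = m->next;
		avail -= m->ndesc;
		printf("replayed msg costing %d, %d descs left\n",
		       m->ndesc, avail);
		free(m);
	}
	if (!sendq_head)
		sendq_tail = &sendq_head;
	full = sendq_head != NULL;
}

static struct msg *mk(int ndesc)
{
	struct msg *m = calloc(1, sizeof(*m));

	m->ndesc = ndesc;
	return m;
}

int main(void)
{
	xmit(mk(2));
	xmit(mk(3));    /* only 2 descs left: parked */
	restart(3);     /* reclaim makes room: replayed */
	return 0;
}

Saving the cost on the packet is what lets the restart path replay each message in O(1) without recomputing anything.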
+ */ +static inline void reclaim_completed_tx_imm(struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + int reclaim = hw_cidx - q->cidx; + + if (reclaim < 0) + reclaim += q->size; + + q->in_use -= reclaim; + q->cidx = hw_cidx; +} + +/** + * is_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as a WR with immediate data. + */ +static inline int is_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_CTRL_WR_LEN; +} + +/** + * ctrlq_check_stop - check if a control queue is full and should stop + * @q: the queue + * @wr: most recent WR written to the queue + * + * Check if a control queue has become full and should be stopped. + * We clean up control queue descriptors very lazily, only when we are out. + * If the queue is still full after reclaiming any completed descriptors + * we suspend it and have the last WR wake it up. + */ +static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) +{ + reclaim_completed_tx_imm(&q->q); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { + wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + q->q.stops++; + q->full = 1; + } +} + +/** + * ctrl_xmit - send a packet through an SGE control Tx queue + * @q: the control queue + * @skb: the packet + * + * Send a packet through an SGE control Tx queue. Packets sent through + * a control queue must fit entirely as immediate data. + */ +static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) +{ + unsigned int ndesc; + struct fw_wr_hdr *wr; + + if (unlikely(!is_imm(skb))) { + WARN_ON(1); + dev_kfree_skb(skb); + return NET_XMIT_DROP; + } + + ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); + spin_lock(&q->sendq.lock); + + if (unlikely(q->full)) { + skb->priority = ndesc; /* save for restart */ + __skb_queue_tail(&q->sendq, skb); + spin_unlock(&q->sendq.lock); + return NET_XMIT_CN; + } + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + inline_tx_skb(skb, &q->q, wr); + + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) + ctrlq_check_stop(q, wr); + + ring_tx_db(q->adap, &q->q, ndesc); + spin_unlock(&q->sendq.lock); + + kfree_skb(skb); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ctrlq - restart a suspended control queue + * @data: the control queue to restart + * + * Resumes transmission on a suspended Tx control queue. + */ +static void restart_ctrlq(unsigned long data) +{ + struct sk_buff *skb; + unsigned int written = 0; + struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; + + spin_lock(&q->sendq.lock); + reclaim_completed_tx_imm(&q->q); + BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ + + while ((skb = __skb_dequeue(&q->sendq)) != NULL) { + struct fw_wr_hdr *wr; + unsigned int ndesc = skb->priority; /* previously saved */ + + /* + * Write descriptors and free skbs outside the lock to limit + * wait times. q->full is still set so new skbs will be queued. 
+ */ + spin_unlock(&q->sendq.lock); + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + inline_tx_skb(skb, &q->q, wr); + kfree_skb(skb); + + written += ndesc; + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { + unsigned long old = q->q.stops; + + ctrlq_check_stop(q, wr); + if (q->q.stops != old) { /* suspended anew */ + spin_lock(&q->sendq.lock); + goto ringdb; + } + } + if (written > 16) { + ring_tx_db(q->adap, &q->q, written); + written = 0; + } + spin_lock(&q->sendq.lock); + } + q->full = 0; +ringdb: if (written) + ring_tx_db(q->adap, &q->q, written); + spin_unlock(&q->sendq.lock); +} + +/** + * t4_mgmt_tx - send a management message + * @adap: the adapter + * @skb: the packet containing the management message + * + * Send a management message through control queue 0. + */ +int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); + local_bh_enable(); + return ret; +} + +/** + * is_ofld_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as an offload WR with immediate + * data. We currently use the same limit as for Ethernet packets. + */ +static inline int is_ofld_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_IMM_TX_PKT_LEN; +} + +/** + * calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. + */ +static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + + flits = skb_transport_offset(skb) / 8U; /* headers */ + cnt = skb_shinfo(skb)->nr_frags; + if (skb->tail != skb->transport_header) + cnt++; + return flits + sgl_len(cnt); +} + +/** + * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion + * @adap: the adapter + * @q: the queue to stop + * + * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting + * inability to map packets. A periodic timer attempts to restart + * queues so marked. + */ +static void txq_stop_maperr(struct sge_ofld_txq *q) +{ + q->mapping_err++; + q->q.stops++; + set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr); +} + +/** + * ofldtxq_stop - stop an offload Tx queue that has become full + * @q: the queue to stop + * @skb: the packet causing the queue to become full + * + * Stops an offload Tx queue that has become full and modifies the packet + * being written to request a wakeup. + */ +static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) +{ + struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; + + wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + q->q.stops++; + q->full = 1; +} + +/** + * service_ofldq - restart a suspended offload queue + * @q: the offload queue + * + * Services an offload Tx queue by moving packets from its packet queue + * to the HW Tx ring. The function starts and ends with the queue locked. + */ +static void service_ofldq(struct sge_ofld_txq *q) +{ + u64 *pos; + int credits; + struct sk_buff *skb; + unsigned int written = 0; + unsigned int flits, ndesc; + + while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { + /* + * We drop the lock but leave skb on sendq, thus retaining + * exclusive access to the state of the queue. 
+ */ + spin_unlock(&q->sendq.lock); + + reclaim_completed_tx(q->adap, &q->q, false); + + flits = skb->priority; /* previously saved */ + ndesc = flits_to_desc(flits); + credits = txq_avail(&q->q) - ndesc; + BUG_ON(credits < 0); + if (unlikely(credits < TXQ_STOP_THRES)) + ofldtxq_stop(q, skb); + + pos = (u64 *)&q->q.desc[q->q.pidx]; + if (is_ofld_imm(skb)) + inline_tx_skb(skb, &q->q, pos); + else if (map_skb(q->adap->pdev_dev, skb, + (dma_addr_t *)skb->head)) { + txq_stop_maperr(q); + spin_lock(&q->sendq.lock); + break; + } else { + int last_desc, hdr_len = skb_transport_offset(skb); + + memcpy(pos, skb->data, hdr_len); + write_sgl(skb, &q->q, (void *)pos + hdr_len, + pos + flits, hdr_len, + (dma_addr_t *)skb->head); +#ifdef CONFIG_NEED_DMA_MAP_STATE + skb->dev = q->adap->port[0]; + skb->destructor = deferred_unmap_destructor; +#endif + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + q->q.sdesc[last_desc].skb = skb; + } + + txq_advance(&q->q, ndesc); + written += ndesc; + if (unlikely(written > 32)) { + ring_tx_db(q->adap, &q->q, written); + written = 0; + } + + spin_lock(&q->sendq.lock); + __skb_unlink(skb, &q->sendq); + if (is_ofld_imm(skb)) + kfree_skb(skb); + } + if (likely(written)) + ring_tx_db(q->adap, &q->q, written); +} + +/** + * ofld_xmit - send a packet through an offload queue + * @q: the Tx offload queue + * @skb: the packet + * + * Send an offload packet through an SGE offload queue. + */ +static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) +{ + skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ + spin_lock(&q->sendq.lock); + __skb_queue_tail(&q->sendq, skb); + if (q->sendq.qlen == 1) + service_ofldq(q); + spin_unlock(&q->sendq.lock); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ofldq - restart a suspended offload queue + * @data: the offload queue to restart + * + * Resumes transmission on a suspended Tx offload queue. + */ +static void restart_ofldq(unsigned long data) +{ + struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; + + spin_lock(&q->sendq.lock); + q->full = 0; /* the queue actually is completely empty now */ + service_ofldq(q); + spin_unlock(&q->sendq.lock); +} + +/** + * skb_txq - return the Tx queue an offload packet should use + * @skb: the packet + * + * Returns the Tx queue an offload packet should use as indicated by bits + * 1-15 in the packet's queue_mapping. + */ +static inline unsigned int skb_txq(const struct sk_buff *skb) +{ + return skb->queue_mapping >> 1; +} + +/** + * is_ctrl_pkt - return whether an offload packet is a control packet + * @skb: the packet + * + * Returns whether an offload packet should use an OFLD or a CTRL + * Tx queue as indicated by bit 0 in the packet's queue_mapping. + */ +static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) +{ + return skb->queue_mapping & 1; +} + +static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) +{ + unsigned int idx = skb_txq(skb); + + if (unlikely(is_ctrl_pkt(skb))) + return ctrl_xmit(&adap->sge.ctrlq[idx], skb); + return ofld_xmit(&adap->sge.ofldtxq[idx], skb); +} + +/** + * t4_ofld_send - send an offload packet + * @adap: the adapter + * @skb: the packet + * + * Sends an offload packet. We use the packet queue_mapping to select the + * appropriate Tx queue as follows: bit 0 indicates whether the packet + * should be sent as regular or control, bits 1-15 select the queue. 
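+ *
+ *	For example, a ULD targeting offload Tx queue 3 would set
+ *	skb->queue_mapping to (3 << 1), or to (3 << 1) | 1 to address the
+ *	corresponding control queue, before passing the skb in.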
+ */
+int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = ofld_send(adap, skb);
+	local_bh_enable();
+	return ret;
+}
+
+/**
+ *	cxgb4_ofld_send - send an offload packet
+ *	@dev: the net device
+ *	@skb: the packet
+ *
+ *	Sends an offload packet.  This is an exported version of
+ *	t4_ofld_send(), intended for ULDs.
+ */
+int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
+{
+	return t4_ofld_send(netdev2adap(dev), skb);
+}
+EXPORT_SYMBOL(cxgb4_ofld_send);
+
+static inline void copy_frags(struct skb_shared_info *ssi,
+			      const struct pkt_gl *gl, unsigned int offset)
+{
+	unsigned int n;
+
+	/* usually there's just one frag */
+	ssi->frags[0].page = gl->frags[0].page;
+	ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
+	ssi->frags[0].size = gl->frags[0].size - offset;
+	ssi->nr_frags = gl->nfrags;
+	n = gl->nfrags - 1;
+	if (n)
+		memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
+
+	/* get a reference to the last page, we don't own it */
+	get_page(gl->frags[n].page);
+}
+
+/**
+ *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
+ *	@gl: the gather list
+ *	@skb_len: size of sk_buff main body if it carries fragments
+ *	@pull_len: amount of data to move to the sk_buff's main body
+ *
+ *	Builds an sk_buff from the given packet gather list.  Returns the
+ *	sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+				   unsigned int skb_len, unsigned int pull_len)
+{
+	struct sk_buff *skb;
+
+	/*
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
+	 * buffer size, which is expected since buffers are at least
+	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only
+	 * one fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		skb = dev_alloc_skb(gl->tot_len);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = dev_alloc_skb(skb_len);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		copy_frags(skb_shinfo(skb), gl, pull_len);
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+	}
+out:	return skb;
+}
+EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
+
+/**
+ *	t4_pktgl_free - free a packet gather list
+ *	@gl: the gather list
+ *
+ *	Releases the pages of a packet gather list.  We do not own the last
+ *	page on the list and do not free it.
+ */
+void t4_pktgl_free(const struct pkt_gl *gl)
+{
+	int n;
+	const skb_frag_t *p;
+
+	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
+		put_page(p->page);
+}
+
+/*
+ * Process an MPS trace packet.  Give it an unused protocol number so it won't
+ * be delivered to anyone and send it to the stack for capture.
+ */ +static noinline int handle_trace_pkt(struct adapter *adap, + const struct pkt_gl *gl) +{ + struct sk_buff *skb; + struct cpl_trace_pkt *p; + + skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4_pktgl_free(gl); + return 0; + } + + p = (struct cpl_trace_pkt *)skb->data; + __skb_pull(skb, sizeof(*p)); + skb_reset_mac_header(skb); + skb->protocol = htons(0xffff); + skb->dev = adap->port[0]; + netif_receive_skb(skb); + return 0; +} + +static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, + const struct cpl_rx_pkt *pkt) +{ + int ret; + struct sk_buff *skb; + + skb = napi_get_frags(&rxq->rspq.napi); + if (unlikely(!skb)) { + t4_pktgl_free(gl); + rxq->stats.rx_drops++; + return; + } + + copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD); + skb->len = gl->tot_len - RX_PKT_PAD; + skb->data_len = skb->len; + skb->truesize += skb->data_len; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rxq->rspq.idx); + + if (unlikely(pkt->vlan_ex)) { + struct port_info *pi = netdev_priv(rxq->rspq.netdev); + struct vlan_group *grp = pi->vlan_grp; + + rxq->stats.vlan_ex++; + if (likely(grp)) { + ret = vlan_gro_frags(&rxq->rspq.napi, grp, + ntohs(pkt->vlan)); + goto stats; + } + } + ret = napi_gro_frags(&rxq->rspq.napi); +stats: if (ret == GRO_HELD) + rxq->stats.lro_pkts++; + else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) + rxq->stats.lro_merged++; + rxq->stats.pkts++; + rxq->stats.rx_cso++; +} + +/** + * t4_ethrx_handler - process an ingress ethernet packet + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the RX_PKT message + * @si: the gather list of packet fragments + * + * Process an ingress ethernet packet and deliver it to the stack. + */ +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *si) +{ + bool csum_ok; + struct sk_buff *skb; + struct port_info *pi; + const struct cpl_rx_pkt *pkt; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + + if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) + return handle_trace_pkt(q->adap, si); + + pkt = (void *)&rsp[1]; + csum_ok = pkt->csum_calc && !pkt->err_vec; + if ((pkt->l2info & htonl(RXF_TCP)) && + (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { + do_gro(rxq, si, pkt); + return 0; + } + + skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4_pktgl_free(si); + rxq->stats.rx_drops++; + return 0; + } + + __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ + skb->protocol = eth_type_trans(skb, q->netdev); + skb_record_rx_queue(skb, q->idx); + pi = netdev_priv(skb->dev); + rxq->stats.pkts++; + + if (csum_ok && (pi->rx_offload & RX_CSO) && + (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { + if (!pkt->ip_frag) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else { + __sum16 c = (__force __sum16)pkt->csum; + skb->csum = csum_unfold(c); + skb->ip_summed = CHECKSUM_COMPLETE; + } + rxq->stats.rx_cso++; + } else + skb->ip_summed = CHECKSUM_NONE; + + if (unlikely(pkt->vlan_ex)) { + struct vlan_group *grp = pi->vlan_grp; + + rxq->stats.vlan_ex++; + if (likely(grp)) + vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan)); + else + dev_kfree_skb_any(skb); + } else + netif_receive_skb(skb); + + return 0; +} + +/** + * restore_rx_bufs - put back a packet's Rx buffers + * @si: the packet gather list + * @q: the SGE free list + * @frags: number of FL buffers to restore + * + * Puts back on an FL the Rx buffers associated with @si. 
The buffers
+ *	have already been unmapped and are left unmapped; we mark them so
+ *	that no further unmapping is attempted.
+ *
+ *	This function undoes a series of unmap_rx_buf() calls when we find out
+ *	that the current packet can't be processed right away after all and we
+ *	need to come back to it later.  This is a very rare event and there's
+ *	no effort to make this particularly efficient.
+ */
+static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
+			    int frags)
+{
+	struct rx_sw_desc *d;
+
+	while (frags--) {
+		if (q->cidx == 0)
+			q->cidx = q->size - 1;
+		else
+			q->cidx--;
+		d = &q->sdesc[q->cidx];
+		d->page = si->frags[frags].page;
+		d->dma_addr |= RX_UNMAPPED_BUF;
+		q->avail++;
+	}
+}
+
+/**
+ *	is_new_response - check if a response is newly written
+ *	@r: the response descriptor
+ *	@q: the response queue
+ *
+ *	Returns true if a response descriptor contains a yet unprocessed
+ *	response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *r,
+				   const struct sge_rspq *q)
+{
+	return RSPD_GEN(r->type_gen) == q->gen;
+}
+
+/**
+ *	rspq_next - advance to the next entry in a response queue
+ *	@q: the queue
+ *
+ *	Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *q)
+{
+	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
+	if (unlikely(++q->cidx == q->size)) {
+		q->cidx = 0;
+		q->gen ^= 1;
+		q->cur_desc = q->desc;
+	}
+}
+
+/**
+ *	process_responses - process responses from an SGE response queue
+ *	@q: the ingress queue to process
+ *	@budget: how many responses can be processed in this round
+ *
+ *	Process responses from an SGE response queue up to the supplied budget.
+ *	Responses include received packets as well as control messages from FW
+ *	or HW.
+ *
+ *	Additionally choose the interrupt holdoff time for the next interrupt
+ *	on this queue.  If the system is under memory shortage use a fairly
+ *	long delay to help recovery.
+ */
+static int process_responses(struct sge_rspq *q, int budget)
+{
+	int ret, rsp_type;
+	int budget_left = budget;
+	const struct rsp_ctrl *rc;
+	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+	while (likely(budget_left)) {
+		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+		if (!is_new_response(rc, q))
+			break;
+
+		rmb();
+		rsp_type = RSPD_TYPE(rc->type_gen);
+		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+			skb_frag_t *fp;
+			struct pkt_gl si;
+			const struct rx_sw_desc *rsd;
+			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
+
+			if (len & RSPD_NEWBUF) {
+				if (likely(q->offset > 0)) {
+					free_rx_bufs(q->adap, &rxq->fl, 1);
+					q->offset = 0;
+				}
+				len &= RSPD_LEN;
+			}
+			si.tot_len = len;
+
+			/* gather packet fragments */
+			for (frags = 0, fp = si.frags; ; frags++, fp++) {
+				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+				bufsz = get_buf_size(rsd);
+				fp->page = rsd->page;
+				fp->page_offset = q->offset;
+				fp->size = min(bufsz, len);
+				len -= fp->size;
+				if (!len)
+					break;
+				unmap_rx_buf(q->adap, &rxq->fl);
+			}
+
+			/*
+			 * Last buffer remains mapped so explicitly make it
+			 * coherent for CPU access.
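+			 * The preceding buffers in the gather list were
+			 * unmapped by unmap_rx_buf() above; only this final
+			 * one still needs a dma_sync_single_for_cpu() before
+			 * the CPU reads the packet headers.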
+			 */
+			dma_sync_single_for_cpu(q->adap->pdev_dev,
+						get_buf_addr(rsd),
+						fp->size, DMA_FROM_DEVICE);
+
+			si.va = page_address(si.frags[0].page) +
+				si.frags[0].page_offset;
+			prefetch(si.va);
+
+			si.nfrags = frags + 1;
+			ret = q->handler(q, q->cur_desc, &si);
+			if (likely(ret == 0))
+				q->offset += ALIGN(fp->size, FL_ALIGN);
+			else
+				restore_rx_bufs(&si, &rxq->fl, frags);
+		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
+			ret = q->handler(q, q->cur_desc, NULL);
+		} else {
+			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
+		}
+
+		if (unlikely(ret)) {
+			/* couldn't process descriptor, back off for recovery */
+			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+			break;
+		}
+
+		rspq_next(q);
+		budget_left--;
+	}
+
+	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
+		__refill_fl(q->adap, &rxq->fl);
+	return budget - budget_left;
+}
+
+/**
+ *	napi_rx_handler - the NAPI handler for Rx processing
+ *	@napi: the napi instance
+ *	@budget: how many packets we can process in this round
+ *
+ *	Handler for new data events when using NAPI.  This does not need any
+ *	locking or protection from interrupts as data interrupts are off at
+ *	this point and other adapter interrupts do not interfere (the latter
+ *	is not a concern at all with MSI-X as non-data interrupts then have
+ *	a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+	unsigned int params;
+	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
+	int work_done = process_responses(q, budget);
+
+	if (likely(work_done < budget)) {
+		napi_complete(napi);
+		params = q->next_intr_params;
+		q->next_intr_params = q->intr_params;
+	} else
+		params = QINTR_TIMER_IDX(7);
+
+	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
+		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
+	return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue.
+ */
+irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
+{
+	struct sge_rspq *q = cookie;
+
+	napi_schedule(&q->napi);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adap)
+{
+	unsigned int credits;
+	const struct rsp_ctrl *rc;
+	struct sge_rspq *q = &adap->sge.intrq;
+
+	spin_lock(&adap->sge.intrq_lock);
+	for (credits = 0; ; credits++) {
+		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+		if (!is_new_response(rc, q))
+			break;
+
+		rmb();
+		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+			unsigned int qid = ntohl(rc->pldbuflen_qid);
+
+			napi_schedule(&adap->sge.ingr_map[qid]->napi);
+		}
+
+		rspq_next(q);
+	}
+
+	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
+		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
+	spin_unlock(&adap->sge.intrq_lock);
+	return credits;
+}
+
+/*
+ * The MSI interrupt handler, which handles data events from SGE response
+ * queues as well as error and other async events as they all use the same
+ * MSI vector.
+ */
+static irqreturn_t t4_intr_msi(int irq, void *cookie)
+{
+	struct adapter *adap = cookie;
+
+	t4_slow_intr_handler(adap);
+	process_intrq(adap);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt line.
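+ * Returning IRQ_NONE when neither source had any work lets the kernel cope
+ * with an INTx line that is shared with another device.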
+ */ +static irqreturn_t t4_intr_intx(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0); + if (t4_slow_intr_handler(adap) | process_intrq(adap)) + return IRQ_HANDLED; + return IRQ_NONE; /* probably shared interrupt */ +} + +/** + * t4_intr_handler - select the top-level interrupt handler + * @adap: the adapter + * + * Selects the top-level interrupt handler based on the type of interrupts + * (MSI-X, MSI, or INTx). + */ +irq_handler_t t4_intr_handler(struct adapter *adap) +{ + if (adap->flags & USING_MSIX) + return t4_sge_intr_msix; + if (adap->flags & USING_MSI) + return t4_intr_msi; + return t4_intr_intx; +} + +static void sge_rx_timer_cb(unsigned long data) +{ + unsigned long m; + unsigned int i, cnt[2]; + struct adapter *adap = (struct adapter *)data; + struct sge *s = &adap->sge; + + for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) + for (m = s->starving_fl[i]; m; m &= m - 1) { + struct sge_eth_rxq *rxq; + unsigned int id = __ffs(m) + i * BITS_PER_LONG; + struct sge_fl *fl = s->egr_map[id]; + + clear_bit(id, s->starving_fl); + smp_mb__after_clear_bit(); + + if (fl_starving(fl)) { + rxq = container_of(fl, struct sge_eth_rxq, fl); + if (napi_reschedule(&rxq->rspq.napi)) + fl->starving++; + else + set_bit(id, s->starving_fl); + } + } + + t4_write_reg(adap, SGE_DEBUG_INDEX, 13); + cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); + cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); + + for (i = 0; i < 2; i++) + if (cnt[i] >= s->starve_thres) { + if (s->idma_state[i] || cnt[i] == 0xffffffff) + continue; + s->idma_state[i] = 1; + t4_write_reg(adap, SGE_DEBUG_INDEX, 11); + m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16); + dev_warn(adap->pdev_dev, + "SGE idma%u starvation detected for " + "queue %lu\n", i, m & 0xffff); + } else if (s->idma_state[i]) + s->idma_state[i] = 0; + + mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); +} + +static void sge_tx_timer_cb(unsigned long data) +{ + unsigned long m; + unsigned int i, budget; + struct adapter *adap = (struct adapter *)data; + struct sge *s = &adap->sge; + + for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) + for (m = s->txq_maperr[i]; m; m &= m - 1) { + unsigned long id = __ffs(m) + i * BITS_PER_LONG; + struct sge_ofld_txq *txq = s->egr_map[id]; + + clear_bit(id, s->txq_maperr); + tasklet_schedule(&txq->qresume_tsk); + } + + budget = MAX_TIMER_TX_RECLAIM; + i = s->ethtxq_rover; + do { + struct sge_eth_txq *q = &s->ethtxq[i]; + + if (q->q.in_use && + time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && + __netif_tx_trylock(q->txq)) { + int avail = reclaimable(&q->q); + + if (avail) { + if (avail > budget) + avail = budget; + + free_tx_desc(adap, &q->q, avail, true); + q->q.in_use -= avail; + budget -= avail; + } + __netif_tx_unlock(q->txq); + } + + if (++i >= s->ethqsets) + i = 0; + } while (budget && i != s->ethtxq_rover); + s->ethtxq_rover = i; + mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); +} + +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct net_device *dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd) +{ + int ret, flsz = 0; + struct fw_iq_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Size needs to be multiple of 16, including status entry. 
*/ + iq->size = roundup(iq->size, 16); + + iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, + &iq->phys_addr, NULL, 0); + if (!iq->desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | + FW_LEN16(c)); + c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) | + FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) | + FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : + -intr_idx - 1)); + c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) | + FW_IQ_CMD_IQGTSMODE | + FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); + c.iqsize = htons(iq->size); + c.iqaddr = cpu_to_be64(iq->phys_addr); + + if (fl) { + fl->size = roundup(fl->size, 8); + fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), + sizeof(struct rx_sw_desc), &fl->addr, + &fl->sdesc, STAT_LEN); + if (!fl->desc) + goto fl_nomem; + + flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | + FW_IQ_CMD_FL0PADEN); + c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | + FW_IQ_CMD_FL0FBMAX(3)); + c.fl0size = htons(flsz); + c.fl0addr = cpu_to_be64(fl->addr); + } + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) + goto err; + + netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); + iq->cur_desc = iq->desc; + iq->cidx = 0; + iq->gen = 1; + iq->next_intr_params = iq->intr_params; + iq->cntxt_id = ntohs(c.iqid); + iq->abs_id = ntohs(c.physiqid); + iq->size--; /* subtract status entry */ + iq->adap = adap; + iq->netdev = dev; + iq->handler = hnd; + + /* set offset to -1 to distinguish ingress queues without FL */ + iq->offset = fl ? 
0 : -1; + + adap->sge.ingr_map[iq->cntxt_id] = iq; + + if (fl) { + fl->cntxt_id = htons(c.fl0id); + fl->avail = fl->pend_cred = 0; + fl->pidx = fl->cidx = 0; + fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; + adap->sge.egr_map[fl->cntxt_id] = fl; + refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); + } + return 0; + +fl_nomem: + ret = -ENOMEM; +err: + if (iq->desc) { + dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, + iq->desc, iq->phys_addr); + iq->desc = NULL; + } + if (fl && fl->desc) { + kfree(fl->sdesc); + fl->sdesc = NULL; + dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), + fl->desc, fl->addr); + fl->desc = NULL; + } + return ret; +} + +static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) +{ + q->in_use = 0; + q->cidx = q->pidx = 0; + q->stops = q->restarts = 0; + q->stat = (void *)&q->desc[q->size]; + q->cntxt_id = id; + adap->sge.egr_map[id] = q; +} + +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *netdevq, + unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_eth_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, + sizeof(struct tx_desc), sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | + FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); + c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); + c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | + FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_ETH_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | + FW_EQ_ETH_CMD_FBMAX(3) | + FW_EQ_ETH_CMD_CIDXFTHRESH(5) | + FW_EQ_ETH_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) { + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd))); + txq->txq = netdevq; + txq->tso = txq->tx_cso = txq->vlan_ins = 0; + txq->mapping_err = 0; + return 0; +} + +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int cmplqid) +{ + int ret, nentries; + struct fw_eq_ctrl_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, nentries, + sizeof(struct tx_desc), 0, &txq->q.phys_addr, + NULL, 0); + if (!txq->q.desc) + return -ENOMEM; + + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | + FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); + c.physeqid_pkd = htonl(0); + c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | + FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_CTRL_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | + 
FW_EQ_CTRL_CMD_FBMAX(3) | + FW_EQ_CTRL_CMD_CIDXFTHRESH(5) | + FW_EQ_CTRL_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) { + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid))); + txq->adap = adap; + skb_queue_head_init(&txq->sendq); + tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); + txq->full = 0; + return 0; +} + +int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, + struct net_device *dev, unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_ofld_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, + sizeof(struct tx_desc), sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | + FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); + c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | + FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_OFLD_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | + FW_EQ_OFLD_CMD_FBMAX(3) | + FW_EQ_OFLD_CMD_CIDXFTHRESH(5) | + FW_EQ_OFLD_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) { + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd))); + txq->adap = adap; + skb_queue_head_init(&txq->sendq); + tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); + txq->full = 0; + txq->mapping_err = 0; + return 0; +} + +static void free_txq(struct adapter *adap, struct sge_txq *q) +{ + dma_free_coherent(adap->pdev_dev, + q->size * sizeof(struct tx_desc) + STAT_LEN, + q->desc, q->phys_addr); + q->cntxt_id = 0; + q->sdesc = NULL; + q->desc = NULL; +} + +static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, + struct sge_fl *fl) +{ + unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; + + adap->sge.ingr_map[rq->cntxt_id] = NULL; + t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id, + 0xffff); + dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, + rq->desc, rq->phys_addr); + netif_napi_del(&rq->napi); + rq->netdev = NULL; + rq->cntxt_id = rq->abs_id = 0; + rq->desc = NULL; + + if (fl) { + free_rx_bufs(adap, fl, fl->avail); + dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN, + fl->desc, fl->addr); + kfree(fl->sdesc); + fl->sdesc = NULL; + fl->cntxt_id = 0; + fl->desc = NULL; + } +} + +/** + * t4_free_sge_resources - free SGE resources + * @adap: the adapter + * + * Frees resources used by the SGE queue sets. 
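+ *	Any pending qresume tasklets are killed before the rings and their
+ *	descriptor memory are released.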
+ */ +void t4_free_sge_resources(struct adapter *adap) +{ + int i; + struct sge_eth_rxq *eq = adap->sge.ethrxq; + struct sge_eth_txq *etq = adap->sge.ethtxq; + struct sge_ofld_rxq *oq = adap->sge.ofldrxq; + + /* clean up Ethernet Tx/Rx queues */ + for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { + if (eq->rspq.desc) + free_rspq_fl(adap, &eq->rspq, &eq->fl); + if (etq->q.desc) { + t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id); + free_tx_desc(adap, &etq->q, etq->q.in_use, true); + kfree(etq->q.sdesc); + free_txq(adap, &etq->q); + } + } + + /* clean up RDMA and iSCSI Rx queues */ + for (i = 0; i < adap->sge.ofldqsets; i++, oq++) { + if (oq->rspq.desc) + free_rspq_fl(adap, &oq->rspq, &oq->fl); + } + for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) { + if (oq->rspq.desc) + free_rspq_fl(adap, &oq->rspq, &oq->fl); + } + + /* clean up offload Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { + struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; + + if (q->q.desc) { + tasklet_kill(&q->qresume_tsk); + t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id); + free_tx_desc(adap, &q->q, q->q.in_use, false); + kfree(q->q.sdesc); + __skb_queue_purge(&q->sendq); + free_txq(adap, &q->q); + } + } + + /* clean up control Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { + struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; + + if (cq->q.desc) { + tasklet_kill(&cq->qresume_tsk); + t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id); + __skb_queue_purge(&cq->sendq); + free_txq(adap, &cq->q); + } + } + + if (adap->sge.fw_evtq.desc) + free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); + + if (adap->sge.intrq.desc) + free_rspq_fl(adap, &adap->sge.intrq, NULL); + + /* clear the reverse egress queue map */ + memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); +} + +void t4_sge_start(struct adapter *adap) +{ + adap->sge.ethtxq_rover = 0; + mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); + mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); +} + +/** + * t4_sge_stop - disable SGE operation + * @adap: the adapter + * + * Stop tasklets and timers associated with the DMA engine. Note that + * this is effective only if measures have been taken to disable any HW + * events that may restart them. + */ +void t4_sge_stop(struct adapter *adap) +{ + int i; + struct sge *s = &adap->sge; + + if (in_interrupt()) /* actions below require waiting */ + return; + + if (s->rx_timer.function) + del_timer_sync(&s->rx_timer); + if (s->tx_timer.function) + del_timer_sync(&s->tx_timer); + + for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) { + struct sge_ofld_txq *q = &s->ofldtxq[i]; + + if (q->q.desc) + tasklet_kill(&q->qresume_tsk); + } + for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { + struct sge_ctrl_txq *cq = &s->ctrlq[i]; + + if (cq->q.desc) + tasklet_kill(&cq->qresume_tsk); + } +} + +/** + * t4_sge_init - initialize SGE + * @adap: the adapter + * + * Performs SGE initialization needed every time after a chip reset. + * We do not initialize any of the queues here, instead the driver + * top-level must request them individually. + */ +void t4_sge_init(struct adapter *adap) +{ + struct sge *s = &adap->sge; + unsigned int fl_align_log = ilog2(FL_ALIGN); + + t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK | + INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE, + INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | + RXPKTCPLMODE | + (STAT_LEN == 128 ? 
EGRSTATUSPAGESIZE : 0)); + t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK, + HOSTPAGESIZEPF0(PAGE_SHIFT - 10)); + t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); +#if FL_PG_ORDER > 0 + t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); +#endif + t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, + THRESHOLD_0(s->counter_val[0]) | + THRESHOLD_1(s->counter_val[1]) | + THRESHOLD_2(s->counter_val[2]) | + THRESHOLD_3(s->counter_val[3])); + t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); + t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3]))); + t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5]))); + setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); + setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); + s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ + s->idma_state[0] = s->idma_state[1] = 0; + spin_lock_init(&s->intrq_lock); +} diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c new file mode 100644 index 00000000000..a814a3afe12 --- /dev/null +++ b/drivers/net/cxgb4/t4_hw.c @@ -0,0 +1,3131 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/init.h> +#include <linux/delay.h> +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4fw_api.h" + +/** + * t4_wait_op_done_val - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. + */ +int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp) +{ + while (1) { + u32 val = t4_read_reg(adapter, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay) +{ + return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, + delay, NULL); +} + +/** + * t4_set_reg_field - set a register field to a value + * @adapter: the adapter to program + * @addr: the register address + * @mask: specifies the portion of the register to modify + * @val: the new value for the register field + * + * Sets a register field specified by the supplied mask to the + * given value. + */ +void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, + u32 val) +{ + u32 v = t4_read_reg(adapter, addr) & ~mask; + + t4_write_reg(adapter, addr, v | val); + (void) t4_read_reg(adapter, addr); /* flush */ +} + +/** + * t4_read_indirect - read indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect address + * @data_reg: register holding the value of the indirect register + * @vals: where the read register values are stored + * @nregs: how many indirect registers to read + * @start_idx: index of first indirect register to read + * + * Reads registers that are accessed indirectly through an address/data + * register pair. + */ +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, unsigned int nregs, + unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx); + *vals++ = t4_read_reg(adap, data_reg); + start_idx++; + } +} + +/** + * t4_write_indirect - write indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect addresses + * @data_reg: register holding the value for the indirect registers + * @vals: values to write + * @nregs: how many indirect registers to write + * @start_idx: address of first indirect register to write + * + * Writes a sequential block of registers that are accessed indirectly + * through an address/data register pair. + */ +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx++); + t4_write_reg(adap, data_reg, *vals++); + } +} + +/* + * Get the reply to a mailbox command and store it in @rpl in big-endian order. 
+ */ +static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, + u32 mbox_addr) +{ + for ( ; nflit; nflit--, mbox_addr += 8) + *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); +} + +/* + * Handle a FW assertion reported in a mailbox. + */ +static void fw_asrt(struct adapter *adap, u32 mbox_addr) +{ + struct fw_debug_cmd asrt; + + get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); + dev_alert(adap->pdev_dev, + "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", + asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line), + ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y)); +} + +static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) +{ + dev_err(adap->pdev_dev, + "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, + (unsigned long long)t4_read_reg64(adap, data_reg), + (unsigned long long)t4_read_reg64(adap, data_reg + 8), + (unsigned long long)t4_read_reg64(adap, data_reg + 16), + (unsigned long long)t4_read_reg64(adap, data_reg + 24), + (unsigned long long)t4_read_reg64(adap, data_reg + 32), + (unsigned long long)t4_read_reg64(adap, data_reg + 40), + (unsigned long long)t4_read_reg64(adap, data_reg + 48), + (unsigned long long)t4_read_reg64(adap, data_reg + 56)); +} + +/** + * t4_wr_mbox_meat - send a command to FW through the given mailbox + * @adap: the adapter + * @mbox: index of the mailbox to use + * @cmd: the command to write + * @size: command length in bytes + * @rpl: where to optionally store the reply + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sends the given command to FW through the selected mailbox and waits + * for the FW to execute the command. If @rpl is not %NULL it is used to + * store the FW's reply to the command. The command and its optional + * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms + * to respond. @sleep_ok determines whether we may sleep while awaiting + * the response. If sleeping is allowed we use progressive backoff + * otherwise we spin. + * + * The return value is 0 on success or a negative errno on failure. A + * failure can happen either because we are not able to execute the + * command or FW executes it but signals an error. In the latter case + * the return value is the error code indicated by FW (negated). + */ +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok) +{ + static int delay[] = { + 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 + }; + + u32 v; + u64 res; + int i, ms, delay_idx; + const __be64 *p = cmd; + u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA); + u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL); + + if ((size & 15) || size > MBOX_LEN) + return -EINVAL; + + v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); + for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) + v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); + + if (v != MBOX_OWNER_DRV) + return v ? 
-EBUSY : -ETIMEDOUT;
+
+	for (i = 0; i < size; i += 8)
+		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
+
+	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+	t4_read_reg(adap, ctl_reg);	/* flush write */
+
+	delay_idx = 0;
+	ms = delay[0];
+
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+		if (sleep_ok) {
+			ms = delay[delay_idx];	/* last element may repeat */
+			if (delay_idx < ARRAY_SIZE(delay) - 1)
+				delay_idx++;
+			msleep(ms);
+		} else
+			mdelay(ms);
+
+		v = t4_read_reg(adap, ctl_reg);
+		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+			if (!(v & MBMSGVALID)) {
+				t4_write_reg(adap, ctl_reg, 0);
+				continue;
+			}
+
+			res = t4_read_reg64(adap, data_reg);
+			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
+				fw_asrt(adap, data_reg);
+				res = FW_CMD_RETVAL(EIO);
+			} else if (rpl)
+				get_mbox_rpl(adap, rpl, size / 8, data_reg);
+
+			if (FW_CMD_RETVAL_GET((int)res))
+				dump_mbox(adap, mbox, data_reg);
+			t4_write_reg(adap, ctl_reg, 0);
+			return -FW_CMD_RETVAL_GET((int)res);
+		}
+	}
+
+	dump_mbox(adap, mbox, data_reg);
+	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
+		*(const u8 *)cmd, mbox);
+	return -ETIMEDOUT;
+}
+
+/**
+ *	t4_mc_read - read from MC through backdoor accesses
+ *	@adap: the adapter
+ *	@addr: address of first byte requested
+ *	@data: 64 bytes of data containing the requested address
+ *	@ecc: where to store the corresponding 64-bit ECC word
+ *
+ *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ *	that covers the requested address @addr.  If @ecc is not %NULL it
+ *	is assigned the 64-bit ECC word for the read data.
+ */
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+{
+	int i;
+
+	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
+		return -EBUSY;
+	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
+	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
+	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
+	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
+		     BIST_CMD_GAP(1));
+	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
+	if (i)
+		return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+	for (i = 15; i >= 0; i--)
+		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
+	if (ecc)
+		*ecc = t4_read_reg64(adap, MC_DATA(16));
+#undef MC_DATA
+	return 0;
+}
+
+/**
+ *	t4_edc_read - read from EDC through backdoor accesses
+ *	@adap: the adapter
+ *	@idx: which EDC to access
+ *	@addr: address of first byte requested
+ *	@data: 64 bytes of data containing the requested address
+ *	@ecc: where to store the corresponding 64-bit ECC word
+ *
+ *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ *	that covers the requested address @addr.  If @ecc is not %NULL it
+ *	is assigned the 64-bit ECC word for the read data.
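+ *
+ *	Illustrative usage, with caller-local names for the buffers:
+ *
+ *		__be32 buf[16];
+ *		u64 ecc;
+ *		int err = t4_edc_read(adap, 0, addr, buf, &ecc);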
+ */ +int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) +{ + int i; + + idx *= EDC_STRIDE; + if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST) + return -EBUSY; + t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); + t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); + t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); + t4_write_reg(adap, EDC_BIST_CMD + idx, + BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); + i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); + if (i) + return i; + +#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) + + for (i = 15; i >= 0; i--) + *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); + if (ecc) + *ecc = t4_read_reg64(adap, EDC_DATA(16)); +#undef EDC_DATA + return 0; +} + +#define VPD_ENTRY(name, len) \ + u8 name##_kword[2]; u8 name##_len; u8 name##_data[len] + +/* + * Partial EEPROM Vital Product Data structure. Includes only the ID and + * VPD-R sections. + */ +struct t4_vpd { + u8 id_tag; + u8 id_len[2]; + u8 id_data[ID_LEN]; + u8 vpdr_tag; + u8 vpdr_len[2]; + VPD_ENTRY(pn, 16); /* part number */ + VPD_ENTRY(ec, EC_LEN); /* EC level */ + VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ + VPD_ENTRY(na, 12); /* MAC address base */ + VPD_ENTRY(port_type, 8); /* port types */ + VPD_ENTRY(gpio, 14); /* GPIO usage */ + VPD_ENTRY(cclk, 6); /* core clock */ + VPD_ENTRY(port_addr, 8); /* port MDIO addresses */ + VPD_ENTRY(rv, 1); /* csum */ + u32 pad; /* for multiple-of-4 sizing and alignment */ +}; + +#define EEPROM_STAT_ADDR 0x7bfc +#define VPD_BASE 0 + +/** + * t4_seeprom_wp - enable/disable EEPROM write protection + * @adapter: the adapter + * @enable: whether to enable or disable write protection + * + * Enables or disables write protection on the serial EEPROM. + */ +int t4_seeprom_wp(struct adapter *adapter, bool enable) +{ + unsigned int v = enable ? 0xc : 0; + int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); + return ret < 0 ? ret : 0; +} + +/** + * get_vpd_params - read VPD parameters from VPD EEPROM + * @adapter: adapter to read + * @p: where to store the parameters + * + * Reads card parameters stored in VPD EEPROM. 
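+ *	The VPD-R checksum byte ("rv") must bring the byte-wise sum of the
+ *	structure to zero; a non-zero sum is reported as a corrupted EEPROM.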
+ */ +static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) +{ + int ret; + struct t4_vpd vpd; + u8 *q = (u8 *)&vpd, csum; + + ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd); + if (ret < 0) + return ret; + + for (csum = 0; q <= vpd.rv_data; q++) + csum += *q; + + if (csum) { + dev_err(adapter->pdev_dev, + "corrupted VPD EEPROM, actual csum %u\n", csum); + return -EINVAL; + } + + p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); + memcpy(p->id, vpd.id_data, sizeof(vpd.id_data)); + strim(p->id); + memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data)); + strim(p->ec); + memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data)); + strim(p->sn); + return 0; +} + +/* serial flash and firmware constants */ +enum { + SF_ATTEMPTS = 10, /* max retries for SF operations */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ + + FW_START_SEC = 8, /* first flash sector for FW */ + FW_END_SEC = 15, /* last flash sector for FW */ + FW_IMG_START = FW_START_SEC * SF_SEC_SIZE, + FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE, +}; + +/** + * sf1_read - read data from the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, SF_OP) & BUSY) + return -EBUSY; + cont = cont ? SF_CONT : 0; + lock = lock ? SF_LOCK : 0; + t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1)); + ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); + if (!ret) + *valp = t4_read_reg(adapter, SF_DATA); + return ret; +} + +/** + * sf1_write - write data to the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, SF_OP) & BUSY) + return -EBUSY; + cont = cont ? SF_CONT : 0; + lock = lock ? SF_LOCK : 0; + t4_write_reg(adapter, SF_DATA, val); + t4_write_reg(adapter, SF_OP, lock | + cont | BYTECNT(byte_cnt - 1) | OP_WR); + return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); +} + +/** + * flash_wait_op - wait for a flash operation to complete + * @adapter: the adapter + * @attempts: max number of polls of the status register + * @delay: delay between polls in ms + * + * Wait for a flash operation to complete by polling the status register. 
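+ *	Bit 0 of the flash status register is the busy flag; polling stops
+ *	once it clears or after @attempts polls.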
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+	int ret;
+	u32 status;
+
+	while (1) {
+		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
+		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
+			return ret;
+		if (!(status & 1))
+			return 0;
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			msleep(delay);
+	}
+}
+
+/**
+ *	t4_read_flash - read words from serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address for the read
+ *	@nwords: how many 32-bit words to read
+ *	@data: where to store the read data
+ *	@byte_oriented: whether to store data as bytes or as words
+ *
+ *	Read the specified number of 32-bit words from the serial flash.
+ *	If @byte_oriented is set the read data is stored as a byte array
+ *	(i.e., big-endian), otherwise as 32-bit words in the platform's
+ *	natural endianness.
+ */
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+		  unsigned int nwords, u32 *data, int byte_oriented)
+{
+	int ret;
+
+	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+		return -EINVAL;
+
+	addr = swab32(addr) | SF_RD_DATA_FAST;
+
+	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
+	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
+		return ret;
+
+	for ( ; nwords; nwords--, data++) {
+		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+		if (nwords == 1)
+			t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
+		if (ret)
+			return ret;
+		if (byte_oriented)
+			*data = htonl(*data);
+	}
+	return 0;
+}
+
+/**
+ *	t4_write_flash - write up to a page of data to the serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address to write
+ *	@n: length of data to write in bytes
+ *	@data: the data to write
+ *
+ *	Writes up to a page of data (256 bytes) to the serial flash starting
+ *	at the given address.  All the data must be written to the same page.
+ */
+static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+			  unsigned int n, const u8 *data)
+{
+	int ret;
+	u32 buf[64];
+	unsigned int i, c, left, val, offset = addr & 0xff;
+
+	if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
+		return -EINVAL;
+
+	val = swab32(addr) | SF_PROG_PAGE;
+
+	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+		goto unlock;
+
+	for (left = n; left; left -= c) {
+		c = min(left, 4U);
+		for (val = 0, i = 0; i < c; ++i)
+			val = (val << 8) + *data++;
+
+		ret = sf1_write(adapter, c, c != left, 1, val);
+		if (ret)
+			goto unlock;
+	}
+	ret = flash_wait_op(adapter, 5, 1);
+	if (ret)
+		goto unlock;
+
+	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
+
+	/* Read the page to verify the write succeeded */
+	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+	if (ret)
+		return ret;
+
+	if (memcmp(data - n, (u8 *)buf + offset, n)) {
+		dev_err(adapter->pdev_dev,
+			"failed to correctly write the flash page at %#x\n",
+			addr);
+		return -EIO;
+	}
+	return 0;
+
+unlock:
+	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
+	return ret;
+}
+
+/**
+ *	get_fw_version - read the firmware version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the FW version from flash.
+ */
+static int get_fw_version(struct adapter *adapter, u32 *vers)
+{
+	return t4_read_flash(adapter,
+			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
+			     vers, 0);
+}
+
+/**
+ *	get_tp_version - read the TP microcode version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the TP microcode version from flash.
+ */
+static int get_tp_version(struct adapter *adapter, u32 *vers)
+{
+	return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
+							      tp_microcode_ver),
+			     1, vers, 0);
+}
+
+/**
+ *	t4_check_fw_version - check if the FW is compatible with this driver
+ *	@adapter: the adapter
+ *
+ *	Checks if an adapter's FW is compatible with the driver.  Returns 0
+ *	if there's an exact match, a negative error if the version could not
+ *	be read or there's a major version mismatch, and a positive value if
+ *	the expected major version is found but there's a minor version
+ *	mismatch.
+ */
+int t4_check_fw_version(struct adapter *adapter)
+{
+	u32 api_vers[2];
+	int ret, major, minor, micro;
+
+	ret = get_fw_version(adapter, &adapter->params.fw_vers);
+	if (!ret)
+		ret = get_tp_version(adapter, &adapter->params.tp_vers);
+	if (!ret)
+		ret = t4_read_flash(adapter,
+			FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
+			2, api_vers, 1);
+	if (ret)
+		return ret;
+
+	major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
+	minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
+	micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
+	memcpy(adapter->params.api_vers, api_vers,
+	       sizeof(adapter->params.api_vers));
+
+	if (major != FW_VERSION_MAJOR) {	/* major mismatch - fail */
+		dev_err(adapter->pdev_dev,
+			"card FW has major version %u, driver wants %u\n",
+			major, FW_VERSION_MAJOR);
+		return -EINVAL;
+	}
+
+	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+		return 0;	/* perfect match */
+
+	/* Minor/micro version mismatch.  Report it but often it's OK. */
+	return 1;
+}
+
+/**
+ *	t4_flash_erase_sectors - erase a range of flash sectors
+ *	@adapter: the adapter
+ *	@start: the first sector to erase
+ *	@end: the last sector to erase
+ *
+ *	Erases the sectors in the given inclusive range.
+ */
+static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+	int ret = 0;
+
+	while (start <= end) {
+		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+		    (ret = sf1_write(adapter, 4, 0, 1,
+				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
+		    (ret = flash_wait_op(adapter, 5, 500)) != 0) {
+			dev_err(adapter->pdev_dev,
+				"erase of flash sector %d failed, error %d\n",
+				start, ret);
+			break;
+		}
+		start++;
+	}
+	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
+	return ret;
+}
+
+/**
+ *	t4_load_fw - download firmware
+ *	@adap: the adapter
+ *	@fw_data: the firmware image to write
+ *	@size: image size
+ *
+ *	Write the supplied firmware image to the card's serial flash.
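+ *	The image must be a multiple of 512 bytes, its header length field
+ *	must match @size, and the 32-bit big-endian words of the image must
+ *	sum to 0xffffffff; otherwise it is rejected before anything is
+ *	written to flash.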
+ */
+int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+{
+	u32 csum;
+	int ret, addr;
+	unsigned int i;
+	u8 first_page[SF_PAGE_SIZE];
+	const u32 *p = (const u32 *)fw_data;
+	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
+
+	if (!size) {
+		dev_err(adap->pdev_dev, "FW image has no data\n");
+		return -EINVAL;
+	}
+	if (size & 511) {
+		dev_err(adap->pdev_dev,
+			"FW image size not multiple of 512 bytes\n");
+		return -EINVAL;
+	}
+	if (ntohs(hdr->len512) * 512 != size) {
+		dev_err(adap->pdev_dev,
+			"FW image size differs from size in FW header\n");
+		return -EINVAL;
+	}
+	if (size > FW_MAX_SIZE) {
+		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
+			FW_MAX_SIZE);
+		return -EFBIG;
+	}
+
+	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+		csum += ntohl(p[i]);
+
+	if (csum != 0xffffffff) {
+		dev_err(adap->pdev_dev,
+			"corrupted firmware image, checksum %#x\n", csum);
+		return -EINVAL;
+	}
+
+	i = DIV_ROUND_UP(size, SF_SEC_SIZE);        /* # of sectors spanned */
+	ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
+	if (ret)
+		goto out;
+
+	/*
+	 * We write the correct version at the end so the driver can see a bad
+	 * version if the FW write fails.  Start by writing a copy of the
+	 * first page with a bad version.
+	 */
+	memcpy(first_page, fw_data, SF_PAGE_SIZE);
+	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+	ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
+	if (ret)
+		goto out;
+
+	addr = FW_IMG_START;
+	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+		addr += SF_PAGE_SIZE;
+		fw_data += SF_PAGE_SIZE;
+		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+		if (ret)
+			goto out;
+	}
+
+	ret = t4_write_flash(adap,
+			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
+			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+out:
+	if (ret)
+		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
+			ret);
+	return ret;
+}
+
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+
+/**
+ * t4_link_start - apply link configuration to MAC/PHY
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: the port id
+ * @lc: the requested link configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ *   enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ *   otherwise do it later based on the outcome of auto-negotiation.
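+ *
+ * An illustrative call requesting pause frames in both directions
+ * (a sketch; the surrounding context is assumed):
+ *
+ *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
+ *	ret = t4_link_start(adap, mbox, port, lc);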
+ */
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+		  struct link_config *lc)
+{
+	struct fw_port_cmd c;
+	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+
+	lc->link_ok = 0;
+	if (lc->requested_fc & PAUSE_RX)
+		fc |= FW_PORT_CAP_FC_RX;
+	if (lc->requested_fc & PAUSE_TX)
+		fc |= FW_PORT_CAP_FC_TX;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
+			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
+	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+				  FW_LEN16(c));
+
+	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+	} else if (lc->autoneg == AUTONEG_DISABLE) {
+		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+	} else
+		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_restart_aneg - restart autonegotiation
+ * @adap: the adapter
+ * @mbox: mbox to use for the FW command
+ * @port: the port id
+ *
+ * Restarts autonegotiation for the selected port.
+ */
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
+{
+	struct fw_port_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
+			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
+	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+				  FW_LEN16(c));
+	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_set_vlan_accel - configure HW VLAN extraction
+ * @adap: the adapter
+ * @ports: bitmap of adapter ports to operate on
+ * @on: enable (1) or disable (0) HW VLAN extraction
+ *
+ * Enables or disables HW extraction of VLAN tags for the ports specified
+ * by @ports.  @ports is a bitmap with the ith bit designating the port
+ * associated with the ith adapter channel.
+ */
+void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
+{
+	ports <<= VLANEXTENABLE_SHIFT;
+	t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
+}
+
+struct intr_info {
+	unsigned int mask;       /* bits to check in interrupt status */
+	const char *msg;         /* message to print or NULL */
+	short stat_idx;          /* stat counter to increment or -1 */
+	unsigned short fatal;    /* whether the condition reported is fatal */
+};
+
+/**
+ * t4_handle_intr_status - table-driven interrupt handler
+ * @adapter: the adapter that generated the interrupt
+ * @reg: the interrupt status register to process
+ * @acts: table of interrupt actions
+ *
+ * A table-driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occurred.  The actions include
+ * optionally emitting a warning or alert message.  The table is terminated
+ * by an entry specifying mask 0.  Returns the number of fatal interrupt
+ * conditions.
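+ *
+ * A minimal table, patterned on the per-module handlers below (bit
+ * names hypothetical, for illustration only):
+ *
+ *	static struct intr_info example_intr_info[] = {
+ *		{ SOME_FATAL_BIT, "example fatal parity error", -1, 1 },
+ *		{ SOME_BENIGN_BIT, "example benign condition", -1, 0 },
+ *		{ 0 }
+ *	};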
+ */ +static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, + const struct intr_info *acts) +{ + int fatal = 0; + unsigned int mask = 0; + unsigned int status = t4_read_reg(adapter, reg); + + for ( ; acts->mask; ++acts) { + if (!(status & acts->mask)) + continue; + if (acts->fatal) { + fatal++; + dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, + status & acts->mask); + } else if (acts->msg && printk_ratelimit()) + dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, + status & acts->mask); + mask |= acts->mask; + } + status &= mask; + if (status) /* clear processed interrupts */ + t4_write_reg(adapter, reg, status); + return fatal; +} + +/* + * Interrupt handler for the PCIE module. + */ +static void pcie_intr_handler(struct adapter *adapter) +{ + static struct intr_info sysbus_intr_info[] = { + { RNPP, "RXNP array parity error", -1, 1 }, + { RPCP, "RXPC array parity error", -1, 1 }, + { RCIP, "RXCIF array parity error", -1, 1 }, + { RCCP, "Rx completions control array parity error", -1, 1 }, + { RFTP, "RXFT array parity error", -1, 1 }, + { 0 } + }; + static struct intr_info pcie_port_intr_info[] = { + { TPCP, "TXPC array parity error", -1, 1 }, + { TNPP, "TXNP array parity error", -1, 1 }, + { TFTP, "TXFT array parity error", -1, 1 }, + { TCAP, "TXCA array parity error", -1, 1 }, + { TCIP, "TXCIF array parity error", -1, 1 }, + { RCAP, "RXCA array parity error", -1, 1 }, + { OTDD, "outbound request TLP discarded", -1, 1 }, + { RDPE, "Rx data parity error", -1, 1 }, + { TDUE, "Tx uncorrectable data error", -1, 1 }, + { 0 } + }; + static struct intr_info pcie_intr_info[] = { + { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, + { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, + { MSIDATAPERR, "MSI data parity error", -1, 1 }, + { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, + { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, + { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, + { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, + { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, + { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, + { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, + { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, + { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, + { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR, "PCI FID parity error", -1, 1 }, + { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, + { MATAGPERR, "PCI MA tag parity error", -1, 1 }, + { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, + { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, + { RXWRPERR, "PCI Rx write parity error", -1, 1 }, + { RPLPERR, "PCI replay buffer parity error", -1, 1 }, + { PCIESINT, "PCI core secondary fault", -1, 1 }, + { PCIEPINT, "PCI core primary fault", -1, 1 }, + { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + sysbus_intr_info) + + t4_handle_intr_status(adapter, + 
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + pcie_port_intr_info) + + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); + if (fat) + t4_fatal_err(adapter); +} + +/* + * TP interrupt handler. + */ +static void tp_intr_handler(struct adapter *adapter) +{ + static struct intr_info tp_intr_info[] = { + { 0x3fffffff, "TP parity error", -1, 1 }, + { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) + t4_fatal_err(adapter); +} + +/* + * SGE interrupt handler. + */ +static void sge_intr_handler(struct adapter *adapter) +{ + u64 v; + + static struct intr_info sge_intr_info[] = { + { ERR_CPL_EXCEED_IQE_SIZE, + "SGE received CPL exceeding IQE size", -1, 1 }, + { ERR_INVALID_CIDX_INC, + "SGE GTS CIDX increment too large", -1, 0 }, + { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, + { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, + { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, + "SGE IQID > 1023 received CPL for FL", -1, 0 }, + { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, + 0 }, + { ERR_ING_CTXT_PRIO, + "SGE too many priority ingress contexts", -1, 0 }, + { ERR_EGR_CTXT_PRIO, + "SGE too many priority egress contexts", -1, 0 }, + { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, + { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, + { 0 } + }; + + v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | + ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); + if (v) { + dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", + (unsigned long long)v); + t4_write_reg(adapter, SGE_INT_CAUSE1, v); + t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); + } + + if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || + v != 0) + t4_fatal_err(adapter); +} + +/* + * CIM interrupt handler. 
+ */
+static void cim_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info cim_intr_info[] = {
+		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
+		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
+		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
+		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
+		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
+		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info cim_upintr_info[] = {
+		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
+		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
+		{ ILLWRINT, "CIM illegal write", -1, 1 },
+		{ ILLRDINT, "CIM illegal read", -1, 1 },
+		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
+		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
+		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
+		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
+		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
+		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
+		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
+		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
+		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
+		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
+		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
+		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
+		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
+		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
+		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
+		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
+		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
+		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
+		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
+		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
+		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
+		{ 0 }
+	};
+
+	int fat;
+
+	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
+				    cim_intr_info) +
+	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
+				    cim_upintr_info);
+	if (fat)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info ulprx_intr_info[] = {
+		{ 0x7fffff, "ULPRX parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info ulptx_intr_info[] = {
+		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+		  0 },
+		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+		  0 },
+		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+		  0 },
+		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+		  0 },
+		{ 0xfffffff, "ULPTX parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * PM TX interrupt handler.
+ */ +static void pmtx_intr_handler(struct adapter *adapter) +{ + static struct intr_info pmtx_intr_info[] = { + { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, + { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, + { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, + { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, + { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, + { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, + { 0 } + }; + + if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * PM RX interrupt handler. + */ +static void pmrx_intr_handler(struct adapter *adapter) +{ + static struct intr_info pmrx_intr_info[] = { + { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, + { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, + { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, + { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, + { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, + { 0 } + }; + + if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * CPL switch interrupt handler. + */ +static void cplsw_intr_handler(struct adapter *adapter) +{ + static struct intr_info cplsw_intr_info[] = { + { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, + { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, + { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, + { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, + { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, + { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) + t4_fatal_err(adapter); +} + +/* + * LE interrupt handler. + */ +static void le_intr_handler(struct adapter *adap) +{ + static struct intr_info le_intr_info[] = { + { LIPMISS, "LE LIP miss", -1, 0 }, + { LIP0, "LE 0 LIP error", -1, 0 }, + { PARITYERR, "LE parity error", -1, 1 }, + { UNKNOWNCMD, "LE unknown command", -1, 1 }, + { REQQPARERR, "LE request queue parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) + t4_fatal_err(adap); +} + +/* + * MPS interrupt handler. 
+ */ +static void mps_intr_handler(struct adapter *adapter) +{ + static struct intr_info mps_rx_intr_info[] = { + { 0xffffff, "MPS Rx parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_tx_intr_info[] = { + { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, + { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, + { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, + { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, + { BUBBLE, "MPS Tx underflow", -1, 1 }, + { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, + { FRMERR, "MPS Tx framing error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_trc_intr_info[] = { + { FILTMEM, "MPS TRC filter parity error", -1, 1 }, + { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, + { MISCPERR, "MPS TRC misc parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_stat_sram_intr_info[] = { + { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_stat_tx_intr_info[] = { + { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_stat_rx_intr_info[] = { + { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_cls_intr_info[] = { + { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, + { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, + { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, + mps_rx_intr_info) + + t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, + mps_tx_intr_info) + + t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, + mps_trc_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, + mps_stat_sram_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, + mps_stat_tx_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, + mps_stat_rx_intr_info) + + t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, + mps_cls_intr_info); + + t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT | + RXINT | TXINT | STATINT); + t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */ + if (fat) + t4_fatal_err(adapter); +} + +#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) + +/* + * EDC/MC interrupt handler. + */ +static void mem_intr_handler(struct adapter *adapter, int idx) +{ + static const char name[3][5] = { "EDC0", "EDC1", "MC" }; + + unsigned int addr, cnt_addr, v; + + if (idx <= MEM_EDC1) { + addr = EDC_REG(EDC_INT_CAUSE, idx); + cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); + } else { + addr = MC_INT_CAUSE; + cnt_addr = MC_ECC_STATUS; + } + + v = t4_read_reg(adapter, addr) & MEM_INT_MASK; + if (v & PERR_INT_CAUSE) + dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", + name[idx]); + if (v & ECC_CE_INT_CAUSE) { + u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); + + t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); + if (printk_ratelimit()) + dev_warn(adapter->pdev_dev, + "%u %s correctable ECC data error%s\n", + cnt, name[idx], cnt > 1 ? "s" : ""); + } + if (v & ECC_UE_INT_CAUSE) + dev_alert(adapter->pdev_dev, + "%s uncorrectable ECC data error\n", name[idx]); + + t4_write_reg(adapter, addr, v); + if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) + t4_fatal_err(adapter); +} + +/* + * MA interrupt handler. 
+ */ +static void ma_intr_handler(struct adapter *adap) +{ + u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); + + if (status & MEM_PERR_INT_CAUSE) + dev_alert(adap->pdev_dev, + "MA parity error, parity status %#x\n", + t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); + if (status & MEM_WRAP_INT_CAUSE) { + v = t4_read_reg(adap, MA_INT_WRAP_STATUS); + dev_alert(adap->pdev_dev, "MA address wrap-around error by " + "client %u to address %#x\n", + MEM_WRAP_CLIENT_NUM_GET(v), + MEM_WRAP_ADDRESS_GET(v) << 4); + } + t4_write_reg(adap, MA_INT_CAUSE, status); + t4_fatal_err(adap); +} + +/* + * SMB interrupt handler. + */ +static void smb_intr_handler(struct adapter *adap) +{ + static struct intr_info smb_intr_info[] = { + { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, + { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, + { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) + t4_fatal_err(adap); +} + +/* + * NC-SI interrupt handler. + */ +static void ncsi_intr_handler(struct adapter *adap) +{ + static struct intr_info ncsi_intr_info[] = { + { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, + { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, + { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, + { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) + t4_fatal_err(adap); +} + +/* + * XGMAC interrupt handler. + */ +static void xgmac_intr_handler(struct adapter *adap, int port) +{ + u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); + + v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; + if (!v) + return; + + if (v & TXFIFO_PRTY_ERR) + dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", + port); + if (v & RXFIFO_PRTY_ERR) + dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", + port); + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); + t4_fatal_err(adap); +} + +/* + * PL interrupt handler. + */ +static void pl_intr_handler(struct adapter *adap) +{ + static struct intr_info pl_intr_info[] = { + { FATALPERR, "T4 fatal parity error", -1, 1 }, + { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) + t4_fatal_err(adap); +} + +#define PF_INTR_MASK (PFSW | PFCIM) +#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ + EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ + CPL_SWITCH | SGE | ULP_TX) + +/** + * t4_slow_intr_handler - control path interrupt handler + * @adapter: the adapter + * + * T4 interrupt handler for non-data global interrupt events, e.g., errors. + * The designation 'slow' is because it involves register reads, while + * data interrupts typically don't involve any MMIOs. 
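+ *
+ * Returns 0 if no enabled global interrupt cause was pending and 1
+ * otherwise, so a wrapping interrupt service routine could use the
+ * result as in this sketch (the wrapper is assumed, not in this patch):
+ *
+ *	if (!t4_slow_intr_handler(adapter))
+ *		return IRQ_NONE;
+ *	return IRQ_HANDLED;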
+ */ +int t4_slow_intr_handler(struct adapter *adapter) +{ + u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); + + if (!(cause & GLBL_INTR_MASK)) + return 0; + if (cause & CIM) + cim_intr_handler(adapter); + if (cause & MPS) + mps_intr_handler(adapter); + if (cause & NCSI) + ncsi_intr_handler(adapter); + if (cause & PL) + pl_intr_handler(adapter); + if (cause & SMB) + smb_intr_handler(adapter); + if (cause & XGMAC0) + xgmac_intr_handler(adapter, 0); + if (cause & XGMAC1) + xgmac_intr_handler(adapter, 1); + if (cause & XGMAC_KR0) + xgmac_intr_handler(adapter, 2); + if (cause & XGMAC_KR1) + xgmac_intr_handler(adapter, 3); + if (cause & PCIE) + pcie_intr_handler(adapter); + if (cause & MC) + mem_intr_handler(adapter, MEM_MC); + if (cause & EDC0) + mem_intr_handler(adapter, MEM_EDC0); + if (cause & EDC1) + mem_intr_handler(adapter, MEM_EDC1); + if (cause & LE) + le_intr_handler(adapter); + if (cause & TP) + tp_intr_handler(adapter); + if (cause & MA) + ma_intr_handler(adapter); + if (cause & PM_TX) + pmtx_intr_handler(adapter); + if (cause & PM_RX) + pmrx_intr_handler(adapter); + if (cause & ULP_RX) + ulprx_intr_handler(adapter); + if (cause & CPL_SWITCH) + cplsw_intr_handler(adapter); + if (cause & SGE) + sge_intr_handler(adapter); + if (cause & ULP_TX) + ulptx_intr_handler(adapter); + + /* Clear the interrupts just processed for which we are the master. */ + t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); + (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ + return 1; +} + +/** + * t4_intr_enable - enable interrupts + * @adapter: the adapter whose interrupts should be enabled + * + * Enable PF-specific interrupts for the calling function and the top-level + * interrupt concentrator for global interrupts. Interrupts are already + * enabled at each module, here we just enable the roots of the interrupt + * hierarchies. + * + * Note: this function should be called only when the driver manages + * non PF-specific interrupts from the various HW modules. Only one PCI + * function at a time should be doing this. + */ +void t4_intr_enable(struct adapter *adapter) +{ + u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); + + t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | + ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | + ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | + ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | + ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | + ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | + ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | + EGRESS_SIZE_ERR); + t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); + t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); +} + +/** + * t4_intr_disable - disable interrupts + * @adapter: the adapter whose interrupts should be disabled + * + * Disable interrupts. We only disable the top-level interrupt + * concentrators. The caller must be a PCI function managing global + * interrupts. + */ +void t4_intr_disable(struct adapter *adapter) +{ + u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); + + t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); + t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); +} + +/** + * t4_intr_clear - clear all interrupts + * @adapter: the adapter whose interrupts should be cleared + * + * Clears all interrupts. The caller must be a PCI function managing + * global interrupts. 
+ */ +void t4_intr_clear(struct adapter *adapter) +{ + static const unsigned int cause_reg[] = { + SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + PCIE_NONFAT_ERR, PCIE_INT_CAUSE, + MC_INT_CAUSE, + MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE, + EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1), + CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE, + MYPF_REG(CIM_PF_HOST_INT_CAUSE), + TP_INT_CAUSE, + ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE, + PM_RX_INT_CAUSE, PM_TX_INT_CAUSE, + MPS_RX_PERR_INT_CAUSE, + CPL_INTR_CAUSE, + MYPF_REG(PL_PF_INT_CAUSE), + PL_PL_INT_CAUSE, + LE_DB_INT_CAUSE, + }; + + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(cause_reg); ++i) + t4_write_reg(adapter, cause_reg[i], 0xffffffff); + + t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK); + (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ +} + +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by HW inexact + * (hash) address matching. + */ +static int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + +/** + * t4_config_rss_range - configure a portion of the RSS mapping table + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: virtual interface whose RSS subtable is to be written + * @start: start entry in the table to write + * @n: how many table entries to write + * @rspq: values for the response queue lookup table + * @nrspq: number of values in @rspq + * + * Programs the selected part of the VI's RSS mapping table with the + * provided values. If @nrspq < @n the supplied values are used repeatedly + * until the full table range is populated. + * + * The caller must ensure the values in @rspq are in the range allowed for + * @viid. + */ +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq) +{ + int ret; + const u16 *rsp = rspq; + const u16 *rsp_end = rspq + nrspq; + struct fw_rss_ind_tbl_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE | + FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.retval_len16 = htonl(FW_LEN16(cmd)); + + /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ + while (n > 0) { + int nq = min(n, 32); + __be32 *qp = &cmd.iq0_to_iq2; + + cmd.niqid = htons(nq); + cmd.startidx = htons(start); + + start += nq; + n -= nq; + + while (nq > 0) { + unsigned int v; + + v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + + *qp++ = htonl(v); + nq -= 3; + } + + ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); + if (ret) + return ret; + } + return 0; +} + +/** + * t4_config_glbl_rss - configure the global RSS mode + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @mode: global RSS mode + * @flags: mode-specific flags + * + * Sets the global RSS mode. 
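+ *
+ * Only %FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL and
+ * %FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL are accepted; any other
+ * @mode fails with -EINVAL.  An illustrative call:
+ *
+ *	ret = t4_config_glbl_rss(adap, mbox,
+ *				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ *				 flags);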
+ */ +int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, + unsigned int flags) +{ + struct fw_rss_glb_config_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + c.retval_len16 = htonl(FW_LEN16(c)); + if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { + c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { + c.u.basicvirtual.mode_pkd = + htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); + } else + return -EINVAL; + return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); +} + +/* Read an RSS table row */ +static int rd_rss_row(struct adapter *adap, int row, u32 *val) +{ + t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row); + return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1, + 5, 0, val); +} + +/** + * t4_read_rss - read the contents of the RSS mapping table + * @adapter: the adapter + * @map: holds the contents of the RSS mapping table + * + * Reads the contents of the RSS hash->queue mapping table. + */ +int t4_read_rss(struct adapter *adapter, u16 *map) +{ + u32 val; + int i, ret; + + for (i = 0; i < RSS_NENTRIES / 2; ++i) { + ret = rd_rss_row(adapter, i, &val); + if (ret) + return ret; + *map++ = LKPTBLQUEUE0_GET(val); + *map++ = LKPTBLQUEUE1_GET(val); + } + return 0; +} + +/** + * t4_tp_get_tcp_stats - read TP's TCP MIB counters + * @adap: the adapter + * @v4: holds the TCP/IP counter values + * @v6: holds the TCP/IPv6 counter values + * + * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. + * Either @v4 or @v6 may be %NULL to skip the corresponding stats. + */ +void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6) +{ + u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; + +#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) +#define STAT(x) val[STAT_IDX(x)] +#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) + + if (v4) { + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, + ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); + v4->tcpOutRsts = STAT(OUT_RST); + v4->tcpInSegs = STAT64(IN_SEG); + v4->tcpOutSegs = STAT64(OUT_SEG); + v4->tcpRetransSegs = STAT64(RXT_SEG); + } + if (v6) { + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, + ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); + v6->tcpOutRsts = STAT(OUT_RST); + v6->tcpInSegs = STAT64(IN_SEG); + v6->tcpOutSegs = STAT64(OUT_SEG); + v6->tcpRetransSegs = STAT64(RXT_SEG); + } +#undef STAT64 +#undef STAT +#undef STAT_IDX +} + +/** + * t4_tp_get_err_stats - read TP's error MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's error counters. 
+ */ +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) +{ + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs, + 12, TP_MIB_MAC_IN_ERR_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops, + 8, TP_MIB_TNL_CNG_DROP_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops, + 4, TP_MIB_TNL_DROP_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops, + 4, TP_MIB_OFD_VLN_DROP_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs, + 4, TP_MIB_TCP_V6IN_ERR_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh, + 2, TP_MIB_OFD_ARP_DROP); +} + +/** + * t4_read_mtu_tbl - returns the values in the HW path MTU table + * @adap: the adapter + * @mtus: where to store the MTU values + * @mtu_log: where to store the MTU base-2 log (may be %NULL) + * + * Reads the HW path MTU table. + */ +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) +{ + u32 v; + int i; + + for (i = 0; i < NMTUS; ++i) { + t4_write_reg(adap, TP_MTU_TABLE, + MTUINDEX(0xff) | MTUVALUE(i)); + v = t4_read_reg(adap, TP_MTU_TABLE); + mtus[i] = MTUVALUE_GET(v); + if (mtu_log) + mtu_log[i] = MTUWIDTH_GET(v); + } +} + +/** + * init_cong_ctrl - initialize congestion control parameters + * @a: the alpha values for congestion control + * @b: the beta values for congestion control + * + * Initialize the congestion control parameters. + */ +static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) +{ + a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; + a[9] = 2; + a[10] = 3; + a[11] = 4; + a[12] = 5; + a[13] = 6; + a[14] = 7; + a[15] = 8; + a[16] = 9; + a[17] = 10; + a[18] = 14; + a[19] = 17; + a[20] = 21; + a[21] = 25; + a[22] = 30; + a[23] = 35; + a[24] = 45; + a[25] = 60; + a[26] = 80; + a[27] = 100; + a[28] = 200; + a[29] = 300; + a[30] = 400; + a[31] = 500; + + b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; + b[9] = b[10] = 1; + b[11] = b[12] = 2; + b[13] = b[14] = b[15] = b[16] = 3; + b[17] = b[18] = b[19] = b[20] = b[21] = 4; + b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; + b[28] = b[29] = 6; + b[30] = b[31] = 7; +} + +/* The minimum additive increment value for the congestion control table */ +#define CC_MIN_INCR 2U + +/** + * t4_load_mtus - write the MTU and congestion control HW tables + * @adap: the adapter + * @mtus: the values for the MTU table + * @alpha: the values for the congestion control alpha parameter + * @beta: the values for the congestion control beta parameter + * + * Write the HW MTU table with the supplied MTUs and the high-speed + * congestion control table with the supplied alpha, beta, and MTUs. + * We write the two tables together because the additive increments + * depend on the MTUs. 
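+ *
+ * For each MTU/window pair the additive increment written below is
+ *
+ *	inc = max((mtu - 40) * alpha[w] / avg_pkts[w], CC_MIN_INCR)
+ *
+ * i.e. it scales the per-window alpha by the MTU-sized payload and is
+ * clamped from below by %CC_MIN_INCR.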
+ */ +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta) +{ + static const unsigned int avg_pkts[NCCTRL_WIN] = { + 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, + 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, + 28672, 40960, 57344, 81920, 114688, 163840, 229376 + }; + + unsigned int i, w; + + for (i = 0; i < NMTUS; ++i) { + unsigned int mtu = mtus[i]; + unsigned int log2 = fls(mtu); + + if (!(mtu & ((1 << log2) >> 2))) /* round */ + log2--; + t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | + MTUWIDTH(log2) | MTUVALUE(mtu)); + + for (w = 0; w < NCCTRL_WIN; ++w) { + unsigned int inc; + + inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], + CC_MIN_INCR); + + t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | + (w << 16) | (beta[w] << 13) | inc); + } + } +} + +/** + * t4_set_trace_filter - configure one of the tracing filters + * @adap: the adapter + * @tp: the desired trace filter parameters + * @idx: which filter to configure + * @enable: whether to enable or disable the filter + * + * Configures one of the tracing filters available in HW. If @enable is + * %0 @tp is not examined and may be %NULL. + */ +int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, + int idx, int enable) +{ + int i, ofst = idx * 4; + u32 data_reg, mask_reg, cfg; + u32 multitrc = TRCMULTIFILTER; + + if (!enable) { + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); + goto out; + } + + if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f || + tp->skip_ofst > 0x1f || tp->min_len > 0x1ff || + tp->snap_len > 9600 || (idx && tp->snap_len > 256)) + return -EINVAL; + + if (tp->snap_len > 256) { /* must be tracer 0 */ + if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) | + t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) | + t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN) + return -EINVAL; /* other tracers are enabled */ + multitrc = 0; + } else if (idx) { + i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B); + if (TFCAPTUREMAX_GET(i) > 256 && + (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN)) + return -EINVAL; + } + + /* stop the tracer we'll be changing */ + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); + + /* disable tracing globally if running in the wrong single/multi mode */ + cfg = t4_read_reg(adap, MPS_TRC_CFG); + if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) { + t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN); + t4_read_reg(adap, MPS_TRC_CFG); /* flush */ + msleep(1); + if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY)) + return -ETIMEDOUT; + } + /* + * At this point either the tracing is enabled and in the right mode or + * disabled. + */ + + idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH); + data_reg = MPS_TRC_FILTER0_MATCH + idx; + mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx; + + for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { + t4_write_reg(adap, data_reg, tp->data[i]); + t4_write_reg(adap, mask_reg, ~tp->mask[i]); + } + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst, + TFCAPTUREMAX(tp->snap_len) | + TFMINPKTSIZE(tp->min_len)); + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, + TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) | + TFPORT(tp->port) | TFEN | + (tp->invert ? 
TFINVERTMATCH : 0)); + + cfg &= ~TRCMULTIFILTER; + t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc); +out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */ + return 0; +} + +/** + * t4_get_trace_filter - query one of the tracing filters + * @adap: the adapter + * @tp: the current trace filter parameters + * @idx: which trace filter to query + * @enabled: non-zero if the filter is enabled + * + * Returns the current settings of one of the HW tracing filters. + */ +void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, + int *enabled) +{ + u32 ctla, ctlb; + int i, ofst = idx * 4; + u32 data_reg, mask_reg; + + ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst); + ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst); + + *enabled = !!(ctla & TFEN); + tp->snap_len = TFCAPTUREMAX_GET(ctlb); + tp->min_len = TFMINPKTSIZE_GET(ctlb); + tp->skip_ofst = TFOFFSET_GET(ctla); + tp->skip_len = TFLENGTH_GET(ctla); + tp->invert = !!(ctla & TFINVERTMATCH); + tp->port = TFPORT_GET(ctla); + + ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx; + data_reg = MPS_TRC_FILTER0_MATCH + ofst; + mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst; + + for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { + tp->mask[i] = ~t4_read_reg(adap, mask_reg); + tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; + } +} + +/** + * get_mps_bg_map - return the buffer groups associated with a port + * @adap: the adapter + * @idx: the port index + * + * Returns a bitmap indicating which MPS buffer groups are associated + * with the given port. Bit i is set if buffer group i is used by the + * port. + */ +static unsigned int get_mps_bg_map(struct adapter *adap, int idx) +{ + u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); + + if (n == 0) + return idx == 0 ? 0xf : 0; + if (n == 1) + return idx < 2 ? (3 << (2 * idx)) : 0; + return 1 << idx; +} + +/** + * t4_get_port_stats - collect port statistics + * @adap: the adapter + * @idx: the port index + * @p: the stats structure to fill + * + * Collect statistics related to the given port from HW. 
+ */ +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) +{ + u32 bgmap = get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) +#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) + + p->tx_octets = GET_STAT(TX_PORT_BYTES); + p->tx_frames = GET_STAT(TX_PORT_FRAMES); + p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); + p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); + p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); + p->tx_error_frames = GET_STAT(TX_PORT_ERROR); + p->tx_frames_64 = GET_STAT(TX_PORT_64B); + p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); + p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); + p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); + p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); + p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); + p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); + p->tx_drop = GET_STAT(TX_PORT_DROP); + p->tx_pause = GET_STAT(TX_PORT_PAUSE); + p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); + p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); + p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); + p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); + p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); + p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); + p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); + p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + + p->rx_octets = GET_STAT(RX_PORT_BYTES); + p->rx_frames = GET_STAT(RX_PORT_FRAMES); + p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); + p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); + p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); + p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); + p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); + p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); + p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); + p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); + p->rx_runt = GET_STAT(RX_PORT_LESS_64B); + p->rx_frames_64 = GET_STAT(RX_PORT_64B); + p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); + p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); + p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); + p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); + p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); + p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); + p->rx_pause = GET_STAT(RX_PORT_PAUSE); + p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); + p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); + p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); + p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); + p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); + p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); + p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); + p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; + p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; + p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; + p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; + p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; + p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; + p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; + p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_get_lb_stats - collect loopback port statistics + * @adap: the adapter + * @idx: the loopback port index + * @p: the stats structure to fill + * + * Return HW statistics for the given loopback port. 
+ */ +void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) +{ + u32 bgmap = get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)) +#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) + + p->octets = GET_STAT(BYTES); + p->frames = GET_STAT(FRAMES); + p->bcast_frames = GET_STAT(BCAST); + p->mcast_frames = GET_STAT(MCAST); + p->ucast_frames = GET_STAT(UCAST); + p->error_frames = GET_STAT(ERROR); + + p->frames_64 = GET_STAT(64B); + p->frames_65_127 = GET_STAT(65B_127B); + p->frames_128_255 = GET_STAT(128B_255B); + p->frames_256_511 = GET_STAT(256B_511B); + p->frames_512_1023 = GET_STAT(512B_1023B); + p->frames_1024_1518 = GET_STAT(1024B_1518B); + p->frames_1519_max = GET_STAT(1519B_MAX); + p->drop = t4_read_reg(adap, PORT_REG(idx, + MPS_PORT_STAT_LB_PORT_DROP_FRAMES)); + + p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; + p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; + p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; + p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; + p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; + p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; + p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; + p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_wol_magic_enable - enable/disable magic packet WoL + * @adap: the adapter + * @port: the physical port index + * @addr: MAC address expected in magic packets, %NULL to disable + * + * Enables/disables magic packet wake-on-LAN for the selected port. + */ +void t4_wol_magic_enable(struct adapter *adap, unsigned int port, + const u8 *addr) +{ + if (addr) { + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), + (addr[2] << 24) | (addr[3] << 16) | + (addr[4] << 8) | addr[5]); + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), + (addr[0] << 8) | addr[1]); + } + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, + addr ? MAGICEN : 0); +} + +/** + * t4_wol_pat_enable - enable/disable pattern-based WoL + * @adap: the adapter + * @port: the physical port index + * @map: bitmap of which HW pattern filters to set + * @mask0: byte mask for bytes 0-63 of a packet + * @mask1: byte mask for bytes 64-127 of a packet + * @crc: Ethernet CRC for selected bytes + * @enable: enable/disable switch + * + * Sets the pattern filters indicated in @map to mask out the bytes + * specified in @mask0/@mask1 in received packets and compare the CRC of + * the resulting packet against @crc. If @enable is %true pattern-based + * WoL is enabled, otherwise disabled. 
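+ *
+ * When @enable is %false the mask and CRC arguments are not examined,
+ * so all pattern filters can be turned off as in this sketch
+ * (illustrative only):
+ *
+ *	t4_wol_pat_enable(adap, port, 0, 0, 0, 0, false);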
+ */ +int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, + u64 mask0, u64 mask1, unsigned int crc, bool enable) +{ + int i; + + if (!enable) { + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), + PATEN, 0); + return 0; + } + if (map > 0xff) + return -EINVAL; + +#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) + + t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); + t4_write_reg(adap, EPIO_REG(DATA2), mask1); + t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); + + for (i = 0; i < NWOL_PAT; i++, map >>= 1) { + if (!(map & 1)) + continue; + + /* write byte masks */ + t4_write_reg(adap, EPIO_REG(DATA0), mask0); + t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); + t4_read_reg(adap, EPIO_REG(OP)); /* flush */ + if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) + return -ETIMEDOUT; + + /* write CRC */ + t4_write_reg(adap, EPIO_REG(DATA0), crc); + t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); + t4_read_reg(adap, EPIO_REG(OP)); /* flush */ + if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) + return -ETIMEDOUT; + } +#undef EPIO_REG + + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); + return 0; +} + +#define INIT_CMD(var, cmd, rd_wr) do { \ + (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ + FW_CMD_REQUEST | FW_CMD_##rd_wr); \ + (var).retval_len16 = htonl(FW_LEN16(var)); \ +} while (0) + +/** + * t4_mdio_rd - read a PHY register through MDIO + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @phy_addr: the PHY address + * @mmd: the PHY MMD to access (0 for clause 22 PHYs) + * @reg: the register to read + * @valp: where to store the value + * + * Issues a FW command through the given mailbox to read a PHY register. + */ +int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 *valp) +{ + int ret; + struct fw_ldst_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | + FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); + c.cycles_to_len16 = htonl(FW_LEN16(c)); + c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | + FW_LDST_CMD_MMD(mmd)); + c.u.mdio.raddr = htons(reg); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + *valp = ntohs(c.u.mdio.rval); + return ret; +} + +/** + * t4_mdio_wr - write a PHY register through MDIO + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @phy_addr: the PHY address + * @mmd: the PHY MMD to access (0 for clause 22 PHYs) + * @reg: the register to write + * @valp: value to write + * + * Issues a FW command through the given mailbox to write a PHY register. 
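+ *
+ * For a clause 22 PHY pass 0 for @mmd.  An illustrative write of
+ * 0x1140 to register 0 of a PHY at address 1 (values assumed):
+ *
+ *	ret = t4_mdio_wr(adap, mbox, 1, 0, 0, 0x1140);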
+ */
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 val)
+{
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+	c.cycles_to_len16 = htonl(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+				   FW_LDST_CMD_MMD(mmd));
+	c.u.mdio.raddr = htons(reg);
+	c.u.mdio.rval = htons(val);
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state
+ *
+ * Issues a command to establish communication with FW.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+		enum dev_master master, enum dev_state *state)
+{
+	int ret;
+	struct fw_hello_cmd c;
+
+	INIT_CMD(c, HELLO, WRITE);
+	c.err_to_mbasyncnot = htonl(
+		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
+		FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0 && state) {
+		u32 v = ntohl(c.err_to_mbasyncnot);
+		if (v & FW_HELLO_CMD_INIT)
+			*state = DEV_STATE_INIT;
+		else if (v & FW_HELLO_CMD_ERR)
+			*state = DEV_STATE_ERR;
+		else
+			*state = DEV_STATE_UNINIT;
+	}
+	return ret;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_bye_cmd c;
+
+	INIT_CMD(c, BYE, WRITE);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_early_init - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device.  This
+ * performs initialization that generally doesn't depend on user input.
+ */
+int t4_early_init(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_initialize_cmd c;
+
+	INIT_CMD(c, INITIALIZE, WRITE);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_reset - issue a reset to FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @reset: specifies the type of reset to perform
+ *
+ * Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+	struct fw_reset_cmd c;
+
+	INIT_CMD(c, RESET, WRITE);
+	c.val = htonl(reset);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_query_params - query FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ *
+ * Reads the value of FW or device parameters.  Up to 7 parameters can be
+ * queried at once.
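+ *
+ * An illustrative single-parameter query, with param already holding
+ * an encoded parameter mnemonic (a sketch, not part of this patch):
+ *
+ *	u32 val;
+ *
+ *	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);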
+ */ +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val) +{ + int i, ret; + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | + FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | + FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + for (i = 0; i < nparams; i++, p += 2) + *p = htonl(*params++); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) + *val++ = ntohl(*p); + return ret; +} + +/** + * t4_set_params - sets FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * + * Sets the value of FW or device parameters. Up to 7 parameters can be + * specified at once. + */ +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val) +{ + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | + FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + while (nparams--) { + *p++ = htonl(*params++); + *p++ = htonl(*val++); + } + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_cfg_pfvf - configure PF/VF resource limits + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF being configured + * @vf: the VF being configured + * @txq: the max number of egress queues + * @txq_eth_ctrl: the max number of egress Ethernet or control queues + * @rxqi: the max number of interrupt-capable ingress queues + * @rxq: the max number of interruptless ingress queues + * @tc: the PCI traffic class + * @vi: the max number of virtual interfaces + * @cmask: the channel access rights mask for the PF/VF + * @pmask: the port access rights mask for the PF/VF + * @nexact: the maximum number of exact MPS filters + * @rcaps: read capabilities + * @wxcaps: write/execute capabilities + * + * Configures resource limits and capabilities for a physical or virtual + * function. 
+ */
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+		unsigned int rxqi, unsigned int rxq, unsigned int tc,
+		unsigned int vi, unsigned int cmask, unsigned int pmask,
+		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
+{
+	struct fw_pfvf_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
+			    FW_PFVF_CMD_VFN(vf));
+	c.retval_len16 = htonl(FW_LEN16(c));
+	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
+			       FW_PFVF_CMD_NIQ(rxq));
+	c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
+			       FW_PFVF_CMD_PMASK(pmask) |
+			       FW_PFVF_CMD_NEQ(txq));
+	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
+				FW_PFVF_CMD_NEXACTF(nexact));
+	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
+				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
+				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_alloc_vi - allocate a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Allocates a virtual interface for the given physical port.  If @mac is
+ * not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ * @mac should be large enough to hold @nmac Ethernet addresses; they are
+ * stored consecutively, so the space needed is @nmac * 6 bytes.
+ * Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+		unsigned int *rss_size)
+{
+	int ret;
+	struct fw_vi_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_WRITE | FW_CMD_EXEC |
+			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
+	c.portid_pkd = FW_VI_CMD_PORTID(port);
+	c.nmac = nmac - 1;
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret)
+		return ret;
+
+	if (mac) {
+		memcpy(mac, c.mac, sizeof(c.mac));
+		switch (nmac) {
+		case 5:
+			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+		case 4:
+			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+		case 3:
+			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+		case 2:
+			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+		}
+	}
+	if (rss_size)
+		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
+	return ntohs(c.viid_pkd);
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
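+ *
+ * Pairs with t4_alloc_vi() above; an illustrative alloc/free sequence
+ * (error handling omitted):
+ *
+ *	viid = t4_alloc_vi(adap, mbox, port, pf, vf, 1, mac, &rss_size);
+ *	...
+ *	t4_free_vi(adap, mbox, pf, vf, viid);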
+ */ +int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int viid) +{ + struct fw_vi_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_VI_CMD_PFN(pf) | + FW_VI_CMD_VFN(vf)); + c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c)); + c.viid_pkd = htons(FW_VI_CMD_VIID(viid)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); +} + +/** + * t4_set_rxmode - set Rx properties of a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @mtu: the new MTU or -1 + * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change + * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change + * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sets Rx properties of a virtual interface. + */ +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, bool sleep_ok) +{ + struct fw_vi_rxmode_cmd c; + + /* convert to FW values */ + if (mtu < 0) + mtu = FW_RXMODE_MTU_NO_CHG; + if (promisc < 0) + promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + if (all_multi < 0) + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + if (bcast < 0) + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); + c.retval_len16 = htonl(FW_LEN16(c)); + c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN(bcast)); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @free: if true any existing filters for this VI id are first removed + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @idx: where to store the index of each allocated filter + * @hash: pointer to hash address filter bitmap + * @sleep_ok: call is allowed to sleep + * + * Allocates an exact-match filter for each of the supplied addresses and + * sets it to the corresponding address. If @idx is not %NULL it should + * have at least @naddr entries, each of which will be set to the index of + * the filter allocated for the corresponding MAC address. If a filter + * could not be allocated for an address its index is set to 0xffff. + * If @hash is not %NULL addresses that fail to allocate an exact filter + * are hashed and update the hash filter bitmap pointed at by @hash. + * + * Returns a negative error number or the number of filters allocated. + */ +int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool free, unsigned int naddr, + const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) +{ + int i, ret; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p; + + if (naddr > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | (free ? 
FW_CMD_EXEC : 0) | + FW_VI_MAC_CMD_VIID(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | + FW_CMD_LEN16((naddr + 2) / 2)); + + for (i = 0, p = c.u.exact; i < naddr; i++, p++) { + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); + } + + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); + if (ret) + return ret; + + for (i = 0, p = c.u.exact; i < naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + + if (idx) + idx[i] = index >= NEXACT_MAC ? 0xffff : index; + if (index < NEXACT_MAC) + ret++; + else if (hash) + *hash |= (1 << hash_mac_addr(addr[i])); + } + return ret; +} + +/** + * t4_change_mac - modifies the exact-match filter for a MAC address + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @idx: index of existing filter for old value of MAC address, or -1 + * @addr: the new MAC address value + * @persist: whether a new MAC allocation should be persistent + * @add_smt: if true also add the address to the HW SMT + * + * Modifies an exact-match filter and sets it to the new MAC address. + * Note that in general it is not possible to modify the value of a given + * filter so the generic way to modify an address filter is to free the one + * being used by the old address value and allocate a new filter for the + * new address value. @idx can be -1 if the address is a new addition. + * + * Returns a negative error number or the index of the filter with the new + * MAC value. + */ +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt) +{ + int ret, mode; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p = c.u.exact; + + if (idx < 0) /* new allocation */ + idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; + mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); + c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_SMAC_RESULT(mode) | + FW_VI_MAC_CMD_IDX(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) { + ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + if (ret >= NEXACT_MAC) + ret = -ENOMEM; + } + return ret; +} + +/** + * t4_set_addr_hash - program the MAC inexact-match hash filter + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @ucast: whether the hash filter should also match unicast addresses + * @vec: the value to be written to the hash filter + * @sleep_ok: call is allowed to sleep + * + * Sets the 64-bit inexact-match hash filter for a virtual interface. 
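+ *
+ * For illustration (a sketch; "dev" and the use of the net_device
+ * multicast list are assumptions of this example), the vector can be
+ * built from the same hash used for overflow addresses in
+ * t4_alloc_mac_filt():
+ *
+ *	struct netdev_hw_addr *ha;
+ *	u64 vec = 0;
+ *
+ *	netdev_for_each_mc_addr(ha, dev)
+ *		vec |= 1ULL << hash_mac_addr(ha->addr);
+ *	t4_set_addr_hash(adap, mbox, viid, false, vec, true);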
+ */
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		     bool ucast, u64 vec, bool sleep_ok)
+{
+	struct fw_vi_mac_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+			     FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
+	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
+				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
+				    FW_CMD_LEN16(1));
+	c.u.hash.hashvec = cpu_to_be64(vec);
+	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ * t4_enable_vi - enable/disable a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a virtual interface.
+ */
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		 bool rx_en, bool tx_en)
+{
+	struct fw_vi_enable_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
+			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_identify_port - identify a VI's port by blinking its LED
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @nblinks: how many times to blink LED at 2.5 Hz
+ *
+ * Identifies a VI's port by blinking its LED.
+ */
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		     unsigned int nblinks)
+{
+	struct fw_vi_enable_cmd c;
+
+	memset(&c, 0, sizeof(c));	/* clear reserved fields before sending */
+	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
+	c.blinkdur = htons(nblinks);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_iq_start_stop - enable/disable an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @start: %true to enable the queues, %false to disable them
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Starts or stops an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+		     unsigned int pf, unsigned int vf, unsigned int iqid,
+		     unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
+			    FW_IQ_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
+				 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
+	c.iqid = htons(iqid);
+	c.fl0id = htons(fl0id);
+	c.fl1id = htons(fl1id);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_iq_free - free an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Frees an ingress queue and its associated FLs, if any.
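+ *
+ * One possible teardown sequence (a sketch; the queue ids are assumed to
+ * come from an earlier FW_IQ_CMD allocation, and 0xffff marks the absent
+ * FL1):
+ *
+ *	t4_iq_start_stop(adap, mbox, false, pf, vf, iqid, fl0id, 0xffff);
+ *	err = t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
+ *			 iqid, fl0id, 0xffff);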
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
+			    FW_IQ_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
+	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
+	c.iqid = htons(iqid);
+	c.fl0id = htons(fl0id);
+	c.fl1id = htons(fl1id);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_eth_eq_free - free an Ethernet egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		   unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_eth_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
+			    FW_EQ_ETH_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ctrl_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
+			    FW_EQ_CTRL_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ofld_eq_free - free an offload egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an offload egress queue.
+ */
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ofld_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
+			    FW_EQ_OFLD_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
+	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
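+ *
+ * A sketch of the expected caller, e.g. a FW event queue handler ("rsp"
+ * pointing at a received CPL_FW6_MSG is an assumption of this example);
+ * type 0 is an encapsulated FW command reply:
+ *
+ *	const struct cpl_fw6_msg *p = (const void *)rsp;
+ *
+ *	if (p->type == 0)
+ *		t4_handle_fw_rpl(adap, p->data);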
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+	u8 opcode = *(const u8 *)rpl;
+
+	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
+		int speed = 0, fc = 0;
+		const struct fw_port_cmd *p = (void *)rpl;
+		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
+		int port = adap->chan_map[chan];
+		struct port_info *pi = adap2pinfo(adap, port);
+		struct link_config *lc = &pi->link_cfg;
+		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
+		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
+
+		if (stat & FW_PORT_CMD_RXPAUSE)
+			fc |= PAUSE_RX;
+		if (stat & FW_PORT_CMD_TXPAUSE)
+			fc |= PAUSE_TX;
+		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+			speed = SPEED_100;
+		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+			speed = SPEED_1000;
+		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+			speed = SPEED_10000;
+
+		if (link_ok != lc->link_ok || speed != lc->speed ||
+		    fc != lc->fc) {                    /* something changed */
+			lc->link_ok = link_ok;
+			lc->speed = speed;
+			lc->fc = fc;
+			t4_os_link_changed(adap, port, link_ok);
+		}
+		if (mod != pi->mod_type) {
+			pi->mod_type = mod;
+			t4_os_portmod_changed(adap, port);
+		}
+	}
+	return 0;
+}
+
+static void __devinit get_pci_mode(struct adapter *adapter,
+				   struct pci_params *p)
+{
+	u16 val;
+	u32 pcie_cap = pci_pcie_cap(adapter->pdev);
+
+	if (pcie_cap) {
+		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
+				     &val);
+		p->speed = val & PCI_EXP_LNKSTA_CLS;
+		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
+	}
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+				       unsigned int caps)
+{
+	lc->supported = caps;
+	lc->requested_speed = 0;
+	lc->speed = 0;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & FW_PORT_CAP_ANEG) {
+		lc->advertising = lc->supported & ADVERT_MASK;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+static int __devinit wait_dev_ready(struct adapter *adap)
+{
+	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
+		return 0;
+	msleep(500);
+	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
+}
+
+/**
+ * t4_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int __devinit t4_prep_adapter(struct adapter *adapter)
+{
+	int ret;
+
+	ret = wait_dev_ready(adapter);
+	if (ret < 0)
+		return ret;
+
+	get_pci_mode(adapter, &adapter->params.pci);
+	adapter->params.rev = t4_read_reg(adapter, PL_REV);
+
+	ret = get_vpd_params(adapter, &adapter->params.vpd);
+	if (ret < 0)
+		return ret;
+
+	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+	/*
+	 * Default port for debugging in case we can't reach FW.
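+	 * These are placeholders; normal initialization replaces them with
+	 * the port vector reported by the FW.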
+ */ + adapter->params.nports = 1; + adapter->params.portvec = 1; + return 0; +} + +int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf) +{ + u8 addr[6]; + int ret, i, j = 0; + struct fw_port_cmd c; + + memset(&c, 0, sizeof(c)); + + for_each_port(adap, i) { + unsigned int rss_size; + struct port_info *p = adap2pinfo(adap, i); + + while ((adap->params.portvec & (1 << j)) == 0) + j++; + + c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | + FW_CMD_REQUEST | FW_CMD_READ | + FW_PORT_CMD_PORTID(j)); + c.action_to_len16 = htonl( + FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(c)); + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret) + return ret; + + ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); + if (ret < 0) + return ret; + + p->viid = ret; + p->tx_chan = j; + p->lport = j; + p->rss_size = rss_size; + memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); + memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); + + ret = ntohl(c.u.info.lstatus_to_modtype); + p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? + FW_PORT_CMD_MDIOADDR_GET(ret) : -1; + p->port_type = FW_PORT_CMD_PTYPE_GET(ret); + p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret); + + init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); + j++; + } + return 0; +} diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h new file mode 100644 index 00000000000..025623285c9 --- /dev/null +++ b/drivers/net/cxgb4/t4_hw.h @@ -0,0 +1,100 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __T4_HW_H +#define __T4_HW_H + +#include <linux/types.h> + +enum { + NCHAN = 4, /* # of HW channels */ + MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ + EEPROMSIZE = 17408, /* Serial EEPROM physical size */ + EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ + RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ + TCB_SIZE = 128, /* TCB size */ + NMTUS = 16, /* size of MTU table */ + NCCTRL_WIN = 32, /* # of congestion control windows */ + NEXACT_MAC = 336, /* # of exact MAC address filters */ + L2T_SIZE = 4096, /* # of L2T entries */ + MBOX_LEN = 64, /* mailbox size in bytes */ + TRACE_LEN = 112, /* length of trace data and mask */ + FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ + NWOL_PAT = 8, /* # of WoL patterns */ + WOL_PAT_LEN = 128, /* length of WoL patterns */ +}; + +enum { + SF_PAGE_SIZE = 256, /* serial flash page size */ + SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ + SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ +}; + +enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ + +enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */ + +enum { + SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ + SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ + SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ +}; + +struct sge_qstat { /* data written to SGE queue status entries */ + __be32 qid; + __be16 cidx; + __be16 pidx; +}; + +/* + * Structure for last 128 bits of response descriptors + */ +struct rsp_ctrl { + __be32 hdrbuflen_pidx; + __be32 pldbuflen_qid; + union { + u8 type_gen; + __be64 last_flit; + }; +}; + +#define RSPD_NEWBUF 0x80000000U +#define RSPD_LEN 0x7fffffffU + +#define RSPD_GEN(x) ((x) >> 7) +#define RSPD_TYPE(x) (((x) >> 4) & 3) + +#define QINTR_CNT_EN 0x1 +#define QINTR_TIMER_IDX(x) ((x) << 1) +#endif /* __T4_HW_H */ diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h new file mode 100644 index 00000000000..fdb11744314 --- /dev/null +++ b/drivers/net/cxgb4/t4_msg.h @@ -0,0 +1,664 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4_MSG_H +#define __T4_MSG_H + +#include <linux/types.h> + +enum { + CPL_PASS_OPEN_REQ = 0x1, + CPL_PASS_ACCEPT_RPL = 0x2, + CPL_ACT_OPEN_REQ = 0x3, + CPL_SET_TCB_FIELD = 0x5, + CPL_GET_TCB = 0x6, + CPL_CLOSE_CON_REQ = 0x8, + CPL_CLOSE_LISTSRV_REQ = 0x9, + CPL_ABORT_REQ = 0xA, + CPL_ABORT_RPL = 0xB, + CPL_RX_DATA_ACK = 0xD, + CPL_TX_PKT = 0xE, + CPL_L2T_WRITE_REQ = 0x12, + CPL_TID_RELEASE = 0x1A, + + CPL_CLOSE_LISTSRV_RPL = 0x20, + CPL_L2T_WRITE_RPL = 0x23, + CPL_PASS_OPEN_RPL = 0x24, + CPL_ACT_OPEN_RPL = 0x25, + CPL_PEER_CLOSE = 0x26, + CPL_ABORT_REQ_RSS = 0x2B, + CPL_ABORT_RPL_RSS = 0x2D, + + CPL_CLOSE_CON_RPL = 0x32, + CPL_ISCSI_HDR = 0x33, + CPL_RDMA_CQE = 0x35, + CPL_RDMA_CQE_READ_RSP = 0x36, + CPL_RDMA_CQE_ERR = 0x37, + CPL_RX_DATA = 0x39, + CPL_SET_TCB_RPL = 0x3A, + CPL_RX_PKT = 0x3B, + CPL_RX_DDP_COMPLETE = 0x3F, + + CPL_ACT_ESTABLISH = 0x40, + CPL_PASS_ESTABLISH = 0x41, + CPL_RX_DATA_DDP = 0x42, + CPL_PASS_ACCEPT_REQ = 0x44, + + CPL_RDMA_READ_REQ = 0x60, + + CPL_PASS_OPEN_REQ6 = 0x81, + CPL_ACT_OPEN_REQ6 = 0x83, + + CPL_RDMA_TERMINATE = 0xA2, + CPL_RDMA_WRITE = 0xA4, + CPL_SGE_EGR_UPDATE = 0xA5, + + CPL_TRACE_PKT = 0xB0, + + CPL_FW4_MSG = 0xC0, + CPL_FW4_PLD = 0xC1, + CPL_FW4_ACK = 0xC3, + + CPL_FW6_MSG = 0xE0, + CPL_FW6_PLD = 0xE1, + CPL_TX_PKT_LSO = 0xED, + CPL_TX_PKT_XT = 0xEE, + + NUM_CPL_CMDS +}; + +enum CPL_error { + CPL_ERR_NONE = 0, + CPL_ERR_TCAM_FULL = 3, + CPL_ERR_BAD_LENGTH = 15, + CPL_ERR_BAD_ROUTE = 18, + CPL_ERR_CONN_RESET = 20, + CPL_ERR_CONN_EXIST_SYNRECV = 21, + CPL_ERR_CONN_EXIST = 22, + CPL_ERR_ARP_MISS = 23, + CPL_ERR_BAD_SYN = 24, + CPL_ERR_CONN_TIMEDOUT = 30, + CPL_ERR_XMIT_TIMEDOUT = 31, + CPL_ERR_PERSIST_TIMEDOUT = 32, + CPL_ERR_FINWAIT2_TIMEDOUT = 33, + CPL_ERR_KEEPALIVE_TIMEDOUT = 34, + CPL_ERR_RTX_NEG_ADVICE = 35, + CPL_ERR_PERSIST_NEG_ADVICE = 36, + CPL_ERR_ABORT_FAILED = 42, + CPL_ERR_IWARP_FLM = 50, +}; + +enum { + ULP_MODE_NONE = 0, + ULP_MODE_ISCSI = 2, + ULP_MODE_RDMA = 4, + ULP_MODE_FCOE = 6, +}; + +enum { + ULP_CRC_HEADER = 1 << 0, + ULP_CRC_DATA = 1 << 1 +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, +}; + +enum { /* TX_PKT_XT checksum types */ + TX_CSUM_TCP = 0, + TX_CSUM_UDP = 1, + TX_CSUM_CRC16 = 4, + TX_CSUM_CRC32 = 5, + TX_CSUM_CRC32C = 6, + TX_CSUM_FCOE = 7, + TX_CSUM_TCPIP = 8, + TX_CSUM_UDPIP = 9, + TX_CSUM_TCPIP6 = 10, + TX_CSUM_UDPIP6 = 11, + TX_CSUM_IP = 12, +}; + +union opcode_tid { + __be32 opcode_tid; + u8 opcode; +}; + +#define CPL_OPCODE(x) ((x) << 24) +#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid)) +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) +#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF) + +/* partitioning of TID fields that also carry a queue id */ +#define GET_TID_TID(x) ((x) & 0x3fff) +#define GET_TID_QID(x) (((x) >> 14) & 0x3ff) +#define TID_QID(x) ((x) << 14) + +struct rss_header { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 channel:2; + u8 filter_hit:1; + u8 filter_tid:1; + u8 hash_type:2; + u8 ipv6:1; + u8 send2fw:1; +#else + u8 send2fw:1; + u8 ipv6:1; + u8 hash_type:2; + u8 filter_tid:1; + u8 filter_hit:1; + u8 channel:2; +#endif + __be16 qid; + __be32 hash_val; +}; + +struct work_request_hdr { + __be32 wr_hi; + __be32 wr_mid; + __be64 wr_lo; +}; + +#define WR_HDR struct 
work_request_hdr wr + +struct cpl_pass_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; +#define TX_CHAN(x) ((x) << 2) +#define DELACK(x) ((x) << 5) +#define ULP_MODE(x) ((x) << 8) +#define RCV_BUFSIZ(x) ((x) << 12) +#define DSCP(x) ((x) << 22) +#define SMAC_SEL(x) ((u64)(x) << 28) +#define L2T_IDX(x) ((u64)(x) << 36) +#define NAGLE(x) ((u64)(x) << 49) +#define WND_SCALE(x) ((u64)(x) << 50) +#define KEEP_ALIVE(x) ((u64)(x) << 54) +#define MSS_IDX(x) ((u64)(x) << 60) + __be64 opt1; +#define SYN_RSS_ENABLE (1 << 0) +#define SYN_RSS_QUEUE(x) ((x) << 2) +#define CONN_POLICY_ASK (1 << 22) +}; + +struct cpl_pass_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be64 opt1; +}; + +struct cpl_pass_open_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_pass_accept_rpl { + WR_HDR; + union opcode_tid ot; + __be32 opt2; +#define RSS_QUEUE(x) ((x) << 0) +#define RSS_QUEUE_VALID (1 << 10) +#define RX_COALESCE_VALID(x) ((x) << 11) +#define RX_COALESCE(x) ((x) << 12) +#define TX_QUEUE(x) ((x) << 23) +#define RX_CHANNEL(x) ((x) << 26) +#define WND_SCALE_EN(x) ((x) << 28) +#define TSTAMPS_EN(x) ((x) << 29) +#define SACK_EN(x) ((x) << 30) + __be64 opt0; +}; + +struct cpl_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be32 params; + __be32 opt2; +}; + +struct cpl_act_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be32 params; + __be32 opt2; +}; + +struct cpl_act_open_rpl { + union opcode_tid ot; + __be32 atid_status; +#define GET_AOPEN_STATUS(x) ((x) & 0xff) +#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff) +}; + +struct cpl_pass_establish { + union opcode_tid ot; + __be32 rsvd; + __be32 tos_stid; +#define GET_POPEN_TID(x) ((x) & 0xffffff) +#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff) + __be16 mac_idx; + __be16 tcp_opt; +#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) +#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1) +#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) +#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) +#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf) + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_act_establish { + union opcode_tid ot; + __be32 rsvd; + __be32 tos_atid; + __be16 mac_idx; + __be16 tcp_opt; + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_get_tcb { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; +#define QUEUENO(x) ((x) << 0) +#define REPLY_CHAN(x) ((x) << 14) +#define NO_REPLY(x) ((x) << 15) + __be16 cookie; +}; + +struct cpl_set_tcb_field { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; + __be16 word_cookie; +#define TCB_WORD(x) ((x) << 0) +#define TCB_COOKIE(x) ((x) << 5) + __be64 mask; + __be64 val; +}; + +struct cpl_set_tcb_rpl { + union opcode_tid ot; + __be16 rsvd; + u8 cookie; + u8 status; + __be64 oldval; +}; + +struct cpl_close_con_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_close_con_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; + __be32 snd_nxt; + __be32 rcv_nxt; +}; + +struct cpl_close_listsvr_req { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; +#define LISTSVR_IPV6 (1 << 14) + __be16 rsvd; +}; + +struct 
cpl_close_listsvr_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req_rss { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_abort_rpl_rss { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_rpl { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_peer_close { + union opcode_tid ot; + __be32 rcv_nxt; +}; + +struct cpl_tid_release { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_tx_pkt_core { + __be32 ctrl0; +#define TXPKT_VF(x) ((x) << 0) +#define TXPKT_PF(x) ((x) << 8) +#define TXPKT_VF_VLD (1 << 11) +#define TXPKT_OVLAN_IDX(x) ((x) << 12) +#define TXPKT_INTF(x) ((x) << 16) +#define TXPKT_INS_OVLAN (1 << 21) +#define TXPKT_OPCODE(x) ((x) << 24) + __be16 pack; + __be16 len; + __be64 ctrl1; +#define TXPKT_CSUM_END(x) ((x) << 12) +#define TXPKT_CSUM_START(x) ((x) << 20) +#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20) +#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30) +#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34) +#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40) +#define TXPKT_VLAN(x) ((u64)(x) << 44) +#define TXPKT_VLAN_VLD (1ULL << 60) +#define TXPKT_IPCSUM_DIS (1ULL << 62) +#define TXPKT_L4CSUM_DIS (1ULL << 63) +}; + +struct cpl_tx_pkt { + WR_HDR; + struct cpl_tx_pkt_core c; +}; + +#define cpl_tx_pkt_xt cpl_tx_pkt + +struct cpl_tx_pkt_lso { + WR_HDR; + __be32 lso_ctrl; +#define LSO_TCPHDR_LEN(x) ((x) << 0) +#define LSO_IPHDR_LEN(x) ((x) << 4) +#define LSO_ETHHDR_LEN(x) ((x) << 16) +#define LSO_IPV6(x) ((x) << 20) +#define LSO_LAST_SLICE (1 << 22) +#define LSO_FIRST_SLICE (1 << 23) +#define LSO_OPCODE(x) ((x) << 24) + __be16 ipid_ofst; + __be16 mss; + __be32 seqno_offset; + __be32 len; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +struct cpl_iscsi_hdr { + union opcode_tid ot; + __be16 pdu_len_ddp; +#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF) +#define ISCSI_DDP (1 << 15) + __be16 len; + __be32 seq; + __be16 urg; + u8 rsvd; + u8 status; +}; + +struct cpl_rx_data { + union opcode_tid ot; + __be16 rsvd; + __be16 len; + __be32 seq; + __be16 urg; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 dack_mode:2; + u8 psh:1; + u8 heartbeat:1; + u8 ddp_off:1; + u8 :3; +#else + u8 :3; + u8 ddp_off:1; + u8 heartbeat:1; + u8 psh:1; + u8 dack_mode:2; +#endif + u8 status; +}; + +struct cpl_rx_data_ack { + WR_HDR; + union opcode_tid ot; + __be32 credit_dack; +#define RX_CREDITS(x) ((x) << 0) +#define RX_FORCE_ACK(x) ((x) << 28) +}; + +struct cpl_rx_pkt { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 csum_calc:1; + u8 ipmi_pkt:1; + u8 vlan_ex:1; + u8 ip_frag:1; +#else + u8 ip_frag:1; + u8 vlan_ex:1; + u8 ipmi_pkt:1; + u8 csum_calc:1; + u8 iff:4; +#endif + __be16 csum; + __be16 vlan; + __be16 len; + __be32 l2info; +#define RXF_UDP (1 << 22) +#define RXF_TCP (1 << 23) + __be16 hdr_len; + __be16 err_vec; +}; + +struct cpl_trace_pkt { + u8 opcode; + u8 intf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 runt:4; + u8 filter_hit:4; + u8 :6; + u8 err:1; + u8 trunc:1; +#else + u8 filter_hit:4; + u8 runt:4; + u8 trunc:1; + u8 err:1; + u8 :6; +#endif + __be16 rsvd; + __be16 len; + __be64 tstamp; +}; + +struct cpl_l2t_write_req { + WR_HDR; + union opcode_tid ot; + __be16 params; +#define L2T_W_INFO(x) ((x) << 2) +#define L2T_W_PORT(x) ((x) << 8) +#define L2T_W_NOREPLY(x) ((x) << 15) + __be16 l2t_idx; + __be16 vlan; + u8 dst_mac[6]; +}; 
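+
+/*
+ * An illustrative sketch of filling in an L2T write request with the
+ * macros above ("idx", "port", "vlan", "mac" and the reply queue id
+ * "rxq_id" are assumptions of this example):
+ *
+ *	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
+ *					      idx | TID_QID(rxq_id)));
+ *	req->params = htons(L2T_W_PORT(port) | L2T_W_NOREPLY(0));
+ *	req->l2t_idx = htons(idx);
+ *	req->vlan = htons(vlan);
+ *	memcpy(req->dst_mac, mac, sizeof(req->dst_mac));
+ */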
+ +struct cpl_l2t_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_rdma_terminate { + union opcode_tid ot; + __be16 rsvd; + __be16 len; +}; + +struct cpl_sge_egr_update { + __be32 opcode_qid; +#define EGR_QID(x) ((x) & 0x1FFFF) + __be16 cidx; + __be16 pidx; +}; + +struct cpl_fw4_pld { + u8 opcode; + u8 rsvd0[3]; + u8 type; + u8 rsvd1; + __be16 len; + __be64 data; + __be64 rsvd2; +}; + +struct cpl_fw6_pld { + u8 opcode; + u8 rsvd[5]; + __be16 len; + __be64 data[4]; +}; + +struct cpl_fw4_msg { + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[2]; +}; + +struct cpl_fw4_ack { + union opcode_tid ot; + u8 credits; + u8 rsvd0[2]; + u8 seq_vld; + __be32 snd_nxt; + __be32 snd_una; + __be64 rsvd1; +}; + +struct cpl_fw6_msg { + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[4]; +}; + +enum { + ULP_TX_MEM_READ = 2, + ULP_TX_MEM_WRITE = 3, + ULP_TX_PKT = 4 +}; + +enum { + ULP_TX_SC_NOOP = 0x80, + ULP_TX_SC_IMM = 0x81, + ULP_TX_SC_DSGL = 0x82, + ULP_TX_SC_ISGL = 0x83 +}; + +struct ulptx_sge_pair { + __be32 len[2]; + __be64 addr[2]; +}; + +struct ulptx_sgl { + __be32 cmd_nsge; +#define ULPTX_CMD(x) ((x) << 24) +#define ULPTX_NSGE(x) ((x) << 0) + __be32 len0; + __be64 addr0; + struct ulptx_sge_pair sge[0]; +}; + +struct ulp_mem_io { + WR_HDR; + __be32 cmd; +#define ULP_MEMIO_ORDER(x) ((x) << 23) + __be32 len16; /* command length */ + __be32 dlen; /* data length in 32-byte units */ +#define ULP_MEMIO_DATA_LEN(x) ((x) << 0) + __be32 lock_addr; +#define ULP_MEMIO_ADDR(x) ((x) << 0) +#define ULP_MEMIO_LOCK(x) ((x) << 31) +}; + +#endif /* __T4_MSG_H */ diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h new file mode 100644 index 00000000000..5ed56483cbc --- /dev/null +++ b/drivers/net/cxgb4/t4_regs.h @@ -0,0 +1,878 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __T4_REGS_H +#define __T4_REGS_H + +#define MYPF_BASE 0x1b000 +#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) + +#define PF0_BASE 0x1e000 +#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) + +#define PF_STRIDE 0x400 +#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) +#define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) + +#define MYPORT_BASE 0x1c000 +#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) + +#define PORT0_BASE 0x20000 +#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) + +#define PORT_STRIDE 0x2000 +#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) +#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) + +#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR) +#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx) + +#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) + +#define SGE_PF_KDOORBELL 0x0 +#define QID_MASK 0xffff8000U +#define QID_SHIFT 15 +#define QID(x) ((x) << QID_SHIFT) +#define DBPRIO 0x00004000U +#define PIDX_MASK 0x00003fffU +#define PIDX_SHIFT 0 +#define PIDX(x) ((x) << PIDX_SHIFT) + +#define SGE_PF_GTS 0x4 +#define INGRESSQID_MASK 0xffff0000U +#define INGRESSQID_SHIFT 16 +#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT) +#define TIMERREG_MASK 0x0000e000U +#define TIMERREG_SHIFT 13 +#define TIMERREG(x) ((x) << TIMERREG_SHIFT) +#define SEINTARM_MASK 0x00001000U +#define SEINTARM_SHIFT 12 +#define SEINTARM(x) ((x) << SEINTARM_SHIFT) +#define CIDXINC_MASK 0x00000fffU +#define CIDXINC_SHIFT 0 +#define CIDXINC(x) ((x) << CIDXINC_SHIFT) + +#define SGE_CONTROL 0x1008 +#define DCASYSTYPE 0x00080000U +#define RXPKTCPLMODE 0x00040000U +#define EGRSTATUSPAGESIZE 0x00020000U +#define PKTSHIFT_MASK 0x00001c00U +#define PKTSHIFT_SHIFT 10 +#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) +#define INGPCIEBOUNDARY_MASK 0x00000380U +#define INGPCIEBOUNDARY_SHIFT 7 +#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) +#define INGPADBOUNDARY_MASK 0x00000070U +#define INGPADBOUNDARY_SHIFT 4 +#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) +#define EGRPCIEBOUNDARY_MASK 0x0000000eU +#define EGRPCIEBOUNDARY_SHIFT 1 +#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) +#define GLOBALENABLE 0x00000001U + +#define SGE_HOST_PAGE_SIZE 0x100c +#define HOSTPAGESIZEPF0_MASK 0x0000000fU +#define HOSTPAGESIZEPF0_SHIFT 0 +#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) + +#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 +#define QUEUESPERPAGEPF0_MASK 0x0000000fU +#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) + +#define SGE_INT_CAUSE1 0x1024 +#define SGE_INT_CAUSE2 0x1030 +#define SGE_INT_CAUSE3 0x103c +#define ERR_FLM_DBP 0x80000000U +#define ERR_FLM_IDMA1 0x40000000U +#define ERR_FLM_IDMA0 0x20000000U +#define ERR_FLM_HINT 0x10000000U +#define ERR_PCIE_ERROR3 0x08000000U +#define ERR_PCIE_ERROR2 0x04000000U +#define ERR_PCIE_ERROR1 0x02000000U +#define ERR_PCIE_ERROR0 0x01000000U +#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U +#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U +#define ERR_INVALID_CIDX_INC 0x00200000U +#define ERR_ITP_TIME_PAUSED 0x00100000U +#define ERR_CPL_OPCODE_0 0x00080000U +#define ERR_DROPPED_DB 0x00040000U +#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U +#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U +#define ERR_BAD_DB_PIDX3 0x00008000U +#define ERR_BAD_DB_PIDX2 0x00004000U +#define ERR_BAD_DB_PIDX1 
0x00002000U +#define ERR_BAD_DB_PIDX0 0x00001000U +#define ERR_ING_PCIE_CHAN 0x00000800U +#define ERR_ING_CTXT_PRIO 0x00000400U +#define ERR_EGR_CTXT_PRIO 0x00000200U +#define DBFIFO_HP_INT 0x00000100U +#define DBFIFO_LP_INT 0x00000080U +#define REG_ADDRESS_ERR 0x00000040U +#define INGRESS_SIZE_ERR 0x00000020U +#define EGRESS_SIZE_ERR 0x00000010U +#define ERR_INV_CTXT3 0x00000008U +#define ERR_INV_CTXT2 0x00000004U +#define ERR_INV_CTXT1 0x00000002U +#define ERR_INV_CTXT0 0x00000001U + +#define SGE_INT_ENABLE3 0x1040 +#define SGE_FL_BUFFER_SIZE0 0x1044 +#define SGE_FL_BUFFER_SIZE1 0x1048 +#define SGE_INGRESS_RX_THRESHOLD 0x10a0 +#define THRESHOLD_0_MASK 0x3f000000U +#define THRESHOLD_0_SHIFT 24 +#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT) +#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT) +#define THRESHOLD_1_MASK 0x003f0000U +#define THRESHOLD_1_SHIFT 16 +#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT) +#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT) +#define THRESHOLD_2_MASK 0x00003f00U +#define THRESHOLD_2_SHIFT 8 +#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT) +#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT) +#define THRESHOLD_3_MASK 0x0000003fU +#define THRESHOLD_3_SHIFT 0 +#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) +#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) + +#define SGE_TIMER_VALUE_0_AND_1 0x10b8 +#define TIMERVALUE0_MASK 0xffff0000U +#define TIMERVALUE0_SHIFT 16 +#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT) +#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT) +#define TIMERVALUE1_MASK 0x0000ffffU +#define TIMERVALUE1_SHIFT 0 +#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT) +#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) + +#define SGE_TIMER_VALUE_2_AND_3 0x10bc +#define SGE_TIMER_VALUE_4_AND_5 0x10c0 +#define SGE_DEBUG_INDEX 0x10cc +#define SGE_DEBUG_DATA_HIGH 0x10d0 +#define SGE_DEBUG_DATA_LOW 0x10d4 +#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 + +#define PCIE_PF_CLI 0x44 +#define PCIE_INT_CAUSE 0x3004 +#define UNXSPLCPLERR 0x20000000U +#define PCIEPINT 0x10000000U +#define PCIESINT 0x08000000U +#define RPLPERR 0x04000000U +#define RXWRPERR 0x02000000U +#define RXCPLPERR 0x01000000U +#define PIOTAGPERR 0x00800000U +#define MATAGPERR 0x00400000U +#define INTXCLRPERR 0x00200000U +#define FIDPERR 0x00100000U +#define CFGSNPPERR 0x00080000U +#define HRSPPERR 0x00040000U +#define HREQPERR 0x00020000U +#define HCNTPERR 0x00010000U +#define DRSPPERR 0x00008000U +#define DREQPERR 0x00004000U +#define DCNTPERR 0x00002000U +#define CRSPPERR 0x00001000U +#define CREQPERR 0x00000800U +#define CCNTPERR 0x00000400U +#define TARTAGPERR 0x00000200U +#define PIOREQPERR 0x00000100U +#define PIOCPLPERR 0x00000080U +#define MSIXDIPERR 0x00000040U +#define MSIXDATAPERR 0x00000020U +#define MSIXADDRHPERR 0x00000010U +#define MSIXADDRLPERR 0x00000008U +#define MSIDATAPERR 0x00000004U +#define MSIADDRHPERR 0x00000002U +#define MSIADDRLPERR 0x00000001U + +#define PCIE_NONFAT_ERR 0x3010 +#define PCIE_MEM_ACCESS_BASE_WIN 0x3068 +#define PCIEOFST_MASK 0xfffffc00U +#define BIR_MASK 0x00000300U +#define BIR_SHIFT 8 +#define BIR(x) ((x) << BIR_SHIFT) +#define WINDOW_MASK 0x000000ffU +#define WINDOW_SHIFT 0 +#define WINDOW(x) ((x) << WINDOW_SHIFT) + +#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 +#define RNPP 0x80000000U +#define RPCP 0x20000000U +#define RCIP 0x08000000U +#define RCCP 0x04000000U +#define RFTP 
0x00800000U +#define PTRP 0x00100000U + +#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4 +#define TPCP 0x40000000U +#define TNPP 0x20000000U +#define TFTP 0x10000000U +#define TCAP 0x08000000U +#define TCIP 0x04000000U +#define RCAP 0x02000000U +#define PLUP 0x00800000U +#define PLDN 0x00400000U +#define OTDD 0x00200000U +#define GTRP 0x00100000U +#define RDPE 0x00040000U +#define TDCE 0x00020000U +#define TDUE 0x00010000U + +#define MC_INT_CAUSE 0x7518 +#define ECC_UE_INT_CAUSE 0x00000004U +#define ECC_CE_INT_CAUSE 0x00000002U +#define PERR_INT_CAUSE 0x00000001U + +#define MC_ECC_STATUS 0x751c +#define ECC_CECNT_MASK 0xffff0000U +#define ECC_CECNT_SHIFT 16 +#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT) +#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT) +#define ECC_UECNT_MASK 0x0000ffffU +#define ECC_UECNT_SHIFT 0 +#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT) +#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT) + +#define MC_BIST_CMD 0x7600 +#define START_BIST 0x80000000U +#define BIST_CMD_GAP_MASK 0x0000ff00U +#define BIST_CMD_GAP_SHIFT 8 +#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT) +#define BIST_OPCODE_MASK 0x00000003U +#define BIST_OPCODE_SHIFT 0 +#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT) + +#define MC_BIST_CMD_ADDR 0x7604 +#define MC_BIST_CMD_LEN 0x7608 +#define MC_BIST_DATA_PATTERN 0x760c +#define BIST_DATA_TYPE_MASK 0x0000000fU +#define BIST_DATA_TYPE_SHIFT 0 +#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT) + +#define MC_BIST_STATUS_RDATA 0x7688 + +#define MA_EXT_MEMORY_BAR 0x77c8 +#define EXT_MEM_SIZE_MASK 0x00000fffU +#define EXT_MEM_SIZE_SHIFT 0 +#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) + +#define MA_TARGET_MEM_ENABLE 0x77d8 +#define EXT_MEM_ENABLE 0x00000004U +#define EDRAM1_ENABLE 0x00000002U +#define EDRAM0_ENABLE 0x00000001U + +#define MA_INT_CAUSE 0x77e0 +#define MEM_PERR_INT_CAUSE 0x00000002U +#define MEM_WRAP_INT_CAUSE 0x00000001U + +#define MA_INT_WRAP_STATUS 0x77e4 +#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U +#define MEM_WRAP_ADDRESS_SHIFT 4 +#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT) +#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU +#define MEM_WRAP_CLIENT_NUM_SHIFT 0 +#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) + +#define MA_PARITY_ERROR_STATUS 0x77f4 + +#define EDC_0_BASE_ADDR 0x7900 + +#define EDC_BIST_CMD 0x7904 +#define EDC_BIST_CMD_ADDR 0x7908 +#define EDC_BIST_CMD_LEN 0x790c +#define EDC_BIST_DATA_PATTERN 0x7910 +#define EDC_BIST_STATUS_RDATA 0x7928 +#define EDC_INT_CAUSE 0x7978 +#define ECC_UE_PAR 0x00000020U +#define ECC_CE_PAR 0x00000010U +#define PERR_PAR_CAUSE 0x00000008U + +#define EDC_ECC_STATUS 0x797c + +#define EDC_1_BASE_ADDR 0x7980 + +#define CIM_PF_MAILBOX_DATA 0x240 +#define CIM_PF_MAILBOX_CTRL 0x280 +#define MBMSGVALID 0x00000008U +#define MBINTREQ 0x00000004U +#define MBOWNER_MASK 0x00000003U +#define MBOWNER_SHIFT 0 +#define MBOWNER(x) ((x) << MBOWNER_SHIFT) +#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT) + +#define CIM_PF_HOST_INT_CAUSE 0x28c +#define MBMSGRDYINT 0x00080000U + +#define CIM_HOST_INT_CAUSE 0x7b2c +#define TIEQOUTPARERRINT 0x00100000U +#define TIEQINPARERRINT 0x00080000U +#define MBHOSTPARERR 0x00040000U +#define MBUPPARERR 0x00020000U +#define IBQPARERR 0x0001f800U +#define IBQTP0PARERR 0x00010000U +#define IBQTP1PARERR 0x00008000U +#define IBQULPPARERR 0x00004000U +#define IBQSGELOPARERR 0x00002000U +#define 
IBQSGEHIPARERR 0x00001000U +#define IBQNCSIPARERR 0x00000800U +#define OBQPARERR 0x000007e0U +#define OBQULP0PARERR 0x00000400U +#define OBQULP1PARERR 0x00000200U +#define OBQULP2PARERR 0x00000100U +#define OBQULP3PARERR 0x00000080U +#define OBQSGEPARERR 0x00000040U +#define OBQNCSIPARERR 0x00000020U +#define PREFDROPINT 0x00000002U +#define UPACCNONZERO 0x00000001U + +#define CIM_HOST_UPACC_INT_CAUSE 0x7b34 +#define EEPROMWRINT 0x40000000U +#define TIMEOUTMAINT 0x20000000U +#define TIMEOUTINT 0x10000000U +#define RSPOVRLOOKUPINT 0x08000000U +#define REQOVRLOOKUPINT 0x04000000U +#define BLKWRPLINT 0x02000000U +#define BLKRDPLINT 0x01000000U +#define SGLWRPLINT 0x00800000U +#define SGLRDPLINT 0x00400000U +#define BLKWRCTLINT 0x00200000U +#define BLKRDCTLINT 0x00100000U +#define SGLWRCTLINT 0x00080000U +#define SGLRDCTLINT 0x00040000U +#define BLKWREEPROMINT 0x00020000U +#define BLKRDEEPROMINT 0x00010000U +#define SGLWREEPROMINT 0x00008000U +#define SGLRDEEPROMINT 0x00004000U +#define BLKWRFLASHINT 0x00002000U +#define BLKRDFLASHINT 0x00001000U +#define SGLWRFLASHINT 0x00000800U +#define SGLRDFLASHINT 0x00000400U +#define BLKWRBOOTINT 0x00000200U +#define BLKRDBOOTINT 0x00000100U +#define SGLWRBOOTINT 0x00000080U +#define SGLRDBOOTINT 0x00000040U +#define ILLWRBEINT 0x00000020U +#define ILLRDBEINT 0x00000010U +#define ILLRDINT 0x00000008U +#define ILLWRINT 0x00000004U +#define ILLTRANSINT 0x00000002U +#define RSVDSPACEINT 0x00000001U + +#define TP_OUT_CONFIG 0x7d04 +#define VLANEXTENABLE_MASK 0x0000f000U +#define VLANEXTENABLE_SHIFT 12 + +#define TP_PARA_REG2 0x7d68 +#define MAXRXDATA_MASK 0xffff0000U +#define MAXRXDATA_SHIFT 16 +#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT) + +#define TP_TIMER_RESOLUTION 0x7d90 +#define TIMERRESOLUTION_MASK 0x00ff0000U +#define TIMERRESOLUTION_SHIFT 16 +#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) + +#define TP_SHIFT_CNT 0x7dc0 + +#define TP_CCTRL_TABLE 0x7ddc +#define TP_MTU_TABLE 0x7de4 +#define MTUINDEX_MASK 0xff000000U +#define MTUINDEX_SHIFT 24 +#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT) +#define MTUWIDTH_MASK 0x000f0000U +#define MTUWIDTH_SHIFT 16 +#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT) +#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT) +#define MTUVALUE_MASK 0x00003fffU +#define MTUVALUE_SHIFT 0 +#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT) +#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT) + +#define TP_RSS_LKP_TABLE 0x7dec +#define LKPTBLROWVLD 0x80000000U +#define LKPTBLQUEUE1_MASK 0x000ffc00U +#define LKPTBLQUEUE1_SHIFT 10 +#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT) +#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT) +#define LKPTBLQUEUE0_MASK 0x000003ffU +#define LKPTBLQUEUE0_SHIFT 0 +#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT) +#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT) + +#define TP_PIO_ADDR 0x7e40 +#define TP_PIO_DATA 0x7e44 +#define TP_MIB_INDEX 0x7e50 +#define TP_MIB_DATA 0x7e54 +#define TP_INT_CAUSE 0x7e74 +#define FLMTXFLSTEMPTY 0x40000000U + +#define TP_INGRESS_CONFIG 0x141 +#define VNIC 0x00000800U +#define CSUM_HAS_PSEUDO_HDR 0x00000400U +#define RM_OVLAN 0x00000200U +#define LOOKUPEVERYPKT 0x00000100U + +#define TP_MIB_MAC_IN_ERR_0 0x0 +#define TP_MIB_TCP_OUT_RST 0xc +#define TP_MIB_TCP_IN_SEG_HI 0x10 +#define TP_MIB_TCP_IN_SEG_LO 0x11 +#define TP_MIB_TCP_OUT_SEG_HI 0x12 +#define TP_MIB_TCP_OUT_SEG_LO 0x13 +#define TP_MIB_TCP_RXT_SEG_HI 0x14 +#define 
TP_MIB_TCP_RXT_SEG_LO 0x15 +#define TP_MIB_TNL_CNG_DROP_0 0x18 +#define TP_MIB_TCP_V6IN_ERR_0 0x28 +#define TP_MIB_TCP_V6OUT_RST 0x2c +#define TP_MIB_OFD_ARP_DROP 0x36 +#define TP_MIB_TNL_DROP_0 0x44 +#define TP_MIB_OFD_VLN_DROP_0 0x58 + +#define ULP_TX_INT_CAUSE 0x8dcc +#define PBL_BOUND_ERR_CH3 0x80000000U +#define PBL_BOUND_ERR_CH2 0x40000000U +#define PBL_BOUND_ERR_CH1 0x20000000U +#define PBL_BOUND_ERR_CH0 0x10000000U + +#define PM_RX_INT_CAUSE 0x8fdc +#define ZERO_E_CMD_ERROR 0x00400000U +#define PMRX_FRAMING_ERROR 0x003ffff0U +#define OCSPI_PAR_ERROR 0x00000008U +#define DB_OPTIONS_PAR_ERROR 0x00000004U +#define IESPI_PAR_ERROR 0x00000002U +#define E_PCMD_PAR_ERROR 0x00000001U + +#define PM_TX_INT_CAUSE 0x8ffc +#define PCMD_LEN_OVFL0 0x80000000U +#define PCMD_LEN_OVFL1 0x40000000U +#define PCMD_LEN_OVFL2 0x20000000U +#define ZERO_C_CMD_ERROR 0x10000000U +#define PMTX_FRAMING_ERROR 0x0ffffff0U +#define OESPI_PAR_ERROR 0x00000008U +#define ICSPI_PAR_ERROR 0x00000002U +#define C_PCMD_PAR_ERROR 0x00000001U + +#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 +#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 +#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 +#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c +#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 +#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 +#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 +#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c +#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 +#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 +#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 +#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c +#define MPS_PORT_STAT_TX_PORT_64B_L 0x430 +#define MPS_PORT_STAT_TX_PORT_64B_H 0x434 +#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 +#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c +#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 +#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 +#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 +#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c +#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 +#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 +#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 +#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c +#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 +#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 +#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468 +#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c +#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 +#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 +#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 +#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c +#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 +#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 +#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 +#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c +#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 +#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 +#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 +#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c +#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 +#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 +#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 +#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac +#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 +#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 +#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 +#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 +#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 +#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc +#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 +#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 +#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 +#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc +#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 +#define 
MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 +#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 +#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec +#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 +#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 +#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 +#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc +#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 +#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 +#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 +#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c +#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 +#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 +#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 +#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c +#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 +#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 +#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 +#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 +#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 +#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 +#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c +#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 +#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 +#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 +#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c +#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 +#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 +#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 +#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c +#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 +#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 +#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 +#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c +#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 +#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 +#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 +#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c +#define MPS_PORT_STAT_RX_PORT_64B_L 0x590 +#define MPS_PORT_STAT_RX_PORT_64B_H 0x594 +#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 +#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c +#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 +#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 +#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 +#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac +#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 +#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 +#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 +#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc +#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 +#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 +#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 +#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc +#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 +#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 +#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 +#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc +#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 +#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 +#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 +#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec +#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 +#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 +#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 +#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc +#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 +#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 +#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 +#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c +#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 +#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 +#define MPS_CMN_CTL 0x9000 +#define NUMPORTS_MASK 0x00000003U +#define NUMPORTS_SHIFT 0 +#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT) + +#define MPS_INT_CAUSE 
0x9008 +#define STATINT 0x00000020U +#define TXINT 0x00000010U +#define RXINT 0x00000008U +#define TRCINT 0x00000004U +#define CLSINT 0x00000002U +#define PLINT 0x00000001U + +#define MPS_TX_INT_CAUSE 0x9408 +#define PORTERR 0x00010000U +#define FRMERR 0x00008000U +#define SECNTERR 0x00004000U +#define BUBBLE 0x00002000U +#define TXDESCFIFO 0x00001e00U +#define TXDATAFIFO 0x000001e0U +#define NCSIFIFO 0x00000010U +#define TPFIFO 0x0000000fU + +#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614 +#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620 +#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c + +#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 +#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 +#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 +#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c +#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 +#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 +#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 +#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c +#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 +#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 +#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 +#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c +#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 +#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 +#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 +#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c +#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 +#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 +#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 +#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c +#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 +#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 +#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 +#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c +#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 +#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 +#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 +#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac +#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 +#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 +#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 +#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc +#define MPS_TRC_CFG 0x9800 +#define TRCFIFOEMPTY 0x00000010U +#define TRCIGNOREDROPINPUT 0x00000008U +#define TRCKEEPDUPLICATES 0x00000004U +#define TRCEN 0x00000002U +#define TRCMULTIFILTER 0x00000001U + +#define MPS_TRC_RSS_CONTROL 0x9808 +#define RSSCONTROL_MASK 0x00ff0000U +#define RSSCONTROL_SHIFT 16 +#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) +#define QUEUENUMBER_MASK 0x0000ffffU +#define QUEUENUMBER_SHIFT 0 +#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT) + +#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810 +#define TFINVERTMATCH 0x01000000U +#define TFPKTTOOLARGE 0x00800000U +#define TFEN 0x00400000U +#define TFPORT_MASK 0x003c0000U +#define TFPORT_SHIFT 18 +#define TFPORT(x) ((x) << TFPORT_SHIFT) +#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT) +#define TFDROP 0x00020000U +#define TFSOPEOPERR 0x00010000U +#define TFLENGTH_MASK 0x00001f00U +#define TFLENGTH_SHIFT 8 +#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT) +#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT) +#define TFOFFSET_MASK 0x0000001fU +#define TFOFFSET_SHIFT 0 +#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT) +#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT) + +#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820 +#define TFMINPKTSIZE_MASK 0x01ff0000U +#define TFMINPKTSIZE_SHIFT 16 +#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT) +#define 
TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT) +#define TFCAPTUREMAX_MASK 0x00003fffU +#define TFCAPTUREMAX_SHIFT 0 +#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT) +#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT) + +#define MPS_TRC_INT_CAUSE 0x985c +#define MISCPERR 0x00000100U +#define PKTFIFO 0x000000f0U +#define FILTMEM 0x0000000fU + +#define MPS_TRC_FILTER0_MATCH 0x9c00 +#define MPS_TRC_FILTER0_DONT_CARE 0x9c80 +#define MPS_TRC_FILTER1_MATCH 0x9d00 +#define MPS_CLS_INT_CAUSE 0xd028 +#define PLERRENB 0x00000008U +#define HASHSRAM 0x00000004U +#define MATCHTCAM 0x00000002U +#define MATCHSRAM 0x00000001U + +#define MPS_RX_PERR_INT_CAUSE 0x11074 + +#define CPL_INTR_CAUSE 0x19054 +#define CIM_OP_MAP_PERR 0x00000020U +#define CIM_OVFL_ERROR 0x00000010U +#define TP_FRAMING_ERROR 0x00000008U +#define SGE_FRAMING_ERROR 0x00000004U +#define CIM_FRAMING_ERROR 0x00000002U +#define ZERO_SWITCH_ERROR 0x00000001U + +#define SMB_INT_CAUSE 0x19090 +#define MSTTXFIFOPARINT 0x00200000U +#define MSTRXFIFOPARINT 0x00100000U +#define SLVFIFOPARINT 0x00080000U + +#define ULP_RX_INT_CAUSE 0x19158 +#define ULP_RX_ISCSI_TAGMASK 0x19164 +#define ULP_RX_ISCSI_PSZ 0x19168 +#define HPZ3_MASK 0x0f000000U +#define HPZ3_SHIFT 24 +#define HPZ3(x) ((x) << HPZ3_SHIFT) +#define HPZ2_MASK 0x000f0000U +#define HPZ2_SHIFT 16 +#define HPZ2(x) ((x) << HPZ2_SHIFT) +#define HPZ1_MASK 0x00000f00U +#define HPZ1_SHIFT 8 +#define HPZ1(x) ((x) << HPZ1_SHIFT) +#define HPZ0_MASK 0x0000000fU +#define HPZ0_SHIFT 0 +#define HPZ0(x) ((x) << HPZ0_SHIFT) + +#define ULP_RX_TDDP_PSZ 0x19178 + +#define SF_DATA 0x193f8 +#define SF_OP 0x193fc +#define BUSY 0x80000000U +#define SF_LOCK 0x00000010U +#define SF_CONT 0x00000008U +#define BYTECNT_MASK 0x00000006U +#define BYTECNT_SHIFT 1 +#define BYTECNT(x) ((x) << BYTECNT_SHIFT) +#define OP_WR 0x00000001U + +#define PL_PF_INT_CAUSE 0x3c0 +#define PFSW 0x00000008U +#define PFSGE 0x00000004U +#define PFCIM 0x00000002U +#define PFMPS 0x00000001U + +#define PL_PF_INT_ENABLE 0x3c4 +#define PL_PF_CTL 0x3c8 +#define SWINT 0x00000001U + +#define PL_WHOAMI 0x19400 +#define SOURCEPF_MASK 0x00000700U +#define SOURCEPF_SHIFT 8 +#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT) +#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT) +#define ISVF 0x00000080U +#define VFID_MASK 0x0000007fU +#define VFID_SHIFT 0 +#define VFID(x) ((x) << VFID_SHIFT) +#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT) + +#define PL_INT_CAUSE 0x1940c +#define ULP_TX 0x08000000U +#define SGE 0x04000000U +#define HMA 0x02000000U +#define CPL_SWITCH 0x01000000U +#define ULP_RX 0x00800000U +#define PM_RX 0x00400000U +#define PM_TX 0x00200000U +#define MA 0x00100000U +#define TP 0x00080000U +#define LE 0x00040000U +#define EDC1 0x00020000U +#define EDC0 0x00010000U +#define MC 0x00008000U +#define PCIE 0x00004000U +#define PMU 0x00002000U +#define XGMAC_KR1 0x00001000U +#define XGMAC_KR0 0x00000800U +#define XGMAC1 0x00000400U +#define XGMAC0 0x00000200U +#define SMB 0x00000100U +#define SF 0x00000080U +#define PL 0x00000040U +#define NCSI 0x00000020U +#define MPS 0x00000010U +#define MI 0x00000008U +#define DBG 0x00000004U +#define I2CM 0x00000002U +#define CIM 0x00000001U + +#define PL_INT_MAP0 0x19414 +#define PL_RST 0x19428 +#define PIORST 0x00000002U +#define PIORSTMODE 0x00000001U + +#define PL_PL_INT_CAUSE 0x19430 +#define FATALPERR 0x00000010U +#define PERRVFID 0x00000001U + +#define PL_REV 0x1943c + +#define LE_DB_CONFIG 0x19c04 +#define HASHEN 0x00100000U + 
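/*
 * Illustrative sketch, not part of the patch: the register definitions
 * above follow a consistent FIELD_MASK/FIELD_SHIFT/FIELD_GET(x) pattern,
 * so decoding a field is a masked shift. For example, extracting the PF
 * number from PL_WHOAMI (t4_read_reg() is assumed here as the driver's
 * usual MMIO read helper; whoami_source_pf() is a hypothetical wrapper,
 * not a function added by this patch):
 */
static inline unsigned int whoami_source_pf(struct adapter *adap)
{
	u32 whoami = t4_read_reg(adap, PL_WHOAMI);

	/* expands to (whoami & SOURCEPF_MASK) >> SOURCEPF_SHIFT */
	return SOURCEPF_GET(whoami);
}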
+#define LE_DB_SERVER_INDEX 0x19c18 +#define LE_DB_ACT_CNT_IPV4 0x19c20 +#define LE_DB_ACT_CNT_IPV6 0x19c24 + +#define LE_DB_INT_CAUSE 0x19c3c +#define REQQPARERR 0x00010000U +#define UNKNOWNCMD 0x00008000U +#define PARITYERR 0x00000040U +#define LIPMISS 0x00000020U +#define LIP0 0x00000010U + +#define LE_DB_TID_HASHBASE 0x19df8 + +#define NCSI_INT_CAUSE 0x1a0d8 +#define CIM_DM_PRTY_ERR 0x00000100U +#define MPS_DM_PRTY_ERR 0x00000080U +#define TXFIFO_PRTY_ERR 0x00000002U +#define RXFIFO_PRTY_ERR 0x00000001U + +#define XGMAC_PORT_CFG2 0x1018 +#define PATEN 0x00040000U +#define MAGICEN 0x00020000U + +#define XGMAC_PORT_MAGIC_MACID_LO 0x1024 +#define XGMAC_PORT_MAGIC_MACID_HI 0x1028 + +#define XGMAC_PORT_EPIO_DATA0 0x10c0 +#define XGMAC_PORT_EPIO_DATA1 0x10c4 +#define XGMAC_PORT_EPIO_DATA2 0x10c8 +#define XGMAC_PORT_EPIO_DATA3 0x10cc +#define XGMAC_PORT_EPIO_OP 0x10d0 +#define EPIOWR 0x00000100U +#define ADDRESS_MASK 0x000000ffU +#define ADDRESS_SHIFT 0 +#define ADDRESS(x) ((x) << ADDRESS_SHIFT) + +#define XGMAC_PORT_INT_CAUSE 0x10dc +#endif /* __T4_REGS_H */ diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h new file mode 100644 index 00000000000..3393d05a388 --- /dev/null +++ b/drivers/net/cxgb4/t4fw_api.h @@ -0,0 +1,1580 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _T4FW_INTERFACE_H_ +#define _T4FW_INTERFACE_H_ + +#define FW_T4VF_SGE_BASE_ADDR 0x0000 +#define FW_T4VF_MPS_BASE_ADDR 0x0100 +#define FW_T4VF_PL_BASE_ADDR 0x0200 +#define FW_T4VF_MBDATA_BASE_ADDR 0x0240 +#define FW_T4VF_CIM_BASE_ADDR 0x0300 + +enum fw_wr_opcodes { + FW_FILTER_WR = 0x02, + FW_ULPTX_WR = 0x04, + FW_TP_WR = 0x05, + FW_ETH_TX_PKT_WR = 0x08, + FW_FLOWC_WR = 0x0a, + FW_OFLD_TX_DATA_WR = 0x0b, + FW_CMD_WR = 0x10, + FW_ETH_TX_PKT_VM_WR = 0x11, + FW_RI_RES_WR = 0x0c, + FW_RI_INIT_WR = 0x0d, + FW_RI_RDMA_WRITE_WR = 0x14, + FW_RI_SEND_WR = 0x15, + FW_RI_RDMA_READ_WR = 0x16, + FW_RI_RECV_WR = 0x17, + FW_RI_BIND_MW_WR = 0x18, + FW_RI_FR_NSMR_WR = 0x19, + FW_RI_INV_LSTAG_WR = 0x1a, + FW_LASTC2E_WR = 0x40 +}; + +struct fw_wr_hdr { + __be32 hi; + __be32 lo; +}; + +#define FW_WR_OP(x) ((x) << 24) +#define FW_WR_ATOMIC(x) ((x) << 23) +#define FW_WR_FLUSH(x) ((x) << 22) +#define FW_WR_COMPL(x) ((x) << 21) +#define FW_WR_IMMDLEN(x) ((x) << 0) + +#define FW_WR_EQUIQ (1U << 31) +#define FW_WR_EQUEQ (1U << 30) +#define FW_WR_FLOWID(x) ((x) << 8) +#define FW_WR_LEN16(x) ((x) << 0) + +struct fw_ulptx_wr { + __be32 op_to_compl; + __be32 flowid_len16; + u64 cookie; +}; + +struct fw_tp_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + u64 cookie; +}; + +struct fw_eth_tx_pkt_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be64 r3; +}; + +enum fw_flowc_mnem { + FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ + FW_FLOWC_MNEM_CH, + FW_FLOWC_MNEM_PORT, + FW_FLOWC_MNEM_IQID, + FW_FLOWC_MNEM_SNDNXT, + FW_FLOWC_MNEM_RCVNXT, + FW_FLOWC_MNEM_SNDBUF, + FW_FLOWC_MNEM_MSS, +}; + +struct fw_flowc_mnemval { + u8 mnemonic; + u8 r4[3]; + __be32 val; +}; + +struct fw_flowc_wr { + __be32 op_to_nparams; +#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0) + __be32 flowid_len16; + struct fw_flowc_mnemval mnemval[0]; +}; + +struct fw_ofld_tx_data_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + __be32 plen; + __be32 tunnel_to_proxy; +#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19) +#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18) +#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17) +#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16) +#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15) +#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14) +#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10) +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6) +}; + +struct fw_cmd_wr { + __be32 op_dma; +#define FW_CMD_WR_DMA (1U << 17) + __be32 len16_pkd; + __be64 cookie_daddr; +}; + +struct fw_eth_tx_pkt_vm_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be32 r3[2]; + u8 ethmacdst[6]; + u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + +#define FW_CMD_MAX_TIMEOUT 3000 + +enum fw_cmd_opcodes { + FW_LDST_CMD = 0x01, + FW_RESET_CMD = 0x03, + FW_HELLO_CMD = 0x04, + FW_BYE_CMD = 0x05, + FW_INITIALIZE_CMD = 0x06, + FW_CAPS_CONFIG_CMD = 0x07, + FW_PARAMS_CMD = 0x08, + FW_PFVF_CMD = 0x09, + FW_IQ_CMD = 0x10, + FW_EQ_MNGT_CMD = 0x11, + FW_EQ_ETH_CMD = 0x12, + FW_EQ_CTRL_CMD = 0x13, + FW_EQ_OFLD_CMD = 0x21, + FW_VI_CMD = 0x14, + FW_VI_MAC_CMD = 0x15, + FW_VI_RXMODE_CMD = 0x16, + FW_VI_ENABLE_CMD = 0x17, + FW_ACL_MAC_CMD = 0x18, + FW_ACL_VLAN_CMD = 0x19, + FW_VI_STATS_CMD = 0x1a, + FW_PORT_CMD = 0x1b, + FW_PORT_STATS_CMD = 0x1c, + FW_PORT_LB_STATS_CMD = 0x1d, + FW_PORT_TRACE_CMD = 0x1e, + FW_PORT_TRACE_MMAP_CMD = 0x1f, + FW_RSS_IND_TBL_CMD = 0x20, + FW_RSS_GLB_CONFIG_CMD = 0x22, + FW_RSS_VI_CONFIG_CMD = 0x23, + FW_LASTC2E_CMD = 0x40, + FW_ERROR_CMD = 0x80, + FW_DEBUG_CMD = 0x81, +}; + +enum fw_cmd_cap { + FW_CMD_CAP_PF = 0x01, + 
FW_CMD_CAP_DMAQ = 0x02, + FW_CMD_CAP_PORT = 0x04, + FW_CMD_CAP_PORTPROMISC = 0x08, + FW_CMD_CAP_PORTSTATS = 0x10, + FW_CMD_CAP_VF = 0x80, +}; + +/* + * Generic command header flit0 + */ +struct fw_cmd_hdr { + __be32 hi; + __be32 lo; +}; + +#define FW_CMD_OP(x) ((x) << 24) +#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff) +#define FW_CMD_REQUEST (1U << 23) +#define FW_CMD_READ (1U << 22) +#define FW_CMD_WRITE (1U << 21) +#define FW_CMD_EXEC (1U << 20) +#define FW_CMD_RAMASK(x) ((x) << 20) +#define FW_CMD_RETVAL(x) ((x) << 8) +#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff) +#define FW_CMD_LEN16(x) ((x) << 0) + +enum fw_ldst_addrspc { + FW_LDST_ADDRSPC_FIRMWARE = 0x0001, + FW_LDST_ADDRSPC_SGE_EGRC = 0x0008, + FW_LDST_ADDRSPC_SGE_INGC = 0x0009, + FW_LDST_ADDRSPC_SGE_FLMC = 0x000a, + FW_LDST_ADDRSPC_SGE_CONMC = 0x000b, + FW_LDST_ADDRSPC_TP_PIO = 0x0010, + FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011, + FW_LDST_ADDRSPC_TP_MIB = 0x0012, + FW_LDST_ADDRSPC_MDIO = 0x0018, + FW_LDST_ADDRSPC_MPS = 0x0020, + FW_LDST_ADDRSPC_FUNC = 0x0028 +}; + +enum fw_ldst_mps_fid { + FW_LDST_MPS_ATRB, + FW_LDST_MPS_RPLC +}; + +enum fw_ldst_func_access_ctl { + FW_LDST_FUNC_ACC_CTL_VIID, + FW_LDST_FUNC_ACC_CTL_FID +}; + +enum fw_ldst_func_mod_index { + FW_LDST_FUNC_MPS +}; + +struct fw_ldst_cmd { + __be32 op_to_addrspace; +#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0) + __be32 cycles_to_len16; + union fw_ldst { + struct fw_ldst_addrval { + __be32 addr; + __be32 val; + } addrval; + struct fw_ldst_idctxt { + __be32 physid; + __be32 msg_pkd; + __be32 ctxt_data7; + __be32 ctxt_data6; + __be32 ctxt_data5; + __be32 ctxt_data4; + __be32 ctxt_data3; + __be32 ctxt_data2; + __be32 ctxt_data1; + __be32 ctxt_data0; + } idctxt; + struct fw_ldst_mdio { + __be16 paddr_mmd; + __be16 raddr; + __be16 vctl; + __be16 rval; + } mdio; + struct fw_ldst_mps { + __be16 fid_ctl; + __be16 rplcpf_pkd; + __be32 rplc127_96; + __be32 rplc95_64; + __be32 rplc63_32; + __be32 rplc31_0; + __be32 atrb; + __be16 vlan[16]; + } mps; + struct fw_ldst_func { + u8 access_ctl; + u8 mod_index; + __be16 ctl_id; + __be32 offset; + __be64 data0; + __be64 data1; + } func; + } u; +}; + +#define FW_LDST_CMD_MSG(x) ((x) << 31) +#define FW_LDST_CMD_PADDR(x) ((x) << 8) +#define FW_LDST_CMD_MMD(x) ((x) << 0) +#define FW_LDST_CMD_FID(x) ((x) << 15) +#define FW_LDST_CMD_CTL(x) ((x) << 0) +#define FW_LDST_CMD_RPLCPF(x) ((x) << 0) + +struct fw_reset_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 val; + __be32 r3; +}; + +struct fw_hello_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 err_to_mbasyncnot; +#define FW_HELLO_CMD_ERR (1U << 31) +#define FW_HELLO_CMD_INIT (1U << 30) +#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) +#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) +#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24) +#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) + __be32 fwrev; +}; + +struct fw_bye_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +struct fw_initialize_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +enum fw_caps_config_hm { + FW_CAPS_CONFIG_HM_PCIE = 0x00000001, + FW_CAPS_CONFIG_HM_PL = 0x00000002, + FW_CAPS_CONFIG_HM_SGE = 0x00000004, + FW_CAPS_CONFIG_HM_CIM = 0x00000008, + FW_CAPS_CONFIG_HM_ULPTX = 0x00000010, + FW_CAPS_CONFIG_HM_TP = 0x00000020, + FW_CAPS_CONFIG_HM_ULPRX = 0x00000040, + FW_CAPS_CONFIG_HM_PMRX = 0x00000080, + FW_CAPS_CONFIG_HM_PMTX = 0x00000100, + FW_CAPS_CONFIG_HM_MC = 0x00000200, + FW_CAPS_CONFIG_HM_LE = 0x00000400, + FW_CAPS_CONFIG_HM_MPS = 0x00000800, + FW_CAPS_CONFIG_HM_XGMAC = 
0x00001000, + FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000, + FW_CAPS_CONFIG_HM_T4DBG = 0x00004000, + FW_CAPS_CONFIG_HM_MI = 0x00008000, + FW_CAPS_CONFIG_HM_I2CM = 0x00010000, + FW_CAPS_CONFIG_HM_NCSI = 0x00020000, + FW_CAPS_CONFIG_HM_SMB = 0x00040000, + FW_CAPS_CONFIG_HM_MA = 0x00080000, + FW_CAPS_CONFIG_HM_EDRAM = 0x00100000, + FW_CAPS_CONFIG_HM_PMU = 0x00200000, + FW_CAPS_CONFIG_HM_UART = 0x00400000, + FW_CAPS_CONFIG_HM_SF = 0x00800000, +}; + +enum fw_caps_config_nbm { + FW_CAPS_CONFIG_NBM_IPMI = 0x00000001, + FW_CAPS_CONFIG_NBM_NCSI = 0x00000002, +}; + +enum fw_caps_config_link { + FW_CAPS_CONFIG_LINK_PPP = 0x00000001, + FW_CAPS_CONFIG_LINK_QFC = 0x00000002, + FW_CAPS_CONFIG_LINK_DCBX = 0x00000004, +}; + +enum fw_caps_config_switch { + FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001, + FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002, +}; + +enum fw_caps_config_nic { + FW_CAPS_CONFIG_NIC = 0x00000001, + FW_CAPS_CONFIG_NIC_VM = 0x00000002, +}; + +enum fw_caps_config_ofld { + FW_CAPS_CONFIG_OFLD = 0x00000001, +}; + +enum fw_caps_config_rdma { + FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001, + FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002, +}; + +enum fw_caps_config_iscsi { + FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001, + FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002, + FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004, + FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, +}; + +enum fw_caps_config_fcoe { + FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, + FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, +}; + +struct fw_caps_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 r2; + __be32 hwmbitmap; + __be16 nbmcaps; + __be16 linkcaps; + __be16 switchcaps; + __be16 r3; + __be16 niccaps; + __be16 ofldcaps; + __be16 rdmacaps; + __be16 r4; + __be16 iscsicaps; + __be16 fcoecaps; + __be32 r5; + __be64 r6; +}; + +/* + * params command mnemonics + */ +enum fw_params_mnem { + FW_PARAMS_MNEM_DEV = 1, /* device params */ + FW_PARAMS_MNEM_PFVF = 2, /* function params */ + FW_PARAMS_MNEM_REG = 3, /* limited register access */ + FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ + FW_PARAMS_MNEM_LAST +}; + +/* + * device parameters + */ +enum fw_params_param_dev { + FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ + FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ + FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs + * allocated by the device's + * Lookup Engine + */ + FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03, + FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04, + FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05, + FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06, + FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07, + FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08, + FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09, + FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A +}; + +/* + * physical and virtual function parameters + */ +enum fw_params_param_pfvf { + FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00, + FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01, + FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02, + FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, + FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, + FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, + FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, + FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07, + FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08, + FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09, + FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A, + FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B, + FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C, + FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D, + FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E, + FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F, + FW_PARAMS_PARAM_PFVF_RQ_END = 0x10, + 
FW_PARAMS_PARAM_PFVF_PBL_START = 0x11, + FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, + FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, + FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, + FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, +}; + +/* + * dma queue parameters + */ +enum fw_params_param_dmaq { + FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00, + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, + FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10, + FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, + FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, +}; + +#define FW_PARAMS_MNEM(x) ((x) << 24) +#define FW_PARAMS_PARAM_X(x) ((x) << 16) +#define FW_PARAMS_PARAM_Y(x) ((x) << 8) +#define FW_PARAMS_PARAM_Z(x) ((x) << 0) +#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) +#define FW_PARAMS_PARAM_YZ(x) ((x) << 0) + +struct fw_params_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + struct fw_params_param { + __be32 mnem; + __be32 val; + } param[7]; +}; + +#define FW_PARAMS_CMD_PFN(x) ((x) << 8) +#define FW_PARAMS_CMD_VFN(x) ((x) << 0) + +struct fw_pfvf_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + __be32 niqflint_niq; + __be32 cmask_to_neq; + __be32 tc_to_nexactf; + __be32 r_caps_to_nethctrl; + __be16 nricq; + __be16 nriqp; + __be32 r4; +}; + +#define FW_PFVF_CMD_PFN(x) ((x) << 8) +#define FW_PFVF_CMD_VFN(x) ((x) << 0) + +#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20) +#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff) + +#define FW_PFVF_CMD_NIQ(x) ((x) << 0) +#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_PFVF_CMD_CMASK(x) ((x) << 24) +#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf) + +#define FW_PFVF_CMD_PMASK(x) ((x) << 20) +#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf) + +#define FW_PFVF_CMD_NEQ(x) ((x) << 0) +#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_PFVF_CMD_TC(x) ((x) << 24) +#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff) + +#define FW_PFVF_CMD_NVI(x) ((x) << 16) +#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff) + +#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0) +#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff) + +#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24) +#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff) + +#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16) +#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff) + +#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0) +#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff) + +enum fw_iq_type { + FW_IQ_TYPE_FL_INT_CAP, + FW_IQ_TYPE_NO_FL_INT_CAP +}; + +struct fw_iq_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 physiqid; + __be16 iqid; + __be16 fl0id; + __be16 fl1id; + __be32 type_to_iqandstindex; + __be16 iqdroprss_to_iqesize; + __be16 iqsize; + __be64 iqaddr; + __be32 iqns_to_fl0congen; + __be16 fl0dcaen_to_fl0cidxfthresh; + __be16 fl0size; + __be64 fl0addr; + __be32 fl1cngchmap_to_fl1congen; + __be16 fl1dcaen_to_fl1cidxfthresh; + __be16 fl1size; + __be64 fl1addr; +}; + +#define FW_IQ_CMD_PFN(x) ((x) << 8) +#define FW_IQ_CMD_VFN(x) ((x) << 0) + +#define FW_IQ_CMD_ALLOC (1U << 31) +#define FW_IQ_CMD_FREE (1U << 30) +#define FW_IQ_CMD_MODIFY (1U << 29) +#define FW_IQ_CMD_IQSTART(x) ((x) << 28) +#define FW_IQ_CMD_IQSTOP(x) ((x) << 27) + +#define FW_IQ_CMD_TYPE(x) ((x) << 29) +#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28) +#define FW_IQ_CMD_VIID(x) ((x) << 16) +#define FW_IQ_CMD_IQANDST(x) ((x) << 15) +#define FW_IQ_CMD_IQANUS(x) ((x) << 14) +#define FW_IQ_CMD_IQANUD(x) ((x) << 12) +#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0) + +#define FW_IQ_CMD_IQDROPRSS (1U << 15) +#define FW_IQ_CMD_IQGTSMODE (1U << 14) +#define 
FW_IQ_CMD_IQPCIECH(x) ((x) << 12) +#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11) +#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6) +#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4) +#define FW_IQ_CMD_IQO (1U << 3) +#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2) +#define FW_IQ_CMD_IQESIZE(x) ((x) << 0) + +#define FW_IQ_CMD_IQNS(x) ((x) << 31) +#define FW_IQ_CMD_IQRO(x) ((x) << 30) +#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28) +#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27) +#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26) +#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20) +#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15) +#define FW_IQ_CMD_FL0DBP(x) ((x) << 14) +#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13) +#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12) +#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11) +#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10) +#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9) +#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8) +#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7) +#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6) +#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4) +#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3) +#define FW_IQ_CMD_FL0PADEN (1U << 2) +#define FW_IQ_CMD_FL0PACKEN (1U << 1) +#define FW_IQ_CMD_FL0CONGEN (1U << 0) + +#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15) +#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10) +#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7) +#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4) +#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3) +#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0) + +#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20) +#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15) +#define FW_IQ_CMD_FL1DBP(x) ((x) << 14) +#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13) +#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12) +#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11) +#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10) +#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9) +#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8) +#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7) +#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6) +#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4) +#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3) +#define FW_IQ_CMD_FL1PADEN (1U << 2) +#define FW_IQ_CMD_FL1PACKEN (1U << 1) +#define FW_IQ_CMD_FL1CONGEN (1U << 0) + +#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15) +#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10) +#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7) +#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4) +#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3) +#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0) + +struct fw_eq_eth_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; + __be32 viid_pkd; + __be32 r8_lo; + __be64 r9; +}; + +#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8) +#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0) +#define FW_EQ_ETH_CMD_ALLOC (1U << 31) +#define FW_EQ_ETH_CMD_FREE (1U << 30) +#define FW_EQ_ETH_CMD_MODIFY (1U << 29) +#define FW_EQ_ETH_CMD_EQSTART (1U << 28) +#define FW_EQ_ETH_CMD_EQSTOP (1U << 27) + +#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) +#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) +#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) +#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24) +#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23) +#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22) +#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16) +#define 
FW_EQ_ETH_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) + +struct fw_eq_ctrl_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 cmpliqid_eqid; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8) +#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0) + +#define FW_EQ_CTRL_CMD_ALLOC (1U << 31) +#define FW_EQ_CTRL_CMD_FREE (1U << 30) +#define FW_EQ_CTRL_CMD_MODIFY (1U << 29) +#define FW_EQ_CTRL_CMD_EQSTART (1U << 28) +#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27) + +#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0) +#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26) +#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25) +#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24) +#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23) +#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22) +#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16) +#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0) + +struct fw_eq_ofld_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8) +#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0) + +#define FW_EQ_OFLD_CMD_ALLOC (1U << 31) +#define FW_EQ_OFLD_CMD_FREE (1U << 30) +#define FW_EQ_OFLD_CMD_MODIFY (1U << 29) +#define FW_EQ_OFLD_CMD_EQSTART (1U << 28) +#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27) + +#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0) +#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26) +#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25) +#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24) +#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23) +#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22) +#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16) +#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0) + +/* + * Macros for VIID parsing: + * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number + */ +#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) +#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) +#define 
FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) + +struct fw_vi_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 viid_pkd; + u8 mac[6]; + u8 portid_pkd; + u8 nmac; + u8 nmac0[6]; + __be16 rsssize_pkd; + u8 nmac1[6]; + __be16 r7; + u8 nmac2[6]; + __be16 r8; + u8 nmac3[6]; + __be64 r9; + __be64 r10; +}; + +#define FW_VI_CMD_PFN(x) ((x) << 8) +#define FW_VI_CMD_VFN(x) ((x) << 0) +#define FW_VI_CMD_ALLOC (1U << 31) +#define FW_VI_CMD_FREE (1U << 30) +#define FW_VI_CMD_VIID(x) ((x) << 0) +#define FW_VI_CMD_PORTID(x) ((x) << 4) +#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) + +/* Special VI_MAC command index ids */ +#define FW_VI_MAC_ADD_MAC 0x3FF +#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE +#define FW_VI_MAC_MAC_BASED_FREE 0x3FD + +enum fw_vi_mac_smac { + FW_VI_MAC_MPS_TCAM_ENTRY, + FW_VI_MAC_MPS_TCAM_ONLY, + FW_VI_MAC_SMT_ONLY, + FW_VI_MAC_SMT_AND_MPSTCAM +}; + +enum fw_vi_mac_result { + FW_VI_MAC_R_SUCCESS, + FW_VI_MAC_R_F_NONEXISTENT_NOMEM, + FW_VI_MAC_R_SMAC_FAIL, + FW_VI_MAC_R_F_ACL_CHECK +}; + +struct fw_vi_mac_cmd { + __be32 op_to_viid; + __be32 freemacs_to_len16; + union fw_vi_mac { + struct fw_vi_mac_exact { + __be16 valid_to_idx; + u8 macaddr[6]; + } exact[7]; + struct fw_vi_mac_hash { + __be64 hashvec; + } hash; + } u; +}; + +#define FW_VI_MAC_CMD_VIID(x) ((x) << 0) +#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31) +#define FW_VI_MAC_CMD_HASHVECEN (1U << 23) +#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22) +#define FW_VI_MAC_CMD_VALID (1U << 15) +#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12) +#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10) +#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3) +#define FW_VI_MAC_CMD_IDX(x) ((x) << 0) +#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff) + +#define FW_RXMODE_MTU_NO_CHG 65535 + +struct fw_vi_rxmode_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be32 mtu_to_broadcasten; + __be32 r4_lo; +}; + +#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) +#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) +#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) +#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) +#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) + +struct fw_vi_enable_cmd { + __be32 op_to_viid; + __be32 ien_to_len16; + __be16 blinkdur; + __be16 r3; + __be32 r4; +}; + +#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0) +#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31) +#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30) +#define FW_VI_ENABLE_CMD_LED (1U << 29) + +/* VI VF stats offset definitions */ +#define VI_VF_NUM_STATS 16 +enum fw_vi_stats_vf_index { + FW_VI_VF_STAT_TX_BCAST_BYTES_IX, + FW_VI_VF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_MCAST_BYTES_IX, + FW_VI_VF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_UCAST_BYTES_IX, + FW_VI_VF_STAT_TX_UCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_DROP_FRAMES_IX, + FW_VI_VF_STAT_TX_OFLD_BYTES_IX, + FW_VI_VF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_VF_STAT_RX_BCAST_BYTES_IX, + FW_VI_VF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_MCAST_BYTES_IX, + FW_VI_VF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_UCAST_BYTES_IX, + FW_VI_VF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_ERR_FRAMES_IX +}; + +/* VI PF stats offset definitions */ +#define VI_PF_NUM_STATS 17 +enum fw_vi_stats_pf_index { + FW_VI_PF_STAT_TX_BCAST_BYTES_IX, + FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_MCAST_BYTES_IX, + FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_UCAST_BYTES_IX, + FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, + 
FW_VI_PF_STAT_TX_OFLD_BYTES_IX, + FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_PF_STAT_RX_BYTES_IX, + FW_VI_PF_STAT_RX_FRAMES_IX, + FW_VI_PF_STAT_RX_BCAST_BYTES_IX, + FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_MCAST_BYTES_IX, + FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_UCAST_BYTES_IX, + FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_ERR_FRAMES_IX +}; + +struct fw_vi_stats_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_vi_stats { + struct fw_vi_stats_ctl { + __be16 nstats_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_vi_stats_pf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_pf_bytes; + __be64 rx_pf_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } pf; + struct fw_vi_stats_vf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_drop_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } vf; + } u; +}; + +#define FW_VI_STATS_CMD_VIID(x) ((x) << 0) +#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12) +#define FW_VI_STATS_CMD_IX(x) ((x) << 0) + +struct fw_acl_mac_cmd { + __be32 op_to_vfn; + __be32 en_to_len16; + u8 nmac; + u8 r3[7]; + __be16 r4; + u8 macaddr0[6]; + __be16 r5; + u8 macaddr1[6]; + __be16 r6; + u8 macaddr2[6]; + __be16 r7; + u8 macaddr3[6]; +}; + +#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8) +#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0) +#define FW_ACL_MAC_CMD_EN(x) ((x) << 31) + +struct fw_acl_vlan_cmd { + __be32 op_to_vfn; + __be32 en_to_len16; + u8 nvlan; + u8 dropnovlan_fm; + u8 r3_lo[6]; + __be16 vlanid[16]; +}; + +#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8) +#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0) +#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31) +#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7) +#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6) + +enum fw_port_cap { + FW_PORT_CAP_SPEED_100M = 0x0001, + FW_PORT_CAP_SPEED_1G = 0x0002, + FW_PORT_CAP_SPEED_2_5G = 0x0004, + FW_PORT_CAP_SPEED_10G = 0x0008, + FW_PORT_CAP_SPEED_40G = 0x0010, + FW_PORT_CAP_SPEED_100G = 0x0020, + FW_PORT_CAP_FC_RX = 0x0040, + FW_PORT_CAP_FC_TX = 0x0080, + FW_PORT_CAP_ANEG = 0x0100, + FW_PORT_CAP_MDI_0 = 0x0200, + FW_PORT_CAP_MDI_1 = 0x0400, + FW_PORT_CAP_BEAN = 0x0800, + FW_PORT_CAP_PMA_LPBK = 0x1000, + FW_PORT_CAP_PCS_LPBK = 0x2000, + FW_PORT_CAP_PHYXS_LPBK = 0x4000, + FW_PORT_CAP_FAR_END_LPBK = 0x8000, +}; + +enum fw_port_mdi { + FW_PORT_MDI_UNCHANGED, + FW_PORT_MDI_AUTO, + FW_PORT_MDI_F_STRAIGHT, + FW_PORT_MDI_F_CROSSOVER +}; + +#define FW_PORT_MDI(x) ((x) << 9) + +enum fw_port_action { + FW_PORT_ACTION_L1_CFG = 0x0001, + FW_PORT_ACTION_L2_CFG = 0x0002, + FW_PORT_ACTION_GET_PORT_INFO = 0x0003, + FW_PORT_ACTION_L2_PPP_CFG = 0x0004, + FW_PORT_ACTION_L2_DCB_CFG = 0x0005, + FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, + FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, + FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, + FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020, + FW_PORT_ACTION_L1_LPBK = 0x0021, + 
FW_PORT_ACTION_L1_PMA_LPBK = 0x0022, + FW_PORT_ACTION_L1_PCS_LPBK = 0x0023, + FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024, + FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025, + FW_PORT_ACTION_PHY_RESET = 0x0040, + FW_PORT_ACTION_PMA_RESET = 0x0041, + FW_PORT_ACTION_PCS_RESET = 0x0042, + FW_PORT_ACTION_PHYXS_RESET = 0x0043, + FW_PORT_ACTION_DTEXS_REEST = 0x0044, + FW_PORT_ACTION_AN_RESET = 0x0045 +}; + +enum fw_port_l2cfg_ctlbf { + FW_PORT_L2_CTLBF_OVLAN0 = 0x01, + FW_PORT_L2_CTLBF_OVLAN1 = 0x02, + FW_PORT_L2_CTLBF_OVLAN2 = 0x04, + FW_PORT_L2_CTLBF_OVLAN3 = 0x08, + FW_PORT_L2_CTLBF_IVLAN = 0x10, + FW_PORT_L2_CTLBF_TXIPG = 0x20 +}; + +enum fw_port_dcb_cfg { + FW_PORT_DCB_CFG_PG = 0x01, + FW_PORT_DCB_CFG_PFC = 0x02, + FW_PORT_DCB_CFG_APPL = 0x04 +}; + +enum fw_port_dcb_cfg_rc { + FW_PORT_DCB_CFG_SUCCESS = 0x0, + FW_PORT_DCB_CFG_ERROR = 0x1 +}; + +struct fw_port_cmd { + __be32 op_to_portid; + __be32 action_to_len16; + union fw_port { + struct fw_port_l1cfg { + __be32 rcap; + __be32 r; + } l1cfg; + struct fw_port_l2cfg { + __be16 ctlbf_to_ivlan0; + __be16 ivlantype; + __be32 txipg_pkd; + __be16 ovlan0mask; + __be16 ovlan0type; + __be16 ovlan1mask; + __be16 ovlan1type; + __be16 ovlan2mask; + __be16 ovlan2type; + __be16 ovlan3mask; + __be16 ovlan3type; + } l2cfg; + struct fw_port_info { + __be32 lstatus_to_modtype; + __be16 pcap; + __be16 acap; + } info; + struct fw_port_ppp { + __be32 pppen_to_ncsich; + __be32 r11; + } ppp; + struct fw_port_dcb { + __be16 cfg; + u8 up_map; + u8 sf_cfgrc; + __be16 prot_ix; + u8 pe7_to_pe0; + u8 numTCPFCs; + __be32 pgid0_to_pgid7; + __be32 numTCs_oui; + u8 pgpc[8]; + } dcb; + } u; +}; + +#define FW_PORT_CMD_READ (1U << 22) + +#define FW_PORT_CMD_PORTID(x) ((x) << 0) +#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) + +#define FW_PORT_CMD_ACTION(x) ((x) << 16) + +#define FW_PORT_CMD_CTLBF(x) ((x) << 10) +#define FW_PORT_CMD_OVLAN3(x) ((x) << 7) +#define FW_PORT_CMD_OVLAN2(x) ((x) << 6) +#define FW_PORT_CMD_OVLAN1(x) ((x) << 5) +#define FW_PORT_CMD_OVLAN0(x) ((x) << 4) +#define FW_PORT_CMD_IVLAN0(x) ((x) << 3) + +#define FW_PORT_CMD_TXIPG(x) ((x) << 19) + +#define FW_PORT_CMD_LSTATUS (1U << 31) +#define FW_PORT_CMD_LSPEED(x) ((x) << 24) +#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f) +#define FW_PORT_CMD_TXPAUSE (1U << 23) +#define FW_PORT_CMD_RXPAUSE (1U << 22) +#define FW_PORT_CMD_MDIOCAP (1U << 21) +#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f) +#define FW_PORT_CMD_LPTXPAUSE (1U << 15) +#define FW_PORT_CMD_LPRXPAUSE (1U << 14) +#define FW_PORT_CMD_PTYPE_MASK 0x1f +#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK) +#define FW_PORT_CMD_MODTYPE_MASK 0x1f +#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK) + +#define FW_PORT_CMD_PPPEN(x) ((x) << 31) +#define FW_PORT_CMD_TPSRC(x) ((x) << 28) +#define FW_PORT_CMD_NCSISRC(x) ((x) << 24) + +#define FW_PORT_CMD_CH0(x) ((x) << 20) +#define FW_PORT_CMD_CH1(x) ((x) << 16) +#define FW_PORT_CMD_CH2(x) ((x) << 12) +#define FW_PORT_CMD_CH3(x) ((x) << 8) +#define FW_PORT_CMD_NCSICH(x) ((x) << 4) + +enum fw_port_type { + FW_PORT_TYPE_FIBER, + FW_PORT_TYPE_KX4, + FW_PORT_TYPE_BT_SGMII, + FW_PORT_TYPE_KX, + FW_PORT_TYPE_BT_XAUI, + FW_PORT_TYPE_KR, + FW_PORT_TYPE_CX4, + FW_PORT_TYPE_TWINAX, + + FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK +}; + +enum fw_port_module_type { + FW_PORT_MOD_TYPE_NA, + FW_PORT_MOD_TYPE_LR, + FW_PORT_MOD_TYPE_SR, + FW_PORT_MOD_TYPE_ER, + + FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK +}; + +/* port stats */ +#define FW_NUM_PORT_STATS 50 
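/*
 * Illustrative sketch, not part of the patch: a FW_PORT_CMD reply to
 * FW_PORT_ACTION_GET_PORT_INFO packs link status, speed, port type and
 * module type into the single big-endian word u.info.lstatus_to_modtype,
 * which the macros above unpack. port_link_is_up() is a hypothetical
 * helper, not a driver function.
 */
static bool port_link_is_up(const struct fw_port_cmd *p)
{
	u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);

	/* FW_PORT_CMD_LSPEED_GET(stat), FW_PORT_CMD_PTYPE_GET(stat) and
	 * FW_PORT_CMD_MODTYPE_GET(stat) pull the remaining fields out of
	 * the same word. */
	return (stat & FW_PORT_CMD_LSTATUS) != 0;
}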
+#define FW_NUM_PORT_TX_STATS 23 +#define FW_NUM_PORT_RX_STATS 27 + +enum fw_port_stats_tx_index { + FW_STAT_TX_PORT_BYTES_IX, + FW_STAT_TX_PORT_FRAMES_IX, + FW_STAT_TX_PORT_BCAST_IX, + FW_STAT_TX_PORT_MCAST_IX, + FW_STAT_TX_PORT_UCAST_IX, + FW_STAT_TX_PORT_ERROR_IX, + FW_STAT_TX_PORT_64B_IX, + FW_STAT_TX_PORT_65B_127B_IX, + FW_STAT_TX_PORT_128B_255B_IX, + FW_STAT_TX_PORT_256B_511B_IX, + FW_STAT_TX_PORT_512B_1023B_IX, + FW_STAT_TX_PORT_1024B_1518B_IX, + FW_STAT_TX_PORT_1519B_MAX_IX, + FW_STAT_TX_PORT_DROP_IX, + FW_STAT_TX_PORT_PAUSE_IX, + FW_STAT_TX_PORT_PPP0_IX, + FW_STAT_TX_PORT_PPP1_IX, + FW_STAT_TX_PORT_PPP2_IX, + FW_STAT_TX_PORT_PPP3_IX, + FW_STAT_TX_PORT_PPP4_IX, + FW_STAT_TX_PORT_PPP5_IX, + FW_STAT_TX_PORT_PPP6_IX, + FW_STAT_TX_PORT_PPP7_IX +}; + +enum fw_port_stat_rx_index { + FW_STAT_RX_PORT_BYTES_IX, + FW_STAT_RX_PORT_FRAMES_IX, + FW_STAT_RX_PORT_BCAST_IX, + FW_STAT_RX_PORT_MCAST_IX, + FW_STAT_RX_PORT_UCAST_IX, + FW_STAT_RX_PORT_MTU_ERROR_IX, + FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, + FW_STAT_RX_PORT_CRC_ERROR_IX, + FW_STAT_RX_PORT_LEN_ERROR_IX, + FW_STAT_RX_PORT_SYM_ERROR_IX, + FW_STAT_RX_PORT_64B_IX, + FW_STAT_RX_PORT_65B_127B_IX, + FW_STAT_RX_PORT_128B_255B_IX, + FW_STAT_RX_PORT_256B_511B_IX, + FW_STAT_RX_PORT_512B_1023B_IX, + FW_STAT_RX_PORT_1024B_1518B_IX, + FW_STAT_RX_PORT_1519B_MAX_IX, + FW_STAT_RX_PORT_PAUSE_IX, + FW_STAT_RX_PORT_PPP0_IX, + FW_STAT_RX_PORT_PPP1_IX, + FW_STAT_RX_PORT_PPP2_IX, + FW_STAT_RX_PORT_PPP3_IX, + FW_STAT_RX_PORT_PPP4_IX, + FW_STAT_RX_PORT_PPP5_IX, + FW_STAT_RX_PORT_PPP6_IX, + FW_STAT_RX_PORT_PPP7_IX, + FW_STAT_RX_PORT_LESS_64B_IX +}; + +struct fw_port_stats_cmd { + __be32 op_to_portid; + __be32 retval_len16; + union fw_port_stats { + struct fw_port_stats_ctl { + u8 nstats_bg_bm; + u8 tx_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 tx_drop; + __be64 tx_pause; + __be64 tx_ppp0; + __be64 tx_ppp1; + __be64 tx_ppp2; + __be64 tx_ppp3; + __be64 tx_ppp4; + __be64 tx_ppp5; + __be64 tx_ppp6; + __be64 tx_ppp7; + __be64 rx_bytes; + __be64 rx_frames; + __be64 rx_bcast; + __be64 rx_mcast; + __be64 rx_ucast; + __be64 rx_mtu_error; + __be64 rx_mtu_crc_error; + __be64 rx_crc_error; + __be64 rx_len_error; + __be64 rx_sym_error; + __be64 rx_64b; + __be64 rx_65b_127b; + __be64 rx_128b_255b; + __be64 rx_256b_511b; + __be64 rx_512b_1023b; + __be64 rx_1024b_1518b; + __be64 rx_1519b_max; + __be64 rx_pause; + __be64 rx_ppp0; + __be64 rx_ppp1; + __be64 rx_ppp2; + __be64 rx_ppp3; + __be64 rx_ppp4; + __be64 rx_ppp5; + __be64 rx_ppp6; + __be64 rx_ppp7; + __be64 rx_less_64b; + __be64 rx_bg_drop; + __be64 rx_bg_trunc; + } all; + } u; +}; + +#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4) +#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0) +#define FW_PORT_STATS_CMD_TX(x) ((x) << 7) +#define FW_PORT_STATS_CMD_IX(x) ((x) << 0) + +/* port loopback stats */ +#define FW_NUM_LB_STATS 16 +enum fw_port_lb_stats_index { + FW_STAT_LB_PORT_BYTES_IX, + FW_STAT_LB_PORT_FRAMES_IX, + FW_STAT_LB_PORT_BCAST_IX, + FW_STAT_LB_PORT_MCAST_IX, + FW_STAT_LB_PORT_UCAST_IX, + FW_STAT_LB_PORT_ERROR_IX, + FW_STAT_LB_PORT_64B_IX, + FW_STAT_LB_PORT_65B_127B_IX, + FW_STAT_LB_PORT_128B_255B_IX, + FW_STAT_LB_PORT_256B_511B_IX, + 
FW_STAT_LB_PORT_512B_1023B_IX, + FW_STAT_LB_PORT_1024B_1518B_IX, + FW_STAT_LB_PORT_1519B_MAX_IX, + FW_STAT_LB_PORT_DROP_FRAMES_IX +}; + +struct fw_port_lb_stats_cmd { + __be32 op_to_lbport; + __be32 retval_len16; + union fw_port_lb_stats { + struct fw_port_lb_stats_ctl { + u8 nstats_bg_bm; + u8 ix_pkd; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_lb_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 rx_lb_drop; + __be64 rx_lb_trunc; + } all; + } u; +}; + +#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0) +#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4) +#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0) +#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0) + +struct fw_rss_ind_tbl_cmd { + __be32 op_to_viid; +#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0) + __be32 retval_len16; + __be16 niqid; + __be16 startidx; + __be32 r3; + __be32 iq0_to_iq2; +#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20) +#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10) +#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0) + __be32 iq3_to_iq5; + __be32 iq6_to_iq8; + __be32 iq9_to_iq11; + __be32 iq12_to_iq14; + __be32 iq15_to_iq17; + __be32 iq18_to_iq20; + __be32 iq21_to_iq23; + __be32 iq24_to_iq26; + __be32 iq27_to_iq29; + __be32 iq30_iq31; + __be32 r15_lo; +}; + +struct fw_rss_glb_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + union fw_rss_glb_config { + struct fw_rss_glb_config_manual { + __be32 mode_pkd; + __be32 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_glb_config_basicvirtual { + __be32 mode_pkd; + __be32 synmapen_to_hashtoeplitz; +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4) +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3) +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2) +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1) +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0) + __be64 r8; + __be64 r9; + } basicvirtual; + } u; +}; + +#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) + +#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 +#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 + +struct fw_rss_vi_config_cmd { + __be32 op_to_viid; +#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) + __be32 retval_len16; + union fw_rss_vi_config { + struct fw_rss_vi_config_manual { + __be64 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_vi_config_basicvirtual { + __be32 r6; + __be32 defaultq_to_ip4udpen; +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) +#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0) + __be64 r9; + __be64 r10; + } basicvirtual; + } u; +}; + +enum fw_error_type { + FW_ERROR_TYPE_EXCEPTION = 0x0, + FW_ERROR_TYPE_HWMODULE = 0x1, + FW_ERROR_TYPE_WR = 0x2, + FW_ERROR_TYPE_ACL = 0x3, +}; + +struct fw_error_cmd { + __be32 op_to_type; + __be32 len16_pkd; + union fw_error { + struct fw_error_exception { + __be32 info[6]; + } exception; 
+ struct fw_error_hwmodule { + __be32 regaddr; + __be32 regval; + } hwmodule; + struct fw_error_wr { + __be16 cidx; + __be16 pfn_vfn; + __be32 eqid; + u8 wrhdr[16]; + } wr; + struct fw_error_acl { + __be16 cidx; + __be16 pfn_vfn; + __be32 eqid; + __be16 mv_pkd; + u8 val[6]; + __be64 r4; + } acl; + } u; +}; + +struct fw_debug_cmd { + __be32 op_type; +#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff) + __be32 len16_pkd; + union fw_debug { + struct fw_debug_assert { + __be32 fcid; + __be32 line; + __be32 x; + __be32 y; + u8 filename_0_7[8]; + u8 filename_8_15[8]; + __be64 r3; + } assert; + struct fw_debug_prt { + __be16 dprtstridx; + __be16 r3[3]; + __be32 dprtstrparam0; + __be32 dprtstrparam1; + __be32 dprtstrparam2; + __be32 dprtstrparam3; + } prt; + } u; +}; + +struct fw_hdr { + u8 ver; + u8 reserved1; + __be16 len512; /* bin length in units of 512-bytes */ + __be32 fw_ver; /* firmware version */ + __be32 tp_microcode_ver; + u8 intfver_nic; + u8 intfver_vnic; + u8 intfver_ofld; + u8 intfver_ri; + u8 intfver_iscsipdu; + u8 intfver_iscsi; + u8 intfver_fcoe; + u8 reserved2; + __be32 reserved3[27]; +}; + +#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) +#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) +#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) +#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) +#endif /* _T4FW_INTERFACE_H_ */ diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index cfd09cea721..73d43c53015 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -661,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) i = 0; } + if (i == tx_ring->next_to_use) + break; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); } diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 73b260c3c65..5c98f7c2242 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -5899,7 +5899,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i /* Limit the number of tx's outstanding for hw bug */ if (id->driver_data & DEV_NEED_TX_LIMIT) { np->tx_limit = 1; - if ((id->driver_data & DEV_NEED_TX_LIMIT2) && + if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) && pci_dev->revision >= 0xA2) np->tx_limit = 0; } diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 669de028d44..080d1cea5b2 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -676,7 +676,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) priv->rx_queue[i] = NULL; for (i = 0; i < priv->num_tx_queues; i++) { - priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( + priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( sizeof (struct gfar_priv_tx_q), GFP_KERNEL); if (!priv->tx_queue[i]) { err = -ENOMEM; @@ -689,7 +689,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) } for (i = 0; i < priv->num_rx_queues; i++) { - priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( + priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( sizeof (struct gfar_priv_rx_q), GFP_KERNEL); if (!priv->rx_queue[i]) { err = -ENOMEM; @@ -1120,10 +1120,10 @@ static int gfar_probe(struct of_device *ofdev, /* provided which set of benchmarks. 
*/ printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); for (i = 0; i < priv->num_rx_queues; i++) - printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", + printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n", dev->name, i, priv->rx_queue[i]->rx_ring_size); for(i = 0; i < priv->num_tx_queues; i++) - printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", + printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n", dev->name, i, priv->tx_queue[i]->tx_ring_size); return 0; @@ -1638,13 +1638,13 @@ static void free_skb_resources(struct gfar_private *priv) /* Go through all the buffer descriptors and free their data buffers */ for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; - if(!tx_queue->tx_skbuff) + if(tx_queue->tx_skbuff) free_skb_tx_queue(tx_queue); } for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - if(!rx_queue->rx_skbuff) + if(rx_queue->rx_skbuff) free_skb_rx_queue(rx_queue); } diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index d313fae992d..74303849010 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -1814,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, retval = 0; break; case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: /* quad port adapters only support WoL on port A */ if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { wol->supported = 0; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 9b3c51ab175..c9baa2aa98c 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -1612,6 +1612,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, adapter->eeprom_wol = 0; break; case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->eeprom_wol = 0; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 471887742b0..ecde0876a78 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -1690,7 +1690,7 @@ myri10ge_set_pauseparam(struct net_device *netdev, if (pause->tx_pause != mgp->pause) return myri10ge_change_pause(mgp, pause->tx_pause); if (pause->rx_pause != mgp->pause) - return myri10ge_change_pause(mgp, pause->tx_pause); + return myri10ge_change_pause(mgp, pause->rx_pause); if (pause->autoneg != 0) return -EINVAL; return 0; diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 5adc662c4bf..fd9d6e34fda 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -493,13 +493,14 @@ static int pcmcia_get_versmac(struct pcmcia_device *p_dev, { struct net_device *dev = priv; cisparse_t parse; + u8 *buf; if (pcmcia_parse_tuple(tuple, &parse)) return -EINVAL; - if ((parse.version_1.ns > 3) && - (cvt_ascii_address(dev, - (parse.version_1.str + parse.version_1.ofs[3])))) + buf = parse.version_1.str + parse.version_1.ofs[3]; + + if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0)) return 0; return -EINVAL; @@ -528,7 +529,7 @@ static int mhz_setup(struct pcmcia_device *link) len = pcmcia_get_tuple(link, 0x81, &buf); if (buf && len >= 13) { buf[12] = '\0'; - if (cvt_ascii_address(dev, buf)) + if (cvt_ascii_address(dev, buf) == 0) rc = 0; } kfree(buf); @@ -910,7 +911,7 @@ static int smc91c92_config(struct pcmcia_device *link) if (i != 0) { printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n"); - goto config_undo; + goto 
config_failed; } smc->duplex = 0; @@ -998,6 +999,7 @@ config_undo: unregister_netdev(dev); config_failed: smc91c92_release(link); + free_netdev(dev); return -ENODEV; } /* smc91c92_config */ @@ -1606,9 +1608,12 @@ static void set_rx_mode(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; struct smc_private *smc = netdev_priv(dev); - u_int multicast_table[ 2 ] = { 0, }; + unsigned char multicast_table[8]; unsigned long flags; u_short rx_cfg_setting; + int i; + + memset(multicast_table, 0, sizeof(multicast_table)); if (dev->flags & IFF_PROMISC) { rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; @@ -1620,10 +1625,6 @@ static void set_rx_mode(struct net_device *dev) netdev_for_each_mc_addr(mc_addr, dev) { u_int position = ether_crc(6, mc_addr->dmi_addr); -#ifndef final_version /* Verify multicast address. */ - if ((mc_addr->dmi_addr[0] & 1) == 0) - continue; -#endif multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); } } @@ -1633,8 +1634,8 @@ static void set_rx_mode(struct net_device *dev) /* Load MC table and Rx setting into the chip without interrupts. */ spin_lock_irqsave(&smc->lock, flags); SMC_SELECT_BANK(3); - outl(multicast_table[0], ioaddr + MULTICAST0); - outl(multicast_table[1], ioaddr + MULTICAST4); + for (i = 0; i < 8; i++) + outb(multicast_table[i], ioaddr + MULTICAST0 + i); SMC_SELECT_BANK(0); outw(rx_cfg_setting, ioaddr + RCR); SMC_SELECT_BANK(2); diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c index a6ef266a2fe..e73ba455aa2 100644 --- a/drivers/net/qlcnic/qlcnic_hw.c +++ b/drivers/net/qlcnic/qlcnic_hw.c @@ -431,6 +431,9 @@ void qlcnic_set_multi(struct net_device *netdev) u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u32 mode = VPORT_MISS_MODE_DROP; + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + qlcnic_nic_add_mac(adapter, adapter->mac_addr); qlcnic_nic_add_mac(adapter, bcast_addr); diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 43afdb6b25e..0298d8c1dcb 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -134,7 +134,7 @@ #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ -#define MCAST_MAX 4 /* Max number multicast addresses to filter */ +#define MCAST_MAX 3 /* Max number multicast addresses to filter */ /* Descriptor status */ #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ @@ -982,9 +982,6 @@ static void r6040_multicast_list(struct net_device *dev) crc >>= 26; hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); } - /* Write the index of the hash table */ - for (i = 0; i < 4; i++) - iowrite16(hash_table[i] << 14, ioaddr + MCR1); /* Fill the MAC hash tables with their values */ iowrite16(hash_table[0], ioaddr + MAR0); iowrite16(hash_table[1], ioaddr + MAR1); @@ -1000,9 +997,9 @@ static void r6040_multicast_list(struct net_device *dev) iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); } else { - iowrite16(0xffff, ioaddr + MID_0L + 8 * i); - iowrite16(0xffff, ioaddr + MID_0M + 8 * i); - iowrite16(0xffff, ioaddr + MID_0H + 8 * i); + iowrite16(0xffff, ioaddr + MID_1L + 8 * i); + iowrite16(0xffff, ioaddr + MID_1M + 8 * i); + iowrite16(0xffff, ioaddr + MID_1H + 8 * i); } i++; } diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 96740051cdc..dbb1f5a1824 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -3227,8 +3227,8 @@ static void 
rtl8169_set_rxbufsize(struct rtl8169_private *tp, unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; if (max_frame != 16383) - printk(KERN_WARNING "WARNING! Changing of MTU on this NIC" - "May lead to frame reception errors!\n"); + printk(KERN_WARNING PFX "WARNING! Changing of MTU on this " + "NIC may lead to frame reception errors!\n"); tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; } diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index beb537dbe9a..c8fc896fc46 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -593,8 +593,10 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Setup... */ len = skb->len; if (len < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) + if (skb_padto(skb, ETH_ZLEN)) { + spin_unlock_irqrestore(&sp->tx_lock, flags); return NETDEV_TX_OK; + } len = ETH_ZLEN; } diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig index fb287649a30..eb63d44748a 100644 --- a/drivers/net/stmmac/Kconfig +++ b/drivers/net/stmmac/Kconfig @@ -2,6 +2,7 @@ config STMMAC_ETH tristate "STMicroelectronics 10/100/1000 Ethernet driver" select MII select PHYLIB + select CRC32 depends on NETDEVICES && CPU_SUBTYPE_ST40 help This is the driver for the Ethernet IPs built around a diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index a214a1627e8..4111a85ec80 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c @@ -1686,7 +1686,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) } pr_info("done!\n"); - if (!request_mem_region(res->start, (res->end - res->start), + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { pr_err("%s: ERROR: memory allocation failed" "cannot get the I/O addr 0x%x\n", @@ -1695,9 +1695,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev) goto out; } - addr = ioremap(res->start, (res->end - res->start)); + addr = ioremap(res->start, resource_size(res)); if (!addr) { - pr_err("%s: ERROR: memory mapping failed \n", __func__); + pr_err("%s: ERROR: memory mapping failed\n", __func__); ret = -ENOMEM; goto out; } @@ -1775,7 +1775,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) out: if (ret < 0) { platform_set_drvdata(pdev, NULL); - release_mem_region(res->start, (res->end - res->start)); + release_mem_region(res->start, resource_size(res)); if (addr != NULL) iounmap(addr); } @@ -1813,7 +1813,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev) iounmap((void *)ndev->base_addr); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, (res->end - res->start)); + release_mem_region(res->start, resource_size(res)); free_netdev(ndev); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 96c39bddc78..43265207d46 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -387,6 +387,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) } } + /* Orphan the skb - required as we might hang on to it * for an indefinite time.
*/ + skb_orphan(skb); + /* Enqueue packet */ skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); dev->trans_start = jiffies; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 6fb783ce20b..b0577dd1a42 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -327,6 +327,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) struct scatterlist sg[2]; int err; + sg_init_table(sg, 2); skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); if (unlikely(!skb)) return -ENOMEM; @@ -352,6 +353,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) char *p; int i, err, offset; + sg_init_table(sg, MAX_SKB_FRAGS + 2); /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { first = get_a_page(vi, gfp); diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index b9b9d6b01c0..941f053e650 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev) ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); } +static void ppp_close(struct net_device *dev) +{ + ppp_tx_flush(); +} + static struct hdlc_proto proto = { .start = ppp_start, .stop = ppp_stop, + .close = ppp_close, .type_trans = ppp_type_trans, .ioctl = ppp_ioctl, .netif_rx = ppp_rx, diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index 0b0d2dc2f38..99a6da464bd 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c @@ -95,6 +95,8 @@ static struct usb_device_id ar9170_usb_ids[] = { { USB_DEVICE(0x04bb, 0x093f) }, /* AVM FRITZ!WLAN USB Stick N */ { USB_DEVICE(0x057C, 0x8401) }, + /* NEC WL300NU-G */ + { USB_DEVICE(0x0409, 0x0249) }, /* AVM FRITZ!WLAN USB Stick N 2.4 */ { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY }, @@ -417,7 +419,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd, spin_unlock_irqrestore(&aru->common.cmdlock, flags); usb_fill_int_urb(urb, aru->udev, - usb_sndbulkpipe(aru->udev, AR9170_EP_CMD), + usb_sndintpipe(aru->udev, AR9170_EP_CMD), aru->common.cmdbuf, plen + 4, ar9170_usb_tx_urb_complete, NULL, 1); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 67ca4e5a601..115e1aeedb5 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1532,8 +1532,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) all_wiphys_idle = ath9k_all_wiphys_idle(sc); ath9k_set_wiphy_idle(aphy, idle); - if (!idle && all_wiphys_idle) - enable_radio = true; + enable_radio = (!idle && all_wiphys_idle); /* * After we unlock here its possible another wiphy diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 1bd2cd83602..8972166386c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c @@ -2015,7 +2015,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " "%d index %d\n", scd_ssn , index); freed = iwl_tx_queue_reclaim(priv, txq_id, index); - iwl_free_tfds_in_queue(priv, sta_id, tid, freed); + if (qc) + iwl_free_tfds_in_queue(priv, sta_id, + tid, freed); if (priv->mac80211_registered && (iwl_queue_space(&txq->q) > txq->q.low_mark) && @@ -2042,13 +2044,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, freed = iwl_tx_queue_reclaim(priv, txq_id, index); if (qc && likely(sta_id != 
IWL_INVALID_STATION)) - priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); + else if (sta_id == IWL_INVALID_STATION) + IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); if (priv->mac80211_registered && (iwl_queue_space(&txq->q) > txq->q.low_mark)) iwl_wake_queue(priv, txq_id); } - if (qc && likely(sta_id != IWL_INVALID_STATION)) iwl_txq_check_empty(priv, sta_id, tid, txq_id); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 35f819ac87a..1460116d329 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -346,6 +346,17 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags) !!(rate_n_flags & RATE_MCS_ANT_C_MSK); } +/* + * Static function to get the expected throughput from an iwl_scale_tbl_info + * that wraps a NULL pointer check + */ +static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) +{ + if (tbl->expected_tpt) + return tbl->expected_tpt[rs_index]; + return 0; +} + /** * rs_collect_tx_data - Update the success/failure sliding window * @@ -353,19 +364,21 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags) * at this rate. window->data contains the bitmask of successful * packets. */ -static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, - int scale_index, s32 tpt, int attempts, - int successes) +static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes) { struct iwl_rate_scale_data *window = NULL; static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); - s32 fail_count; + s32 fail_count, tpt; if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) return -EINVAL; /* Select window for current tx bit rate */ - window = &(windows[scale_index]); + window = &(tbl->win[scale_index]); + + /* Get expected throughput */ + tpt = get_expected_tpt(tbl, scale_index); /* * Keep track of only the latest 62 tx frame attempts in this rate's @@ -739,16 +752,6 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a, return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && (a->is_SGI == b->is_SGI); } -/* - * Static function to get the expected throughput from an iwl_scale_tbl_info - * that wraps a NULL pointer check - */ -static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) -{ - if (tbl->expected_tpt) - return tbl->expected_tpt[rs_index]; - return 0; -} /* * mac80211 sends us Tx status @@ -765,12 +768,10 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_priv *priv = (struct iwl_priv *)priv_r; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct iwl_rate_scale_data *window = NULL; enum mac80211_rate_control_flags mac_flags; u32 tx_rate; struct iwl_scale_tbl_info tbl_type; - struct iwl_scale_tbl_info *curr_tbl, *other_tbl; - s32 tpt = 0; + struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); @@ -853,7 +854,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); return; } - window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]); /* * Updating the frame history depends on whether packets were @@ -866,8 +866,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, tx_rate = 
le32_to_cpu(table->rs_table[0].rate_n_flags); rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index); - tpt = get_expected_tpt(curr_tbl, rs_index); - rs_collect_tx_data(window, rs_index, tpt, + rs_collect_tx_data(curr_tbl, rs_index, info->status.ampdu_ack_len, info->status.ampdu_ack_map); @@ -897,19 +896,13 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, * table as active/search. */ if (table_type_matches(&tbl_type, curr_tbl)) - tpt = get_expected_tpt(curr_tbl, rs_index); + tmp_tbl = curr_tbl; else if (table_type_matches(&tbl_type, other_tbl)) - tpt = get_expected_tpt(other_tbl, rs_index); + tmp_tbl = other_tbl; else continue; - - /* Constants mean 1 transmission, 0 successes */ - if (i < retries) - rs_collect_tx_data(window, rs_index, tpt, 1, - 0); - else - rs_collect_tx_data(window, rs_index, tpt, 1, - legacy_success); + rs_collect_tx_data(tmp_tbl, rs_index, 1, + i < retries ? 0 : legacy_success); } /* Update success/fail counts if not searching for new mode */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 5e0c6bf3fbb..8b8e3e1cbb4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -1259,7 +1259,15 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) /* Ack/clear/reset pending uCode interrupts. * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, */ - iwl_write32(priv, CSR_INT, priv->inta); + /* There is a hardware bug in the interrupt mask function that some + * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if + * they are disabled in the CSR_INT_MASK register. Furthermore, the + * ICT interrupt handling mechanism has another bug that might cause + * these unmasked interrupts to fail to be detected. We work around + * the hardware bugs here by ACKing all the possible interrupts so + * that interrupt coalescing can still be achieved. + */ + iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask); inta = priv->inta; @@ -2645,7 +2653,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv) BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); - hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS; /* diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c index de3b3f403d1..8b516c5ff0b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-calib.c @@ -808,6 +808,18 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, } } + /* + * The above algorithm sometimes fails when the ucode + * reports 0 for all chains. It's not clear why that + * happens to start with, but it then causes trouble + * because this can make us enable more chains than the + * hardware really has. + * + * To be safe, simply mask out any chains that we know + * are not on the device.
+ */ + active_chains &= priv->hw_params.valid_rx_ant; + num_tx_chains = 0; for (i = 0; i < NUM_RX_CHAINS; i++) { /* loops on all the bits of diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index db050b81123..3352f708663 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -308,10 +308,13 @@ int iwl_hw_nic_init(struct iwl_priv *priv) spin_unlock_irqrestore(&priv->lock, flags); - /* Allocate and init all Tx and Command queues */ - ret = iwl_txq_ctx_reset(priv); - if (ret) - return ret; + /* Allocate or reset and init all Tx and Command queues */ + if (!priv->txq) { + ret = iwl_txq_ctx_alloc(priv); + if (ret) + return ret; + } else + iwl_txq_ctx_reset(priv); set_bit(STATUS_INIT, &priv->status); diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 4ef7739f9e8..732590f5fe3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -442,7 +442,8 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); /***************************************************** * TX ******************************************************/ -int iwl_txq_ctx_reset(struct iwl_priv *priv); +int iwl_txq_ctx_alloc(struct iwl_priv *priv); +void iwl_txq_ctx_reset(struct iwl_priv *priv); void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq, @@ -456,6 +457,8 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv, void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, int slots_num, u32 txq_id); +void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id); void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index f0b7e6cfbe4..8dd0c036d54 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -194,10 +194,34 @@ void iwl_cmd_queue_free(struct iwl_priv *priv) struct iwl_queue *q = &txq->q; struct device *dev = &priv->pci_dev->dev; int i; + bool huge = false; if (q->n_bd == 0) return; + for (; q->read_ptr != q->write_ptr; + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { + /* we have no way to tell if it is a huge cmd ATM */ + i = get_cmd_index(q, q->read_ptr, 0); + + if (txq->meta[i].flags & CMD_SIZE_HUGE) { + huge = true; + continue; + } + + pci_unmap_single(priv->pci_dev, + pci_unmap_addr(&txq->meta[i], mapping), + pci_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); + } + if (huge) { + i = q->n_window; + pci_unmap_single(priv->pci_dev, + pci_unmap_addr(&txq->meta[i], mapping), + pci_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); + } + /* De-alloc array of command/tx buffers */ for (i = 0; i <= TFD_CMD_SLOTS; i++) kfree(txq->cmd[i]); @@ -410,6 +434,26 @@ out_free_arrays: } EXPORT_SYMBOL(iwl_tx_queue_init); +void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id) +{ + int actual_slots = slots_num; + + if (txq_id == IWL_CMD_QUEUE_NUM) + actual_slots++; + + memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); + + txq->need_update = 
0; + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + /* Tell device where to find queue */ + priv->cfg->ops->lib->txq_init(priv, txq); +} +EXPORT_SYMBOL(iwl_tx_queue_reset); + /** * iwl_hw_txq_ctx_free - Free TXQ Context * @@ -421,8 +465,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv) /* Tx queues */ if (priv->txq) { - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; - txq_id++) + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) if (txq_id == IWL_CMD_QUEUE_NUM) iwl_cmd_queue_free(priv); else @@ -438,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv) EXPORT_SYMBOL(iwl_hw_txq_ctx_free); /** - * iwl_txq_ctx_reset - Reset TX queue context - * Destroys all DMA structures and initialize them again + * iwl_txq_ctx_alloc - allocate TX queue context + * Allocate all Tx DMA structures and initialize them * * @param priv * @return error code */ -int iwl_txq_ctx_reset(struct iwl_priv *priv) +int iwl_txq_ctx_alloc(struct iwl_priv *priv) { - int ret = 0; + int ret; int txq_id, slots_num; unsigned long flags; @@ -504,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv) return ret; } +void iwl_txq_ctx_reset(struct iwl_priv *priv) +{ + int txq_id, slots_num; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Turn off all Tx DMA fifos */ + priv->cfg->ops->lib->txq_set_sched(priv, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = txq_id == IWL_CMD_QUEUE_NUM ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); + } +} + /** - * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory + * iwl_txq_ctx_stop - Stop all Tx DMA channels */ void iwl_txq_ctx_stop(struct iwl_priv *priv) { @@ -525,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv) 1000); } spin_unlock_irqrestore(&priv->lock, flags); - - /* Deallocate memory for all Tx queues */ - iwl_hw_txq_ctx_free(priv); } EXPORT_SYMBOL(iwl_txq_ctx_stop); @@ -1050,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) spin_lock_irqsave(&priv->hcmd_lock, flags); + /* If this is a huge cmd, mark the huge flag also on the meta.flags + * of the _original_ cmd. This is used for DMA mapping clean up. 
+ */ + if (cmd->flags & CMD_SIZE_HUGE) { + idx = get_cmd_index(q, q->write_ptr, 0); + txq->meta[idx].flags = CMD_SIZE_HUGE; + } + idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); out_cmd = txq->cmd[idx]; out_meta = &txq->meta[idx]; @@ -1227,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; + struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; /* If a Tx command is being handled and it isn't in the actual * command queue then a command routing bug has been introduced @@ -1240,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) return; } - cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); - cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; - meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index]; + /* If this is a huge cmd, clear the huge flag on the meta.flags + * of the _original_ cmd, so that iwl_cmd_queue_free won't unmap + * the DMA buffer for the scan (huge) command. + */ + if (huge) { + cmd_index = get_cmd_index(&txq->q, index, 0); + txq->meta[cmd_index].flags = 0; + } + cmd_index = get_cmd_index(&txq->q, index, huge); + cmd = txq->cmd[cmd_index]; + meta = &txq->meta[cmd_index]; pci_unmap_single(priv->pci_dev, pci_unmap_addr(meta, mapping), @@ -1264,6 +1344,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) get_cmd_string(cmd->hdr.cmd)); wake_up_interruptible(&priv->wait_command_queue); } + meta->flags = 0; } EXPORT_SYMBOL(iwl_tx_cmd_complete); diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 1eaa0052c11..b55e4f39a9e 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -1956,7 +1956,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv, { int i; - for (i = 0; i < IWL_RATE_COUNT; i++) { + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { rates[i].bitrate = iwl3945_rates[i].ieee * 5; rates[i].hw_value = i; /* Rate scaling will work on indexes */ rates[i].hw_value_short = i; @@ -3922,7 +3922,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); - hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS; hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index e196b84914d..ce7bec402a3 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -173,6 +173,8 @@ int lbs_cfg_register(struct lbs_private *priv) if (ret < 0) lbs_pr_err("cannot register wiphy device\n"); + priv->wiphy_registered = true; + ret = register_netdev(priv->dev); if (ret) lbs_pr_err("cannot register network device\n"); @@ -191,9 +193,11 @@ void lbs_cfg_free(struct lbs_private *priv) if (!wdev) return; - if (wdev->wiphy) { + if (priv->wiphy_registered) wiphy_unregister(wdev->wiphy); + + if (wdev->wiphy) wiphy_free(wdev->wiphy); - } + kfree(wdev); } diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index 6977ee82021..6875e1498bd 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -36,6 +36,7 @@ struct lbs_private { /* CFG80211 */ struct wireless_dev *wdev; + bool wiphy_registered; /* Mesh */ struct
net_device *mesh_dev; /* Virtual device */ diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 89354c29f08..12fdcb25fd3 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -3852,6 +3852,7 @@ MODULE_FIRMWARE("mwl8k/helper_8366.fw"); MODULE_FIRMWARE("mwl8k/fmimage_8366.fw"); static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { + { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 762952d688e..743a6c68b29 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -36,6 +36,7 @@ MODULE_FIRMWARE("isl3887usb"); static struct usb_device_id p54u_table[] __devinitdata = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ + {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index d2cc4458477..8ebb705fe10 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -1644,6 +1644,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) unsigned int i; /* + * Disable powersaving as default. + */ + rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + /* * Initialize all hw fields. */ rt2x00dev->hw->flags = diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 58c7f218019..c015ce9fdd0 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -813,9 +813,9 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev, rt2800_rfcsr_write(rt2x00dev, 24, rt2x00dev->calibration[conf_is_ht40(conf)]); - rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); + rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); - rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); + rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); } static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 2e47991eccf..559069a80a3 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -596,19 +596,17 @@ struct pcmcia_align_data { struct resource_map *map; }; -static resource_size_t -pcmcia_common_align(void *align_data, const struct resource *res, - resource_size_t size, resource_size_t align) +static resource_size_t pcmcia_common_align(struct pcmcia_align_data *align_data, + resource_size_t start) { - struct pcmcia_align_data *data = align_data; - resource_size_t start; + resource_size_t ret; /* * Ensure that we have the correct start address */ - start = (res->start & ~data->mask) + data->offset; - if (start < res->start) - start += data->mask + 1; - return start; + ret = (start & ~align_data->mask) + align_data->offset; + if (ret < start) + ret += align_data->mask + 1; + return ret; } static resource_size_t @@ -619,29 +617,28 @@ pcmcia_align(void *align_data, const struct resource *res, struct resource_map *m; resource_size_t start; - start = pcmcia_common_align(data, res, size, align); + 
start = pcmcia_common_align(data, res->start); for (m = data->map->next; m != data->map; m = m->next) { - unsigned long start = m->base; - unsigned long end = m->base + m->num - 1; + unsigned long map_start = m->base; + unsigned long map_end = m->base + m->num - 1; /* * If the lower resources are not available, try aligning * to this entry of the resource database to see if it'll * fit here. */ - if (res->start < start) { - start = pcmcia_common_align(data, res, size, align); - } + if (start < map_start) + start = pcmcia_common_align(data, map_start); /* * If we're above the area which was passed in, there's * no point proceeding. */ - if (res->start >= res->end) + if (start >= res->end) break; - if ((res->start + size - 1) <= end) + if ((start + size - 1) <= map_end) break; } diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c index 1190bad4297..2f795ce2b93 100644 --- a/drivers/platform/x86/intel_menlow.c +++ b/drivers/platform/x86/intel_menlow.c @@ -397,6 +397,7 @@ static int intel_menlow_add_one_attribute(char *name, int mode, void *show, if (!attr) return -ENOMEM; + sysfs_attr_init(&attr->attr.attr); /* That is consistent naming :D */ attr->attr.attr.name = name; attr->attr.attr.mode = mode; attr->attr.show = show; diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index c6c552f681b..35bb44af49b 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -274,12 +274,33 @@ static void pnpacpi_parse_allocated_busresource(struct pnp_dev *dev, pnp_add_bus_resource(dev, start, end); } +static u64 addr_space_length(struct pnp_dev *dev, u64 min, u64 max, u64 len) +{ + u64 max_len; + + max_len = max - min + 1; + if (len <= max_len) + return len; + + /* + * Per 6.4.3.5, _LEN cannot exceed _MAX - _MIN + 1, but some BIOSes + * don't do this correctly, e.g., + * https://bugzilla.kernel.org/show_bug.cgi?id=15480 + */ + dev_info(&dev->dev, + "resource length %#llx doesn't fit in %#llx-%#llx, trimming\n", + (unsigned long long) len, (unsigned long long) min, + (unsigned long long) max); + return max_len; +} + static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, struct acpi_resource *res) { struct acpi_resource_address64 addr, *p = &addr; acpi_status status; int window; + u64 len; status = acpi_resource_to_address64(res, p); if (!ACPI_SUCCESS(status)) { @@ -288,20 +309,18 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, return; } + len = addr_space_length(dev, p->minimum, p->maximum, p->address_length); window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; if (p->resource_type == ACPI_MEMORY_RANGE) - pnpacpi_parse_allocated_memresource(dev, - p->minimum, p->address_length, + pnpacpi_parse_allocated_memresource(dev, p->minimum, len, p->info.mem.write_protect, window); else if (p->resource_type == ACPI_IO_RANGE) - pnpacpi_parse_allocated_ioresource(dev, - p->minimum, p->address_length, + pnpacpi_parse_allocated_ioresource(dev, p->minimum, len, p->granularity == 0xfff ? 
ACPI_DECODE_10 : ACPI_DECODE_16, window); else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) - pnpacpi_parse_allocated_busresource(dev, p->minimum, - p->address_length); + pnpacpi_parse_allocated_busresource(dev, p->minimum, len); } static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, @@ -309,21 +328,20 @@ static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, { struct acpi_resource_extended_address64 *p = &res->data.ext_address64; int window; + u64 len; + len = addr_space_length(dev, p->minimum, p->maximum, p->address_length); window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; if (p->resource_type == ACPI_MEMORY_RANGE) - pnpacpi_parse_allocated_memresource(dev, - p->minimum, p->address_length, + pnpacpi_parse_allocated_memresource(dev, p->minimum, len, p->info.mem.write_protect, window); else if (p->resource_type == ACPI_IO_RANGE) - pnpacpi_parse_allocated_ioresource(dev, - p->minimum, p->address_length, + pnpacpi_parse_allocated_ioresource(dev, p->minimum, len, p->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16, window); else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) - pnpacpi_parse_allocated_busresource(dev, p->minimum, - p->address_length); + pnpacpi_parse_allocated_busresource(dev, p->minimum, len); } static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index c77f6f72f95..d71fe61db1d 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -384,21 +384,26 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) struct rtc_device *rtc; struct rtc_plat_data *pdata = NULL; u32 reg; - int ret, rate; + unsigned long rate; + int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; - pdata->ioaddr = ioremap(res->start, resource_size(res)); + if (!devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), pdev->name)) + return -EBUSY; + + pdata->ioaddr = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); clk = clk_get(&pdev->dev, "ckil"); if (IS_ERR(clk)) { - iounmap(pdata->ioaddr); ret = PTR_ERR(clk); goto exit_free_pdata; } @@ -413,8 +418,7 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) else if (rate == 38400) reg = RTC_INPUT_CLK_38400HZ; else { - dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", - clk_get_rate(clk)); + dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate); ret = -EINVAL; goto exit_free_pdata; } @@ -450,8 +454,8 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) pdata->irq = platform_get_irq(pdev, 0); if (pdata->irq >= 0 && - request_irq(pdata->irq, mxc_rtc_interrupt, IRQF_SHARED, - pdev->name, pdev) < 0) { + devm_request_irq(&pdev->dev, pdata->irq, mxc_rtc_interrupt, + IRQF_SHARED, pdev->name, pdev) < 0) { dev_warn(&pdev->dev, "interrupt not available.\n"); pdata->irq = -1; } @@ -459,10 +463,10 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) return 0; exit_put_clk: + clk_disable(pdata->clk); clk_put(pdata->clk); exit_free_pdata: - kfree(pdata); return ret; } @@ -473,12 +477,8 @@ static int __exit mxc_rtc_remove(struct platform_device *pdev) rtc_device_unregister(pdata->rtc); - if (pdata->irq >= 0) - free_irq(pdata->irq, pdev); - clk_disable(pdata->clk); clk_put(pdata->clk); - kfree(pdata); platform_set_drvdata(pdev, NULL); return 0; diff --git 
a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index 2aecf7f2136..7ad30e72f86 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c @@ -85,7 +85,7 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write, rc = copy_from_user(buf, buffer, sizeof(buf)); if (rc != 0) return -EFAULT; - buf[len - 1] = '\0'; + buf[sizeof(buf) - 1] = '\0'; if (strict_strtoul(buf, 0, &val) != 0) return -EINVAL; if (val != 0 && val != 1) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 58c62ff42ab..8b827f37b03 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2186,7 +2186,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); gd->driverfs_dev = &sdp->sdev_gendev; - gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS; + gd->flags = GENHD_FL_EXT_DEVT; if (sdp->removable) gd->flags |= GENHD_FL_REMOVABLE; diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index f1dcd7969a5..0e8d3522461 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c @@ -246,20 +246,12 @@ static struct pci_controller ssb_pcicore_controller = { .pci_ops = &ssb_pcicore_pciops, .io_resource = &ssb_pcicore_io_resource, .mem_resource = &ssb_pcicore_mem_resource, - .mem_offset = 0x24000000, }; -static u32 ssb_pcicore_pcibus_iobase = 0x100; -static u32 ssb_pcicore_pcibus_membase = SSB_PCI_DMA; - /* This function is called when doing a pci_enable_device(). * We must first check if the device is a device on the PCI-core bridge. */ int ssb_pcicore_plat_dev_init(struct pci_dev *d) { - struct resource *res; - int pos, size; - u32 *base; - if (d->bus->ops != &ssb_pcicore_pciops) { /* This is not a device on the PCI-core bridge. */ return -ENODEV; @@ -268,27 +260,6 @@ int ssb_pcicore_plat_dev_init(struct pci_dev *d) ssb_printk(KERN_INFO "PCI: Fixing up device %s\n", pci_name(d)); - /* Fix up resource bases */ - for (pos = 0; pos < 6; pos++) { - res = &d->resource[pos]; - if (res->flags & IORESOURCE_IO) - base = &ssb_pcicore_pcibus_iobase; - else - base = &ssb_pcicore_pcibus_membase; - res->flags |= IORESOURCE_PCI_FIXED; - if (res->end) { - size = res->end - res->start + 1; - if (*base & (size - 1)) - *base = (*base + size) & ~(size - 1); - res->start = *base; - res->end = res->start + size - 1; - *base += size; - pci_write_config_dword(d, PCI_BASE_ADDRESS_0 + (pos << 2), res->start); - } - /* Fix up PCI bridge BAR0 only */ - if (d->bus->number == 0 && PCI_SLOT(d->devfn) == 0) - break; - } /* Fix up interrupt lines */ d->irq = ssb_mips_irq(extpci_core->dev) + 2; pci_write_config_byte(d, PCI_INTERRUPT_LINE, d->irq); diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 9b6297f07b8..13c72c62932 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -506,6 +506,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) tz->temp_input.attr.attr.name = tz->temp_input.name; tz->temp_input.attr.attr.mode = 0444; tz->temp_input.attr.show = temp_input_show; + sysfs_attr_init(&tz->temp_input.attr.attr); result = device_create_file(hwmon->device, &tz->temp_input.attr); if (result) goto unregister_hwmon_device; @@ -518,6 +519,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) tz->temp_crit.attr.attr.name = tz->temp_crit.name; tz->temp_crit.attr.attr.mode = 0444; tz->temp_crit.attr.show = temp_crit_show; + sysfs_attr_init(&tz->temp_crit.attr.attr); result = device_create_file(hwmon->device, &tz->temp_crit.attr); if (result) @@ 
-726,6 +728,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, goto release_idr; sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); + sysfs_attr_init(&dev->attr.attr); dev->attr.attr.name = dev->attr_name; dev->attr.attr.mode = 0444; dev->attr.show = thermal_cooling_device_trip_point_show; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 5be11c99e18..e69d238c5af 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -236,6 +236,10 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem, int log_all) { int i; + + if (!mem) + return 0; + for (i = 0; i < mem->nregions; ++i) { struct vhost_memory_region *m = mem->regions + i; unsigned long a = m->userspace_addr; diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index 4637bcbe03a..994358a4f30 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c @@ -1536,6 +1536,7 @@ static int __devinit fsl_diu_probe(struct of_device *ofdev, goto error; } + sysfs_attr_init(&machine_data->dev_attr.attr); machine_data->dev_attr.attr.name = "monitor"; machine_data->dev_attr.attr.mode = S_IRUGO|S_IWUSR; machine_data->dev_attr.show = show_monitor; diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c index 841424912ec..fe92eed6da7 100644 --- a/drivers/video/mb862xx/mb862xxfb_accel.c +++ b/drivers/video/mb862xx/mb862xxfb_accel.c @@ -4,7 +4,7 @@ * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver acceleration support * * (C) 2007 Alexander Shishkin <virtuoso@slind.org> - * (C) 2009 Valentin Sitdikov <valentin.sitdikov@siemens.com> + * (C) 2009 Valentin Sitdikov <v.sitdikov@gmail.com> * (C) 2009 Siemens AG * * This program is free software; you can redistribute it and/or modify @@ -16,6 +16,7 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #if defined(CONFIG_OF) @@ -330,3 +331,5 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres) info->fix.accel = 0xff; /*FIXME: add right define */ } EXPORT_SYMBOL(mb862xxfb_init_accel); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c index 54ac91dc070..0cadf7aee27 100644 --- a/drivers/video/vesafb.c +++ b/drivers/video/vesafb.c @@ -225,7 +225,7 @@ static int __init vesafb_setup(char *options) return 0; } -static int __devinit vesafb_probe(struct platform_device *dev) +static int __init vesafb_probe(struct platform_device *dev) { struct fb_info *info; int i, err; @@ -476,7 +476,6 @@ err: } static struct platform_driver vesafb_driver = { - .probe = vesafb_probe, .driver = { .name = "vesafb", }, @@ -492,20 +491,21 @@ static int __init vesafb_init(void) /* ignore error return of fb_get_options */ fb_get_options("vesafb", &option); vesafb_setup(option); - ret = platform_driver_register(&vesafb_driver); - if (!ret) { - vesafb_device = platform_device_alloc("vesafb", 0); + vesafb_device = platform_device_alloc("vesafb", 0); + if (!vesafb_device) + return -ENOMEM; - if (vesafb_device) - ret = platform_device_add(vesafb_device); - else - ret = -ENOMEM; + ret = platform_device_add(vesafb_device); + if (!ret) { + ret = platform_driver_probe(&vesafb_driver, vesafb_probe); + if (ret) + platform_device_del(vesafb_device); + } - if (ret) { - platform_device_put(vesafb_device); - platform_driver_unregister(&vesafb_driver); - } + if (ret) { + platform_device_put(vesafb_device); + vesafb_device = NULL; } return ret; diff --git 
a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index bdcdbd53da8..0bf5020d0d3 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -55,11 +55,6 @@ config SOFT_WATCHDOG To compile this driver as a module, choose M here: the module will be called softdog. -config MAX63XX_WATCHDOG - tristate "Max63xx watchdog" - help - Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. - config WM831X_WATCHDOG tristate "WM831x watchdog" depends on MFD_WM831X @@ -199,10 +194,10 @@ config EP93XX_WATCHDOG config OMAP_WATCHDOG tristate "OMAP Watchdog" - depends on ARCH_OMAP16XX || ARCH_OMAP2 || ARCH_OMAP3 + depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS help - Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog. Say 'Y' - here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog timer. + Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog. Say 'Y' + here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. config PNX4008_WATCHDOG tristate "PNX4008 Watchdog" @@ -305,6 +300,12 @@ config TS72XX_WATCHDOG To compile this driver as a module, choose M here: the module will be called ts72xx_wdt. +config MAX63XX_WATCHDOG + tristate "Max63xx watchdog" + depends on ARM && HAS_IOMEM + help + Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. + # AVR32 Architecture config AT32AP700X_WDT diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 8b724aad682..500d38342e1 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c @@ -44,7 +44,7 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT; #ifdef CONFIG_FSL_BOOKE #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) -#define WDTP_MASK (WDTP(0)) +#define WDTP_MASK (WDTP(0x3f)) #else #define WDTP(x) (TCR_WP(x)) #define WDTP_MASK (TCR_WP_MASK) diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 72f5a3707b4..809e7167a62 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -442,7 +442,7 @@ static void hpwdt_ping(void) static int hpwdt_change_timer(int new_margin) { /* Arbitrary, can't find the card's limits */ - if (new_margin < 30 || new_margin > 600) { + if (new_margin < 5 || new_margin > 600) { printk(KERN_WARNING "hpwdt: New value passed in is invalid: %d seconds.\n", new_margin); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 44bc6aa46ed..8da88603537 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -115,8 +115,37 @@ enum iTCO_chipsets { TCO_3420, /* 3420 */ TCO_3450, /* 3450 */ TCO_EP80579, /* EP80579 */ - TCO_CPTD, /* CPT Desktop */ - TCO_CPTM, /* CPT Mobile */ + TCO_CPT1, /* Cougar Point */ + TCO_CPT2, /* Cougar Point Desktop */ + TCO_CPT3, /* Cougar Point Mobile */ + TCO_CPT4, /* Cougar Point */ + TCO_CPT5, /* Cougar Point */ + TCO_CPT6, /* Cougar Point */ + TCO_CPT7, /* Cougar Point */ + TCO_CPT8, /* Cougar Point */ + TCO_CPT9, /* Cougar Point */ + TCO_CPT10, /* Cougar Point */ + TCO_CPT11, /* Cougar Point */ + TCO_CPT12, /* Cougar Point */ + TCO_CPT13, /* Cougar Point */ + TCO_CPT14, /* Cougar Point */ + TCO_CPT15, /* Cougar Point */ + TCO_CPT16, /* Cougar Point */ + TCO_CPT17, /* Cougar Point */ + TCO_CPT18, /* Cougar Point */ + TCO_CPT19, /* Cougar Point */ + TCO_CPT20, /* Cougar Point */ + TCO_CPT21, /* Cougar Point */ + TCO_CPT22, /* Cougar Point */ + TCO_CPT23, /* Cougar Point */ + TCO_CPT24, /* Cougar Point */ + TCO_CPT25, /* Cougar Point */ + TCO_CPT26, /* Cougar Point */ + TCO_CPT27, /* Cougar Point */ + TCO_CPT28, /* Cougar Point 
*/ + TCO_CPT29, /* Cougar Point */ + TCO_CPT30, /* Cougar Point */ + TCO_CPT31, /* Cougar Point */ }; static struct { @@ -173,8 +202,37 @@ static struct { {"3420", 2}, {"3450", 2}, {"EP80579", 2}, - {"CPT Desktop", 2}, - {"CPT Mobile", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, + {"Cougar Point", 2}, {NULL, 0} }; @@ -259,8 +317,37 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = { { ITCO_PCI_DEVICE(0x3b14, TCO_3420)}, { ITCO_PCI_DEVICE(0x3b16, TCO_3450)}, { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)}, - { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)}, - { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)}, + { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)}, + { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)}, + { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)}, + { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)}, + { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)}, + { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)}, + { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)}, + { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)}, + { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)}, + { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)}, + { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)}, + { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)}, + { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)}, + { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)}, + { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)}, + { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)}, + { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)}, + { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)}, + { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)}, + { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)}, + { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)}, + { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)}, + { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)}, + { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)}, + { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)}, + { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)}, + { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)}, + { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)}, + { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)}, + { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)}, + { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)}, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 75f3a83c036..3053ff05ca4 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c @@ -154,9 +154,14 @@ static void max63xx_wdt_enable(struct max63xx_timeout *entry) static void max63xx_wdt_disable(void) { + u8 val; + spin_lock(&io_lock); - __raw_writeb(3, wdt_base); + val = __raw_readb(wdt_base); + val &= ~MAX6369_WDSET; + val |= 3; + __raw_writeb(val, wdt_base); spin_unlock(&io_lock); diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c index 435ec2aed4f..2d22e996e99 100644 --- a/drivers/watchdog/pika_wdt.c +++ b/drivers/watchdog/pika_wdt.c @@ -52,7 +52,7 @@ static struct { struct timer_list timer; /* The timer that pings the watchdog */ } pikawdt_private; -static const struct watchdog_info ident = { +static struct watchdog_info ident = { .identity = DRV_NAME, .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | @@ -554,7 +554,7 @@ static int 
__bio_add_page(struct request_queue *q, struct bio *bio, struct page .bi_rw = bio->bi_rw, }; - if (q->merge_bvec_fn(q, &bvm, prev) < len) { + if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) { prev->bv_len -= len; return 0; } @@ -607,7 +607,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page * merge_bvec_fn() returns number of bytes it can accept * at this offset */ - if (q->merge_bvec_fn(q, &bvm, bvec) < len) { + if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) { bvec->bv_page = NULL; bvec->bv_len = 0; bvec->bv_offset = 0; diff --git a/fs/block_dev.c b/fs/block_dev.c index d11d0289f3d..2a6d0193f13 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -404,7 +404,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin) * NULL first argument is nfsd_sync_dir() and that's not a directory. */ -static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) +int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync) { struct block_device *bdev = I_BDEV(filp->f_mapping->host); int error; @@ -418,6 +418,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) error = 0; return error; } +EXPORT_SYMBOL(blkdev_fsync); /* * pseudo-fs @@ -1481,7 +1482,7 @@ const struct file_operations def_blk_fops = { .aio_read = generic_file_aio_read, .aio_write = blkdev_aio_write, .mmap = generic_file_mmap, - .fsync = block_fsync, + .fsync = blkdev_fsync, .unlocked_ioctl = block_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_blkdev_ioctl, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9e23ffea7f5..b34d32fdaae 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3235,7 +3235,8 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, u64 bytes) { struct btrfs_space_info *data_sinfo; - int ret = 0, committed = 0; + u64 used; + int ret = 0, committed = 0, flushed = 0; /* make sure bytes are sectorsize aligned */ bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); @@ -3247,12 +3248,21 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, again: /* make sure we have enough space to handle the data first */ spin_lock(&data_sinfo->lock); - if (data_sinfo->total_bytes - data_sinfo->bytes_used - - data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved - - data_sinfo->bytes_pinned - data_sinfo->bytes_readonly - - data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) { + used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc + + data_sinfo->bytes_reserved + data_sinfo->bytes_pinned + + data_sinfo->bytes_readonly + data_sinfo->bytes_may_use + + data_sinfo->bytes_super; + + if (used + bytes > data_sinfo->total_bytes) { struct btrfs_trans_handle *trans; + if (!flushed) { + spin_unlock(&data_sinfo->lock); + flush_delalloc(root, data_sinfo); + flushed = 1; + goto again; + } + /* * if we don't have enough free bytes in this space then we need * to alloc a new chunk. 
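The btrfs_check_data_free_space() hunk above does two things: it adds the flush-and-retry path, and it replaces the chain of u64 subtractions ("total - used - delalloc - ... < bytes") with "used + bytes > total_bytes". With unsigned 64-bit counters, a subtraction chain wraps around to a huge positive value if any intermediate result would go negative, which can make an exhausted volume look almost empty; summing the consumers first keeps every intermediate value well defined. Below is a minimal standalone sketch of that pattern, with invented names and plain C types rather than anything taken from the patch:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch only -- not code from the patch.  Compare by accumulation
 * instead of chained subtraction so that no intermediate u64 value can
 * wrap.  Assumes the summed byte counts stay far below 2^64, which
 * holds for byte counters on real storage.
 */
static bool data_space_exhausted(uint64_t total, const uint64_t *consumers,
				 unsigned int n, uint64_t request)
{
	uint64_t used = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		used += consumers[i];	/* bytes_used, bytes_pinned, ... */

	return used + request > total;	/* single, wrap-free comparison */
}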
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index aa7dc36dac7..8db7b14bbae 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2250,6 +2250,12 @@ again: if (!looped) calc_size = max_t(u64, min_stripe_size, calc_size); + /* + * we're about to do_div by the stripe_len so lets make sure + * we end up with something bigger than a stripe + */ + calc_size = max_t(u64, calc_size, stripe_len * 4); + do_div(calc_size, stripe_len); calc_size *= stripe_len; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index aa3cd7cc3e4..412593703d1 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -337,16 +337,15 @@ out: /* * Get ref for the oldest snapc for an inode with dirty data... that is, the * only snap context we are allowed to write back. - * - * Caller holds i_lock. */ -static struct ceph_snap_context *__get_oldest_context(struct inode *inode, - u64 *snap_size) +static struct ceph_snap_context *get_oldest_context(struct inode *inode, + u64 *snap_size) { struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_snap_context *snapc = NULL; struct ceph_cap_snap *capsnap = NULL; + spin_lock(&inode->i_lock); list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, capsnap->context, capsnap->dirty_pages); @@ -357,21 +356,11 @@ static struct ceph_snap_context *__get_oldest_context(struct inode *inode, break; } } - if (!snapc && ci->i_snap_realm) { - snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); + if (!snapc && ci->i_head_snapc) { + snapc = ceph_get_snap_context(ci->i_head_snapc); dout(" head snapc %p has %d dirty pages\n", snapc, ci->i_wrbuffer_ref_head); } - return snapc; -} - -static struct ceph_snap_context *get_oldest_context(struct inode *inode, - u64 *snap_size) -{ - struct ceph_snap_context *snapc = NULL; - - spin_lock(&inode->i_lock); - snapc = __get_oldest_context(inode, snap_size); spin_unlock(&inode->i_lock); return snapc; } @@ -392,7 +381,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) int len = PAGE_CACHE_SIZE; loff_t i_size; int err = 0; - struct ceph_snap_context *snapc; + struct ceph_snap_context *snapc, *oldest; u64 snap_size = 0; long writeback_stat; @@ -413,13 +402,16 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) dout("writepage %p page %p not dirty?\n", inode, page); goto out; } - if (snapc != get_oldest_context(inode, &snap_size)) { + oldest = get_oldest_context(inode, &snap_size); + if (snapc->seq > oldest->seq) { dout("writepage %p page %p snapc %p not writeable - noop\n", inode, page, (void *)page->private); /* we should only noop if called by kswapd */ WARN_ON((current->flags & PF_MEMALLOC) == 0); + ceph_put_snap_context(oldest); goto out; } + ceph_put_snap_context(oldest); /* is this a partial page at end of file? 
*/ if (snap_size) @@ -458,7 +450,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) ClearPagePrivate(page); end_page_writeback(page); ceph_put_wrbuffer_cap_refs(ci, 1, snapc); - ceph_put_snap_context(snapc); + ceph_put_snap_context(snapc); /* page's reference */ out: return err; } @@ -558,9 +550,9 @@ static void writepages_finish(struct ceph_osd_request *req, dout("inode %p skipping page %p\n", inode, page); wbc->pages_skipped++; } + ceph_put_snap_context((void *)page->private); page->private = 0; ClearPagePrivate(page); - ceph_put_snap_context(snapc); dout("unlocking %d %p\n", i, page); end_page_writeback(page); @@ -618,7 +610,7 @@ static int ceph_writepages_start(struct address_space *mapping, int range_whole = 0; int should_loop = 1; pgoff_t max_pages = 0, max_pages_ever = 0; - struct ceph_snap_context *snapc = NULL, *last_snapc = NULL; + struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; struct pagevec pvec; int done = 0; int rc = 0; @@ -770,9 +762,10 @@ get_more_pages: } /* only if matching snap context */ - if (snapc != (void *)page->private) { - dout("page snapc %p != oldest %p\n", - (void *)page->private, snapc); + pgsnapc = (void *)page->private; + if (pgsnapc->seq > snapc->seq) { + dout("page snapc %p %lld > oldest %p %lld\n", + pgsnapc, pgsnapc->seq, snapc, snapc->seq); unlock_page(page); if (!locked_pages) continue; /* keep looking for snap */ @@ -914,7 +907,10 @@ static int context_is_writeable_or_written(struct inode *inode, struct ceph_snap_context *snapc) { struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); - return !oldest || snapc->seq <= oldest->seq; + int ret = !oldest || snapc->seq <= oldest->seq; + + ceph_put_snap_context(oldest); + return ret; } /* @@ -936,8 +932,8 @@ static int ceph_update_writeable_page(struct file *file, int pos_in_page = pos & ~PAGE_CACHE_MASK; int end_in_page = pos_in_page + len; loff_t i_size; - struct ceph_snap_context *snapc; int r; + struct ceph_snap_context *snapc, *oldest; retry_locked: /* writepages currently holds page lock, but if we change that later, */ @@ -947,23 +943,24 @@ retry_locked: BUG_ON(!ci->i_snap_realm); down_read(&mdsc->snap_rwsem); BUG_ON(!ci->i_snap_realm->cached_context); - if (page->private && - (void *)page->private != ci->i_snap_realm->cached_context) { + snapc = (void *)page->private; + if (snapc && snapc != ci->i_head_snapc) { /* * this page is already dirty in another (older) snap * context! is it writeable now? 
*/ - snapc = get_oldest_context(inode, NULL); + oldest = get_oldest_context(inode, NULL); up_read(&mdsc->snap_rwsem); - if (snapc != (void *)page->private) { + if (snapc->seq > oldest->seq) { + ceph_put_snap_context(oldest); dout(" page %p snapc %p not current or oldest\n", - page, (void *)page->private); + page, snapc); /* * queue for writeback, and wait for snapc to * be writeable or written */ - snapc = ceph_get_snap_context((void *)page->private); + snapc = ceph_get_snap_context(snapc); unlock_page(page); ceph_queue_writeback(inode); r = wait_event_interruptible(ci->i_cap_wq, @@ -973,6 +970,7 @@ retry_locked: return r; return -EAGAIN; } + ceph_put_snap_context(oldest); /* yay, writeable, do it now (without dropping page lock) */ dout(" page %p snapc %p not current, but oldest\n", diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 3710e077a85..aa2239fa9a3 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1205,6 +1205,12 @@ retry: if (capsnap->dirty_pages || capsnap->writing) continue; + /* + * if cap writeback already occurred, we should have dropped + * the capsnap in ceph_put_wrbuffer_cap_refs. + */ + BUG_ON(capsnap->dirty == 0); + /* pick mds, take s_mutex */ mds = __ceph_get_cap_mds(ci, &mseq); if (session && session->s_mds != mds) { @@ -2118,8 +2124,8 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) } spin_unlock(&inode->i_lock); - dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had), - last ? "last" : ""); + dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had), + last ? " last" : "", put ? " put" : ""); if (last && !flushsnaps) ceph_check_caps(ci, 0, NULL); @@ -2143,7 +2149,8 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, { struct inode *inode = &ci->vfs_inode; int last = 0; - int last_snap = 0; + int complete_capsnap = 0; + int drop_capsnap = 0; int found = 0; struct ceph_cap_snap *capsnap = NULL; @@ -2166,19 +2173,32 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { if (capsnap->context == snapc) { found = 1; - capsnap->dirty_pages -= nr; - last_snap = !capsnap->dirty_pages; break; } } BUG_ON(!found); + capsnap->dirty_pages -= nr; + if (capsnap->dirty_pages == 0) { + complete_capsnap = 1; + if (capsnap->dirty == 0) + /* cap writeback completed before we created + * the cap_snap; no FLUSHSNAP is needed */ + drop_capsnap = 1; + } dout("put_wrbuffer_cap_refs on %p cap_snap %p " - " snap %lld %d/%d -> %d/%d %s%s\n", + " snap %lld %d/%d -> %d/%d %s%s%s\n", inode, capsnap, capsnap->context->seq, ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, ci->i_wrbuffer_ref, capsnap->dirty_pages, last ? " (wrbuffer last)" : "", - last_snap ? " (capsnap last)" : ""); + complete_capsnap ? " (complete capsnap)" : "", + drop_capsnap ? 
" (drop capsnap)" : ""); + if (drop_capsnap) { + ceph_put_snap_context(capsnap->context); + list_del(&capsnap->ci_item); + list_del(&capsnap->flushing_item); + ceph_put_cap_snap(capsnap); + } } spin_unlock(&inode->i_lock); @@ -2186,10 +2206,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, if (last) { ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); iput(inode); - } else if (last_snap) { + } else if (complete_capsnap) { ceph_flush_snaps(ci); wake_up(&ci->i_cap_wq); } + if (drop_capsnap) + iput(inode); } /* @@ -2465,8 +2487,8 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid, break; } WARN_ON(capsnap->dirty_pages || capsnap->writing); - dout(" removing cap_snap %p follows %lld\n", - capsnap, follows); + dout(" removing %p cap_snap %p follows %lld\n", + inode, capsnap, follows); ceph_put_snap_context(capsnap->context); list_del(&capsnap->ci_item); list_del(&capsnap->flushing_item); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 7261dc6c2ea..ea8ee2e526a 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -171,11 +171,11 @@ more: spin_lock(&inode->i_lock); spin_lock(&dcache_lock); + last = dentry; + if (err < 0) goto out_unlock; - last = dentry; - p = p->prev; filp->f_pos++; @@ -312,7 +312,7 @@ more: req->r_readdir_offset = fi->next_offset; req->r_args.readdir.frag = cpu_to_le32(frag); req->r_args.readdir.max_entries = cpu_to_le32(max_entries); - req->r_num_caps = max_entries; + req->r_num_caps = max_entries + 1; err = ceph_mdsc_do_request(mdsc, NULL, req); if (err < 0) { ceph_mdsc_put_request(req); @@ -489,6 +489,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, struct inode *inode = ceph_get_snapdir(parent); dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", dentry, dentry->d_name.len, dentry->d_name.name, inode); + BUG_ON(!d_unhashed(dentry)); d_add(dentry, inode); err = 0; } diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index aca82d55cc5..26f883c275e 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -886,6 +886,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, struct inode *in = NULL; struct ceph_mds_reply_inode *ininfo; struct ceph_vino vino; + struct ceph_client *client = ceph_sb_to_client(sb); int i = 0; int err = 0; @@ -949,7 +950,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, return err; } - if (rinfo->head->is_dentry && !req->r_aborted) { + /* + * ignore null lease/binding on snapdir ENOENT, or else we + * will have trouble splicing in the virtual snapdir later + */ + if (rinfo->head->is_dentry && !req->r_aborted && + (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, + client->mount_args->snapdir_name, + req->r_dentry->d_name.len))) { /* * lookup link rename : null -> possibly existing inode * mknod symlink mkdir : null -> new inode diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 8f1715ffbe4..cdaaa131add 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -30,6 +30,10 @@ static char tag_msg = CEPH_MSGR_TAG_MSG; static char tag_ack = CEPH_MSGR_TAG_ACK; static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; +#ifdef CONFIG_LOCKDEP +static struct lock_class_key socket_class; +#endif + static void queue_con(struct ceph_connection *con); static void con_work(struct work_struct *); @@ -228,6 +232,10 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con) con->sock = sock; sock->sk->sk_allocation = GFP_NOFS; +#ifdef CONFIG_LOCKDEP + lockdep_set_class(&sock->sk->sk_lock, &socket_class); +#endif + 
set_sock_callbacks(sock, con); dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); @@ -333,6 +341,7 @@ static void reset_connection(struct ceph_connection *con) con->out_msg = NULL; } con->in_seq = 0; + con->in_seq_acked = 0; } /* diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 21c6623c4b0..2e2c15eed82 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -314,71 +314,6 @@ bad: return ERR_PTR(err); } - -/* - * osd map - */ -void ceph_osdmap_destroy(struct ceph_osdmap *map) -{ - dout("osdmap_destroy %p\n", map); - if (map->crush) - crush_destroy(map->crush); - while (!RB_EMPTY_ROOT(&map->pg_temp)) { - struct ceph_pg_mapping *pg = - rb_entry(rb_first(&map->pg_temp), - struct ceph_pg_mapping, node); - rb_erase(&pg->node, &map->pg_temp); - kfree(pg); - } - while (!RB_EMPTY_ROOT(&map->pg_pools)) { - struct ceph_pg_pool_info *pi = - rb_entry(rb_first(&map->pg_pools), - struct ceph_pg_pool_info, node); - rb_erase(&pi->node, &map->pg_pools); - kfree(pi); - } - kfree(map->osd_state); - kfree(map->osd_weight); - kfree(map->osd_addr); - kfree(map); -} - -/* - * adjust max osd value. reallocate arrays. - */ -static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) -{ - u8 *state; - struct ceph_entity_addr *addr; - u32 *weight; - - state = kcalloc(max, sizeof(*state), GFP_NOFS); - addr = kcalloc(max, sizeof(*addr), GFP_NOFS); - weight = kcalloc(max, sizeof(*weight), GFP_NOFS); - if (state == NULL || addr == NULL || weight == NULL) { - kfree(state); - kfree(addr); - kfree(weight); - return -ENOMEM; - } - - /* copy old? */ - if (map->osd_state) { - memcpy(state, map->osd_state, map->max_osd*sizeof(*state)); - memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr)); - memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight)); - kfree(map->osd_state); - kfree(map->osd_addr); - kfree(map->osd_weight); - } - - map->osd_state = state; - map->osd_weight = weight; - map->osd_addr = addr; - map->max_osd = max; - return 0; -} - /* * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid * to a set of osds) @@ -482,6 +417,13 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id) return NULL; } +static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi) +{ + rb_erase(&pi->node, root); + kfree(pi->name); + kfree(pi); +} + void __decode_pool(void **p, struct ceph_pg_pool_info *pi) { ceph_decode_copy(p, &pi->v, sizeof(pi->v)); @@ -490,6 +432,98 @@ void __decode_pool(void **p, struct ceph_pg_pool_info *pi) *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; } +static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map) +{ + struct ceph_pg_pool_info *pi; + u32 num, len, pool; + + ceph_decode_32_safe(p, end, num, bad); + dout(" %d pool names\n", num); + while (num--) { + ceph_decode_32_safe(p, end, pool, bad); + ceph_decode_32_safe(p, end, len, bad); + dout(" pool %d len %d\n", pool, len); + pi = __lookup_pg_pool(&map->pg_pools, pool); + if (pi) { + kfree(pi->name); + pi->name = kmalloc(len + 1, GFP_NOFS); + if (pi->name) { + memcpy(pi->name, *p, len); + pi->name[len] = '\0'; + dout(" name is %s\n", pi->name); + } + } + *p += len; + } + return 0; + +bad: + return -EINVAL; +} + +/* + * osd map + */ +void ceph_osdmap_destroy(struct ceph_osdmap *map) +{ + dout("osdmap_destroy %p\n", map); + if (map->crush) + crush_destroy(map->crush); + while (!RB_EMPTY_ROOT(&map->pg_temp)) { + struct ceph_pg_mapping *pg = + rb_entry(rb_first(&map->pg_temp), + struct ceph_pg_mapping, node); + rb_erase(&pg->node, 
&map->pg_temp); + kfree(pg); + } + while (!RB_EMPTY_ROOT(&map->pg_pools)) { + struct ceph_pg_pool_info *pi = + rb_entry(rb_first(&map->pg_pools), + struct ceph_pg_pool_info, node); + __remove_pg_pool(&map->pg_pools, pi); + } + kfree(map->osd_state); + kfree(map->osd_weight); + kfree(map->osd_addr); + kfree(map); +} + +/* + * adjust max osd value. reallocate arrays. + */ +static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) +{ + u8 *state; + struct ceph_entity_addr *addr; + u32 *weight; + + state = kcalloc(max, sizeof(*state), GFP_NOFS); + addr = kcalloc(max, sizeof(*addr), GFP_NOFS); + weight = kcalloc(max, sizeof(*weight), GFP_NOFS); + if (state == NULL || addr == NULL || weight == NULL) { + kfree(state); + kfree(addr); + kfree(weight); + return -ENOMEM; + } + + /* copy old? */ + if (map->osd_state) { + memcpy(state, map->osd_state, map->max_osd*sizeof(*state)); + memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr)); + memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight)); + kfree(map->osd_state); + kfree(map->osd_addr); + kfree(map->osd_weight); + } + + map->osd_state = state; + map->osd_weight = weight; + map->osd_addr = addr; + map->max_osd = max; + return 0; +} + /* * decode a full map. */ @@ -526,7 +560,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) ceph_decode_32_safe(p, end, max, bad); while (max--) { ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); - pi = kmalloc(sizeof(*pi), GFP_NOFS); + pi = kzalloc(sizeof(*pi), GFP_NOFS); if (!pi) goto bad; pi->id = ceph_decode_32(p); @@ -539,6 +573,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) __decode_pool(p, pi); __insert_pg_pool(&map->pg_pools, pi); } + + if (version >= 5 && __decode_pool_names(p, end, map) < 0) + goto bad; + ceph_decode_32_safe(p, end, map->pool_max, bad); ceph_decode_32_safe(p, end, map->flags, bad); @@ -712,7 +750,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, } pi = __lookup_pg_pool(&map->pg_pools, pool); if (!pi) { - pi = kmalloc(sizeof(*pi), GFP_NOFS); + pi = kzalloc(sizeof(*pi), GFP_NOFS); if (!pi) { err = -ENOMEM; goto bad; @@ -722,6 +760,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, } __decode_pool(p, pi); } + if (version >= 5 && __decode_pool_names(p, end, map) < 0) + goto bad; /* old_pool */ ceph_decode_32_safe(p, end, len, bad); @@ -730,10 +770,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, ceph_decode_32_safe(p, end, pool, bad); pi = __lookup_pg_pool(&map->pg_pools, pool); - if (pi) { - rb_erase(&pi->node, &map->pg_pools); - kfree(pi); - } + if (pi) + __remove_pg_pool(&map->pg_pools, pi); } /* new_up */ diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h index 1fb55afb264..8bc9f1e4f56 100644 --- a/fs/ceph/osdmap.h +++ b/fs/ceph/osdmap.h @@ -23,6 +23,7 @@ struct ceph_pg_pool_info { int id; struct ceph_pg_pool v; int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; + char *name; }; struct ceph_pg_mapping { diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index 26ac8b89a67..a1fc1d017b5 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h @@ -11,8 +11,10 @@ /* * osdmap encoding versions */ -#define CEPH_OSDMAP_INC_VERSION 4 -#define CEPH_OSDMAP_VERSION 4 +#define CEPH_OSDMAP_INC_VERSION 5 +#define CEPH_OSDMAP_INC_VERSION_EXT 5 +#define CEPH_OSDMAP_VERSION 5 +#define CEPH_OSDMAP_VERSION_EXT 5 /* * fs id diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index e6f9bc57d47..2b881262ef6 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -431,8 +431,7 @@ static int dup_array(u64 **dst, __le64 *src, 
int num) * Caller must hold snap_rwsem for read (i.e., the realm topology won't * change). */ -void ceph_queue_cap_snap(struct ceph_inode_info *ci, - struct ceph_snap_context *snapc) +void ceph_queue_cap_snap(struct ceph_inode_info *ci) { struct inode *inode = &ci->vfs_inode; struct ceph_cap_snap *capsnap; @@ -451,10 +450,11 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci, as no new writes are allowed to start when pending, so any writes in progress now were started before the previous cap_snap. lucky us. */ - dout("queue_cap_snap %p snapc %p seq %llu used %d" - " already pending\n", inode, snapc, snapc->seq, used); + dout("queue_cap_snap %p already pending\n", inode); kfree(capsnap); } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) { + struct ceph_snap_context *snapc = ci->i_head_snapc; + igrab(inode); atomic_set(&capsnap->nref, 1); @@ -463,7 +463,6 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci, INIT_LIST_HEAD(&capsnap->flushing_item); capsnap->follows = snapc->seq - 1; - capsnap->context = ceph_get_snap_context(snapc); capsnap->issued = __ceph_caps_issued(ci, NULL); capsnap->dirty = __ceph_caps_dirty(ci); @@ -480,7 +479,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci, snapshot. */ capsnap->dirty_pages = ci->i_wrbuffer_ref_head; ci->i_wrbuffer_ref_head = 0; - ceph_put_snap_context(ci->i_head_snapc); + capsnap->context = snapc; ci->i_head_snapc = NULL; list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); @@ -522,15 +521,17 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci, capsnap->ctime = inode->i_ctime; capsnap->time_warp_seq = ci->i_time_warp_seq; if (capsnap->dirty_pages) { - dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu " + dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu " "still has %d dirty pages\n", inode, capsnap, capsnap->context, capsnap->context->seq, - capsnap->size, capsnap->dirty_pages); + ceph_cap_string(capsnap->dirty), capsnap->size, + capsnap->dirty_pages); return 0; } - dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n", + dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n", inode, capsnap, capsnap->context, - capsnap->context->seq, capsnap->size); + capsnap->context->seq, ceph_cap_string(capsnap->dirty), + capsnap->size); spin_lock(&mdsc->snap_flush_lock); list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); @@ -602,7 +603,7 @@ more: if (lastinode) iput(lastinode); lastinode = inode; - ceph_queue_cap_snap(ci, realm->cached_context); + ceph_queue_cap_snap(ci); spin_lock(&realm->inodes_with_caps_lock); } spin_unlock(&realm->inodes_with_caps_lock); @@ -824,8 +825,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, spin_unlock(&realm->inodes_with_caps_lock); spin_unlock(&inode->i_lock); - ceph_queue_cap_snap(ci, - ci->i_snap_realm->cached_context); + ceph_queue_cap_snap(ci); iput(inode); continue; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index ca702c67bc6..e30dfbb056c 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -715,8 +715,7 @@ extern int ceph_update_snap_trace(struct ceph_mds_client *m, extern void ceph_handle_snap(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, struct ceph_msg *msg); -extern void ceph_queue_cap_snap(struct ceph_inode_info *ci, - struct ceph_snap_context *snapc); +extern void ceph_queue_cap_snap(struct ceph_inode_info *ci); extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, struct ceph_cap_snap *capsnap); extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); diff --git 
a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 5183bc2a191..ded66be6597 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -808,6 +808,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { .release = cifs_close, .fsync = cifs_fsync, .flush = cifs_flush, + .mmap = cifs_file_mmap, .splice_read = generic_file_splice_read, #ifdef CONFIG_CIFS_POSIX .unlocked_ioctl = cifs_ioctl, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 3f4fbd67050..5d3f29fef53 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1431,6 +1431,8 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, __u32 bytes_sent; __u16 byte_count; + *nbytes = 0; + /* cFYI(1, ("write at %lld %d bytes", offset, count));*/ if (tcon->ses == NULL) return -ECONNABORTED; @@ -1513,11 +1515,18 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, cifs_stats_inc(&tcon->num_writes); if (rc) { cFYI(1, ("Send error in write = %d", rc)); - *nbytes = 0; } else { *nbytes = le16_to_cpu(pSMBr->CountHigh); *nbytes = (*nbytes) << 16; *nbytes += le16_to_cpu(pSMBr->Count); + + /* + * Mask off high 16 bits when bytes written as returned by the + * server is greater than bytes requested by the client. Some + * OS/2 servers are known to set incorrect CountHigh values. + */ + if (*nbytes > count) + *nbytes &= 0xFFFF; } cifs_buf_release(pSMB); @@ -1606,6 +1615,14 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, *nbytes = le16_to_cpu(pSMBr->CountHigh); *nbytes = (*nbytes) << 16; *nbytes += le16_to_cpu(pSMBr->Count); + + /* + * Mask off high 16 bits when bytes written as returned by the + * server is greater than bytes requested by the client. OS/2 + * servers are known to set incorrect CountHigh values. + */ + if (*nbytes > count) + *nbytes &= 0xFFFF; } /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ @@ -1794,8 +1811,21 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, } parm_data = (struct cifs_posix_lock *) ((char *)&pSMBr->hdr.Protocol + data_offset); - if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK)) + if (parm_data->lock_type == __constant_cpu_to_le16(CIFS_UNLCK)) pLockData->fl_type = F_UNLCK; + else { + if (parm_data->lock_type == + __constant_cpu_to_le16(CIFS_RDLCK)) + pLockData->fl_type = F_RDLCK; + else if (parm_data->lock_type == + __constant_cpu_to_le16(CIFS_WRLCK)) + pLockData->fl_type = F_WRLCK; + + pLockData->fl_start = parm_data->start; + pLockData->fl_end = parm_data->start + + parm_data->length - 1; + pLockData->fl_pid = parm_data->pid; + } } plk_err_exit: diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 058b390d3da..9b11a8f56f3 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -839,8 +839,32 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) } else { /* if rc == ERR_SHARING_VIOLATION ? 
*/ - rc = 0; /* do not change lock type to unlock - since range in use */ + rc = 0; + + if (lockType & LOCKING_ANDX_SHARED_LOCK) { + pfLock->fl_type = F_WRLCK; + } else { + rc = CIFSSMBLock(xid, tcon, netfid, length, + pfLock->fl_start, 0, 1, + lockType | LOCKING_ANDX_SHARED_LOCK, + 0 /* wait flag */); + if (rc == 0) { + rc = CIFSSMBLock(xid, tcon, netfid, + length, pfLock->fl_start, 1, 0, + lockType | + LOCKING_ANDX_SHARED_LOCK, + 0 /* wait flag */); + pfLock->fl_type = F_RDLCK; + if (rc != 0) + cERROR(1, ("Error unlocking " + "previously locked range %d " + "during test of lock", rc)); + rc = 0; + } else { + pfLock->fl_type = F_WRLCK; + rc = 0; + } + } } FreeXid(xid); diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index efb2b940039..1cc087635a5 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -382,8 +382,8 @@ out: static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, struct ecryptfs_crypt_stat *crypt_stat) { - (*offset) = (crypt_stat->num_header_bytes_at_front - + (crypt_stat->extent_size * extent_num)); + (*offset) = ecryptfs_lower_header_size(crypt_stat) + + (crypt_stat->extent_size * extent_num); } /** @@ -835,13 +835,13 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat) set_extent_mask_and_shift(crypt_stat); crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) - crypt_stat->num_header_bytes_at_front = 0; + crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; else { if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) - crypt_stat->num_header_bytes_at_front = + crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; else - crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE; + crypt_stat->metadata_size = PAGE_CACHE_SIZE; } } @@ -1108,9 +1108,9 @@ static void write_ecryptfs_marker(char *page_virt, size_t *written) (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; } -static void -write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat, - size_t *written) +void ecryptfs_write_crypt_stat_flags(char *page_virt, + struct ecryptfs_crypt_stat *crypt_stat, + size_t *written) { u32 flags = 0; int i; @@ -1238,8 +1238,7 @@ ecryptfs_write_header_metadata(char *virt, header_extent_size = (u32)crypt_stat->extent_size; num_header_extents_at_front = - (u16)(crypt_stat->num_header_bytes_at_front - / crypt_stat->extent_size); + (u16)(crypt_stat->metadata_size / crypt_stat->extent_size); put_unaligned_be32(header_extent_size, virt); virt += 4; put_unaligned_be16(num_header_extents_at_front, virt); @@ -1292,7 +1291,8 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max, offset = ECRYPTFS_FILE_SIZE_BYTES; write_ecryptfs_marker((page_virt + offset), &written); offset += written; - write_ecryptfs_flags((page_virt + offset), crypt_stat, &written); + ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat, + &written); offset += written; ecryptfs_write_header_metadata((page_virt + offset), crypt_stat, &written); @@ -1382,7 +1382,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) rc = -EINVAL; goto out; } - virt_len = crypt_stat->num_header_bytes_at_front; + virt_len = crypt_stat->metadata_size; order = get_order(virt_len); /* Released in this function */ virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order); @@ -1428,16 +1428,15 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat, header_extent_size = get_unaligned_be32(virt); virt += sizeof(__be32); 
num_header_extents_at_front = get_unaligned_be16(virt); - crypt_stat->num_header_bytes_at_front = - (((size_t)num_header_extents_at_front - * (size_t)header_extent_size)); + crypt_stat->metadata_size = (((size_t)num_header_extents_at_front + * (size_t)header_extent_size)); (*bytes_read) = (sizeof(__be32) + sizeof(__be16)); if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) - && (crypt_stat->num_header_bytes_at_front + && (crypt_stat->metadata_size < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { rc = -EINVAL; printk(KERN_WARNING "Invalid header size: [%zd]\n", - crypt_stat->num_header_bytes_at_front); + crypt_stat->metadata_size); } return rc; } @@ -1452,8 +1451,7 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat, */ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) { - crypt_stat->num_header_bytes_at_front = - ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; + crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; } /** @@ -1607,6 +1605,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) ecryptfs_dentry, ECRYPTFS_VALIDATE_HEADER_SIZE); if (rc) { + memset(page_virt, 0, PAGE_CACHE_SIZE); rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); if (rc) { printk(KERN_DEBUG "Valid eCryptfs headers not found in " diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 542f625312f..bc7115403f3 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -273,7 +273,7 @@ struct ecryptfs_crypt_stat { u32 flags; unsigned int file_version; size_t iv_bytes; - size_t num_header_bytes_at_front; + size_t metadata_size; size_t extent_size; /* Data extent size; default is 4096 */ size_t key_size; size_t extent_shift; @@ -464,6 +464,14 @@ struct ecryptfs_daemon { extern struct mutex ecryptfs_daemon_hash_mux; +static inline size_t +ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat) +{ + if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) + return 0; + return crypt_stat->metadata_size; +} + static inline struct ecryptfs_file_info * ecryptfs_file_to_private(struct file *file) { @@ -651,6 +659,9 @@ int ecryptfs_decrypt_page(struct page *page); int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); +void ecryptfs_write_crypt_stat_flags(char *page_virt, + struct ecryptfs_crypt_stat *crypt_stat, + size_t *written); int ecryptfs_read_and_validate_header_region(char *data, struct inode *ecryptfs_inode); int ecryptfs_read_and_validate_xattr_region(char *page_virt, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index d3362faf385..e2d4418affa 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -324,6 +324,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, rc = ecryptfs_read_and_validate_header_region(page_virt, ecryptfs_dentry->d_inode); if (rc) { + memset(page_virt, 0, PAGE_CACHE_SIZE); rc = ecryptfs_read_and_validate_xattr_region(page_virt, ecryptfs_dentry); if (rc) { @@ -336,7 +337,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, ecryptfs_dentry->d_sb)->mount_crypt_stat; if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) - file_size = (crypt_stat->num_header_bytes_at_front + file_size = (crypt_stat->metadata_size + i_size_read(lower_dentry->d_inode)); else file_size = i_size_read(lower_dentry->d_inode); @@ -388,9 +389,9 @@ 
static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); - printk(KERN_ERR "%s: lookup_one_len() returned [%d] on " - "lower_dentry = [%s]\n", __func__, rc, - ecryptfs_dentry->d_name.name); + ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " + "[%d] on lower_dentry = [%s]\n", __func__, rc, + encrypted_and_encoded_name); goto out_d_drop; } if (lower_dentry->d_inode) @@ -417,9 +418,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); - printk(KERN_ERR "%s: lookup_one_len() returned [%d] on " - "lower_dentry = [%s]\n", __func__, rc, - encrypted_and_encoded_name); + ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " + "[%d] on lower_dentry = [%s]\n", __func__, rc, + encrypted_and_encoded_name); goto out_d_drop; } lookup_and_interpose: @@ -456,8 +457,8 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir, rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0); if (rc) goto out_lock; - fsstack_copy_attr_times(dir, lower_new_dentry->d_inode); - fsstack_copy_inode_size(dir, lower_new_dentry->d_inode); + fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); + fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); old_dentry->d_inode->i_nlink = ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink; i_size_write(new_dentry->d_inode, file_size_save); @@ -648,38 +649,17 @@ out_lock: return rc; } -static int -ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) +static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, + size_t *bufsiz) { + struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); char *lower_buf; - size_t lower_bufsiz; - struct dentry *lower_dentry; - struct ecryptfs_mount_crypt_stat *mount_crypt_stat; - char *plaintext_name; - size_t plaintext_name_size; + size_t lower_bufsiz = PATH_MAX; mm_segment_t old_fs; int rc; - lower_dentry = ecryptfs_dentry_to_lower(dentry); - if (!lower_dentry->d_inode->i_op->readlink) { - rc = -EINVAL; - goto out; - } - mount_crypt_stat = &ecryptfs_superblock_to_private( - dentry->d_sb)->mount_crypt_stat; - /* - * If the lower filename is encrypted, it will result in a significantly - * longer name. If needed, truncate the name after decode and decrypt. 
- */ - if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) - lower_bufsiz = PATH_MAX; - else - lower_bufsiz = bufsiz; - /* Released in this function */ lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); - if (lower_buf == NULL) { - printk(KERN_ERR "%s: Out of memory whilst attempting to " - "kmalloc [%zd] bytes\n", __func__, lower_bufsiz); + if (!lower_buf) { rc = -ENOMEM; goto out; } @@ -689,29 +669,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) (char __user *)lower_buf, lower_bufsiz); set_fs(old_fs); - if (rc >= 0) { - rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name, - &plaintext_name_size, - dentry, lower_buf, - rc); - if (rc) { - printk(KERN_ERR "%s: Error attempting to decode and " - "decrypt filename; rc = [%d]\n", __func__, - rc); - goto out_free_lower_buf; - } - /* Check for bufsiz <= 0 done in sys_readlinkat() */ - rc = copy_to_user(buf, plaintext_name, - min((size_t) bufsiz, plaintext_name_size)); - if (rc) - rc = -EFAULT; - else - rc = plaintext_name_size; - kfree(plaintext_name); - fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode); - } -out_free_lower_buf: + if (rc < 0) + goto out; + lower_bufsiz = rc; + rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry, + lower_buf, lower_bufsiz); +out: kfree(lower_buf); + return rc; +} + +static int +ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) +{ + char *kbuf; + size_t kbufsiz, copied; + int rc; + + rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz); + if (rc) + goto out; + copied = min_t(size_t, bufsiz, kbufsiz); + rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied; + kfree(kbuf); + fsstack_copy_attr_atime(dentry->d_inode, + ecryptfs_dentry_to_lower(dentry)->d_inode); out: return rc; } @@ -769,7 +751,7 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat, { loff_t lower_size; - lower_size = crypt_stat->num_header_bytes_at_front; + lower_size = ecryptfs_lower_header_size(crypt_stat); if (upper_size != 0) { loff_t num_extents; @@ -1016,6 +998,28 @@ out: return rc; } +int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + struct ecryptfs_mount_crypt_stat *mount_crypt_stat; + int rc = 0; + + mount_crypt_stat = &ecryptfs_superblock_to_private( + dentry->d_sb)->mount_crypt_stat; + generic_fillattr(dentry->d_inode, stat); + if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { + char *target; + size_t targetsiz; + + rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz); + if (!rc) { + kfree(target); + stat->size = targetsiz; + } + } + return rc; +} + int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { @@ -1040,7 +1044,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, lower_dentry = ecryptfs_dentry_to_lower(dentry); if (!lower_dentry->d_inode->i_op->setxattr) { - rc = -ENOSYS; + rc = -EOPNOTSUPP; goto out; } mutex_lock(&lower_dentry->d_inode->i_mutex); @@ -1058,7 +1062,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, int rc = 0; if (!lower_dentry->d_inode->i_op->getxattr) { - rc = -ENOSYS; + rc = -EOPNOTSUPP; goto out; } mutex_lock(&lower_dentry->d_inode->i_mutex); @@ -1085,7 +1089,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size) lower_dentry = ecryptfs_dentry_to_lower(dentry); if (!lower_dentry->d_inode->i_op->listxattr) { - rc = -ENOSYS; + rc = -EOPNOTSUPP; goto out; } mutex_lock(&lower_dentry->d_inode->i_mutex); @@ -1102,7 +1106,7 @@ static int 
ecryptfs_removexattr(struct dentry *dentry, const char *name) lower_dentry = ecryptfs_dentry_to_lower(dentry); if (!lower_dentry->d_inode->i_op->removexattr) { - rc = -ENOSYS; + rc = -EOPNOTSUPP; goto out; } mutex_lock(&lower_dentry->d_inode->i_mutex); @@ -1133,6 +1137,7 @@ const struct inode_operations ecryptfs_symlink_iops = { .put_link = ecryptfs_put_link, .permission = ecryptfs_permission, .setattr = ecryptfs_setattr, + .getattr = ecryptfs_getattr_link, .setxattr = ecryptfs_setxattr, .getxattr = ecryptfs_getxattr, .listxattr = ecryptfs_listxattr, diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index d491237c98e..2ee9a3a7b68 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -83,6 +83,19 @@ out: return rc; } +static void strip_xattr_flag(char *page_virt, + struct ecryptfs_crypt_stat *crypt_stat) +{ + if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) { + size_t written; + + crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR; + ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat, + &written); + crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR; + } +} + /** * Header Extent: * Octets 0-7: Unencrypted file size (big-endian) @@ -98,19 +111,6 @@ out: * (big-endian) * Octet 26: Begin RFC 2440 authentication token packet set */ -static void set_header_info(char *page_virt, - struct ecryptfs_crypt_stat *crypt_stat) -{ - size_t written; - size_t save_num_header_bytes_at_front = - crypt_stat->num_header_bytes_at_front; - - crypt_stat->num_header_bytes_at_front = - ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; - ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written); - crypt_stat->num_header_bytes_at_front = - save_num_header_bytes_at_front; -} /** * ecryptfs_copy_up_encrypted_with_header @@ -136,8 +136,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, * num_extents_per_page) + extent_num_in_page); size_t num_header_extents_at_front = - (crypt_stat->num_header_bytes_at_front - / crypt_stat->extent_size); + (crypt_stat->metadata_size / crypt_stat->extent_size); if (view_extent_num < num_header_extents_at_front) { /* This is a header extent */ @@ -147,9 +146,14 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, memset(page_virt, 0, PAGE_CACHE_SIZE); /* TODO: Support more than one header extent */ if (view_extent_num == 0) { + size_t written; + rc = ecryptfs_read_xattr_region( page_virt, page->mapping->host); - set_header_info(page_virt, crypt_stat); + strip_xattr_flag(page_virt + 16, crypt_stat); + ecryptfs_write_header_metadata(page_virt + 20, + crypt_stat, + &written); } kunmap_atomic(page_virt, KM_USER0); flush_dcache_page(page); @@ -162,7 +166,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, /* This is an encrypted data extent */ loff_t lower_offset = ((view_extent_num * crypt_stat->extent_size) - - crypt_stat->num_header_bytes_at_front); + - crypt_stat->metadata_size); rc = ecryptfs_read_lower_page_segment( page, (lower_offset >> PAGE_CACHE_SHIFT), diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index fcef41c1d2c..278743c7716 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c @@ -86,7 +86,6 @@ static void ecryptfs_destroy_inode(struct inode *inode) if (lower_dentry->d_inode) { fput(inode_info->lower_file); inode_info->lower_file = NULL; - d_drop(lower_dentry); } } ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c index 4e2426e22bb..565cf817bbf 100644 --- a/fs/ext2/symlink.c +++ b/fs/ext2/symlink.c @@ -32,6 +32,7 @@ const struct inode_operations 
ext2_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, + .setattr = ext2_setattr, #ifdef CONFIG_EXT2_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, @@ -43,6 +44,7 @@ const struct inode_operations ext2_symlink_inode_operations = { const struct inode_operations ext2_fast_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = ext2_follow_link, + .setattr = ext2_setattr, #ifdef CONFIG_EXT2_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c index ff7b4ccd898..7c489820777 100644 --- a/fs/ext3/symlink.c +++ b/fs/ext3/symlink.c @@ -34,6 +34,7 @@ const struct inode_operations ext3_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, + .setattr = ext3_setattr, #ifdef CONFIG_EXT3_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, @@ -45,6 +46,7 @@ const struct inode_operations ext3_symlink_inode_operations = { const struct inode_operations ext3_fast_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = ext3_follow_link, + .setattr = ext3_setattr, #ifdef CONFIG_EXT3_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 781a322ccb4..4b37f7cea4d 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -554,108 +554,85 @@ select_queue: return ret; } -static void unpin_sb_for_writeback(struct super_block **psb) +static void unpin_sb_for_writeback(struct super_block *sb) { - struct super_block *sb = *psb; - - if (sb) { - up_read(&sb->s_umount); - put_super(sb); - *psb = NULL; - } + up_read(&sb->s_umount); + put_super(sb); } +enum sb_pin_state { + SB_PINNED, + SB_NOT_PINNED, + SB_PIN_FAILED +}; + /* * For WB_SYNC_NONE writeback, the caller does not have the sb pinned * before calling writeback. So make sure that we do pin it, so it doesn't * go away while we are writing inodes from it. - * - * Returns 0 if the super was successfully pinned (or pinning wasn't needed), - * 1 if we failed. */ -static int pin_sb_for_writeback(struct writeback_control *wbc, - struct inode *inode, struct super_block **psb) +static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc, + struct super_block *sb) { - struct super_block *sb = inode->i_sb; - - /* - * If this sb is already pinned, nothing more to do. If not and - * *psb is non-NULL, unpin the old one first - */ - if (sb == *psb) - return 0; - else if (*psb) - unpin_sb_for_writeback(psb); - /* * Caller must already hold the ref for this */ if (wbc->sync_mode == WB_SYNC_ALL) { WARN_ON(!rwsem_is_locked(&sb->s_umount)); - return 0; + return SB_NOT_PINNED; } - spin_lock(&sb_lock); sb->s_count++; if (down_read_trylock(&sb->s_umount)) { if (sb->s_root) { spin_unlock(&sb_lock); - goto pinned; + return SB_PINNED; } /* * umounted, drop rwsem again and fall through to failure */ up_read(&sb->s_umount); } - sb->s_count--; spin_unlock(&sb_lock); - return 1; -pinned: - *psb = sb; - return 0; + return SB_PIN_FAILED; } -static void writeback_inodes_wb(struct bdi_writeback *wb, - struct writeback_control *wbc) +/* + * Write a portion of b_io inodes which belong to @sb. + * If @wbc->sb != NULL, then find and write all such + * inodes. Otherwise write only ones which go sequentially + * in reverse order. + * Return 1, if the caller writeback routine should be + * interrupted. Otherwise return 0. 
+ */ +static int writeback_sb_inodes(struct super_block *sb, + struct bdi_writeback *wb, + struct writeback_control *wbc) { - struct super_block *sb = wbc->sb, *pin_sb = NULL; - const unsigned long start = jiffies; /* livelock avoidance */ - - spin_lock(&inode_lock); - - if (!wbc->for_kupdate || list_empty(&wb->b_io)) - queue_io(wb, wbc->older_than_this); - while (!list_empty(&wb->b_io)) { - struct inode *inode = list_entry(wb->b_io.prev, - struct inode, i_list); long pages_skipped; - - /* - * super block given and doesn't match, skip this inode - */ - if (sb && sb != inode->i_sb) { + struct inode *inode = list_entry(wb->b_io.prev, + struct inode, i_list); + if (wbc->sb && sb != inode->i_sb) { + /* super block given and doesn't + match, skip this inode */ redirty_tail(inode); continue; } - + if (sb != inode->i_sb) + /* finish with this superblock */ + return 0; if (inode->i_state & (I_NEW | I_WILL_FREE)) { requeue_io(inode); continue; } - /* * Was this inode dirtied after sync_sb_inodes was called? * This keeps sync from extra jobs and livelock. */ - if (inode_dirtied_after(inode, start)) - break; - - if (pin_sb_for_writeback(wbc, inode, &pin_sb)) { - requeue_io(inode); - continue; - } + if (inode_dirtied_after(inode, wbc->wb_start)) + return 1; BUG_ON(inode->i_state & (I_FREEING | I_CLEAR)); __iget(inode); @@ -674,14 +651,50 @@ static void writeback_inodes_wb(struct bdi_writeback *wb, spin_lock(&inode_lock); if (wbc->nr_to_write <= 0) { wbc->more_io = 1; - break; + return 1; } if (!list_empty(&wb->b_more_io)) wbc->more_io = 1; } + /* b_io is empty */ + return 1; +} + +static void writeback_inodes_wb(struct bdi_writeback *wb, + struct writeback_control *wbc) +{ + int ret = 0; - unpin_sb_for_writeback(&pin_sb); + wbc->wb_start = jiffies; /* livelock avoidance */ + spin_lock(&inode_lock); + if (!wbc->for_kupdate || list_empty(&wb->b_io)) + queue_io(wb, wbc->older_than_this); + + while (!list_empty(&wb->b_io)) { + struct inode *inode = list_entry(wb->b_io.prev, + struct inode, i_list); + struct super_block *sb = inode->i_sb; + enum sb_pin_state state; + + if (wbc->sb && sb != wbc->sb) { + /* super block given and doesn't + match, skip this inode */ + redirty_tail(inode); + continue; + } + state = pin_sb_for_writeback(wbc, sb); + + if (state == SB_PIN_FAILED) { + requeue_io(inode); + continue; + } + ret = writeback_sb_inodes(sb, wb, wbc); + if (state == SB_PINNED) + unpin_sb_for_writeback(sb); + if (ret) + break; + } spin_unlock(&inode_lock); /* Leave any unwritten inodes on b_io */ } diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 46435f3aae6..4765190d537 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c @@ -165,8 +165,8 @@ static int fscache_stats_show(struct seq_file *m, void *v) atomic_read(&fscache_n_object_lookups), atomic_read(&fscache_n_object_lookups_negative), atomic_read(&fscache_n_object_lookups_positive), - atomic_read(&fscache_n_object_lookups_timed_out), - atomic_read(&fscache_n_object_created)); + atomic_read(&fscache_n_object_created), + atomic_read(&fscache_n_object_lookups_timed_out)); seq_printf(m, "Updates: n=%u nul=%u run=%u\n", atomic_read(&fscache_n_updates), diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 2a3d352c0bf..a8766c4ef2e 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1294,7 +1294,8 @@ static int nfs4_init_server(struct nfs_server *server, /* Initialise the client representation from the mount data */ server->flags = data->flags; - server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR; + server->caps |= 
NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR| + NFS_CAP_POSIX_LOCK; server->options = data->options; /* Get a client record */ diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index c6f2750648f..be46f26c9a5 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry res = NULL; goto out; /* This turned out not to be a regular file */ + case -EISDIR: case -ENOTDIR: goto no_open; case -ELOOP: if (!(nd->intent.open.flags & O_NOFOLLOW)) goto no_open; - /* case -EISDIR: */ /* case -EINVAL: */ default: goto out; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 737128f777f..50a56edca0b 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -623,10 +623,10 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c list_for_each_entry(pos, &nfsi->open_files, list) { if (cred != NULL && pos->cred != cred) continue; - if ((pos->mode & mode) == mode) { - ctx = get_nfs_open_context(pos); - break; - } + if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode) + continue; + ctx = get_nfs_open_context(pos); + break; } spin_unlock(&inode->i_lock); return ctx; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d79a7b37e56..638067007c6 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1523,6 +1523,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) nfs_post_op_update_inode(dir, o_res->dir_attr); } else nfs_refresh_inode(dir, o_res->dir_attr); + if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) + server->caps &= ~NFS_CAP_POSIX_LOCK; if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { status = _nfs4_proc_open_confirm(data); if (status != 0) @@ -1664,7 +1666,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in status = PTR_ERR(state); if (IS_ERR(state)) goto err_opendata_put; - if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) + if (server->caps & NFS_CAP_POSIX_LOCK) set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); nfs4_opendata_put(opendata); nfs4_put_state_owner(sp); @@ -2068,8 +2070,7 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st case -EDQUOT: case -ENOSPC: case -EROFS: - lookup_instantiate_filp(nd, (struct dentry *)state, NULL); - return 1; + return PTR_ERR(state); default: goto out_drop; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 53ff70e2399..de38d63aa92 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct page *page) struct inode *inode = page->mapping->host; struct nfs_server *nfss = NFS_SERVER(inode); + page_cache_get(page); if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH) { set_bdi_congested(&nfss->backing_dev_info, @@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struct page *page) struct nfs_server *nfss = NFS_SERVER(inode); end_page_writeback(page); + page_cache_release(page); if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); } @@ -421,6 +423,7 @@ static void nfs_mark_request_dirty(struct nfs_page *req) { __set_page_dirty_nobuffers(req->wb_page); + __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC); } #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) @@ -660,9 +663,11 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, req = nfs_setup_write_request(ctx, page, offset, count); if (IS_ERR(req)) return PTR_ERR(req); + nfs_mark_request_dirty(req); /* Update file length 
*/ nfs_grow_file(page, offset, count); nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); + nfs_mark_request_dirty(req); nfs_clear_page_tag_locked(req); return 0; } @@ -739,8 +744,6 @@ int nfs_updatepage(struct file *file, struct page *page, status = nfs_writepage_setup(ctx, page, offset, count); if (status < 0) nfs_set_pageerror(page); - else - __set_page_dirty_nobuffers(page); dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", status, (long long)i_size_read(inode)); @@ -749,13 +752,12 @@ int nfs_updatepage(struct file *file, struct page *page, static void nfs_writepage_release(struct nfs_page *req) { + struct page *page = req->wb_page; - if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { - nfs_end_page_writeback(req->wb_page); + if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) nfs_inode_remove_request(req); - } else - nfs_end_page_writeback(req->wb_page); nfs_clear_page_tag_locked(req); + nfs_end_page_writeback(page); } static int flush_task_priority(int how) @@ -779,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req, int how) { struct inode *inode = req->wb_context->path.dentry->d_inode; - int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; int priority = flush_task_priority(how); struct rpc_task *task; struct rpc_message msg = { @@ -794,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs_page *req, .callback_ops = call_ops, .callback_data = data, .workqueue = nfsiod_workqueue, - .flags = flags, + .flags = RPC_TASK_ASYNC, .priority = priority, }; + int ret = 0; /* Set up the RPC argument and reply structs * NB: take care not to mess about with data->commit et al. */ @@ -835,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs_page *req, (unsigned long long)data->args.offset); task = rpc_run_task(&task_setup_data); - if (IS_ERR(task)) - return PTR_ERR(task); + if (IS_ERR(task)) { + ret = PTR_ERR(task); + goto out; + } + if (how & FLUSH_SYNC) { + ret = rpc_wait_for_completion_task(task); + if (ret == 0) + ret = task->tk_status; + } rpc_put_task(task); - return 0; +out: + return ret; } /* If a nfs_flush_* function fails, it should remove reqs from @head and @@ -847,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req, */ static void nfs_redirty_request(struct nfs_page *req) { + struct page *page = req->wb_page; + nfs_mark_request_dirty(req); - nfs_end_page_writeback(req->wb_page); nfs_clear_page_tag_locked(req); + nfs_end_page_writeback(page); } /* @@ -1084,16 +1096,15 @@ static void nfs_writeback_release_full(void *calldata) if (nfs_write_need_commit(data)) { memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); nfs_mark_request_commit(req); - nfs_end_page_writeback(page); dprintk(" marked for commit\n"); goto next; } dprintk(" OK\n"); remove_request: - nfs_end_page_writeback(page); nfs_inode_remove_request(req); next: nfs_clear_page_tag_locked(req); + nfs_end_page_writeback(page); } nfs_writedata_release(calldata); } @@ -1207,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct list_head *head, { struct nfs_page *first = nfs_list_entry(head->next); struct inode *inode = first->wb_context->path.dentry->d_inode; - int flags = (how & FLUSH_SYNC) ? 
0 : RPC_TASK_ASYNC; int priority = flush_task_priority(how); struct rpc_task *task; struct rpc_message msg = { @@ -1222,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct list_head *head, .callback_ops = &nfs_commit_ops, .callback_data = data, .workqueue = nfsiod_workqueue, - .flags = flags, + .flags = RPC_TASK_ASYNC, .priority = priority, }; @@ -1252,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct list_head *head, task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); + if (how & FLUSH_SYNC) + rpc_wait_for_completion_task(task); rpc_put_task(task); return 0; } diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 8d6356a804f..7cfb87e692d 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -426,7 +426,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode, bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh); if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), group_offset, bitmap)) - printk(KERN_WARNING "%s: entry numer %llu already freed\n", + printk(KERN_WARNING "%s: entry number %llu already freed\n", __func__, (unsigned long long)req->pr_entry_nr); nilfs_palloc_group_desc_add_entries(inode, group, desc, 1); diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 7cdd98b8d51..76c38e3e19d 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -1879,7 +1879,7 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree, struct nilfs_btree_path *path, int level, struct buffer_head *bh) { - int maxlevel, ret; + int maxlevel = 0, ret; struct nilfs_btree_node *parent; struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap); __u64 ptr; diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index c2ff1b30601..f90a33d9a5b 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -649,7 +649,7 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = filp->f_dentry->d_inode; - void __user *argp = (void * __user *)arg; + void __user *argp = (void __user *)arg; switch (cmd) { case NILFS_IOCTL_CHANGE_CPMODE: diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index caf0337dff7..070553427dd 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -662,31 +662,18 @@ static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset) return pme; } -static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr, - unsigned long end, struct mm_walk *walk) +/* This function walks within one hugetlb entry in a single call */ +static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, + unsigned long addr, unsigned long end, + struct mm_walk *walk) { - struct vm_area_struct *vma; struct pagemapread *pm = walk->private; - struct hstate *hs = NULL; int err = 0; + u64 pfn; - vma = find_vma(walk->mm, addr); - if (vma) - hs = hstate_vma(vma); for (; addr != end; addr += PAGE_SIZE) { - u64 pfn = PM_NOT_PRESENT; - - if (vma && (addr >= vma->vm_end)) { - vma = find_vma(walk->mm, addr); - if (vma) - hs = hstate_vma(vma); - } - - if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) { - /* calculate pfn of the "raw" page in the hugepage.
*/ - int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT; - pfn = huge_pte_to_pagemap_entry(*pte, offset); - } + int offset = (addr & ~hmask) >> PAGE_SHIFT; + pfn = huge_pte_to_pagemap_entry(*pte, offset); err = add_to_pagemap(addr, pfn, pm); if (err) return err; @@ -800,7 +787,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, start_vaddr = end; len = min(count, PM_ENTRY_BYTES * pm.pos); - if (copy_to_user(buf, pm.buffer, len) < 0) { + if (copy_to_user(buf, pm.buffer, len)) { ret = -EFAULT; goto out_free; } diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index e0b870f4749..a0a9405b202 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -874,14 +874,18 @@ static int dqinit_needed(struct inode *inode, int type) static void add_dquot_ref(struct super_block *sb, int type) { struct inode *inode, *old_inode = NULL; +#ifdef __DQUOT_PARANOIA int reserved = 0; +#endif spin_lock(&inode_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) continue; +#ifdef __DQUOT_PARANOIA if (unlikely(inode_get_rsv_space(inode) > 0)) reserved = 1; +#endif if (!atomic_read(&inode->i_writecount)) continue; if (!dqinit_needed(inode, type)) @@ -903,11 +907,13 @@ static void add_dquot_ref(struct super_block *sb, int type) spin_unlock(&inode_lock); iput(old_inode); +#ifdef __DQUOT_PARANOIA if (reserved) { printk(KERN_WARNING "VFS (%s): Writes happened before quota" " was turned on thus quota information is probably " "inconsistent. Please run quotacheck(8).\n", sb->s_id); } +#endif } /* @@ -2322,34 +2328,34 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) if (di->dqb_valid & QIF_SPACE) { dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; check_blim = 1; - __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); + set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); } if (di->dqb_valid & QIF_BLIMITS) { dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); check_blim = 1; - __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); + set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); } if (di->dqb_valid & QIF_INODES) { dm->dqb_curinodes = di->dqb_curinodes; check_ilim = 1; - __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); + set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); } if (di->dqb_valid & QIF_ILIMITS) { dm->dqb_isoftlimit = di->dqb_isoftlimit; dm->dqb_ihardlimit = di->dqb_ihardlimit; check_ilim = 1; - __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); + set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); } if (di->dqb_valid & QIF_BTIME) { dm->dqb_btime = di->dqb_btime; check_blim = 1; - __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); + set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); } if (di->dqb_valid & QIF_ITIME) { dm->dqb_itime = di->dqb_itime; check_ilim = 1; - __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); + set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); } if (check_blim) { diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 19626e2491c..9a9378b4eb5 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -125,9 +125,8 @@ static void udf_bitmap_free_blocks(struct super_block *sb, mutex_lock(&sbi->s_alloc_mutex); partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; - if (bloc->logicalBlockNum < 0 || - (bloc->logicalBlockNum + count) > - partmap->s_partition_len) { + if (bloc->logicalBlockNum + count < count || + (bloc->logicalBlockNum + count) > partmap->s_partition_len) { 
udf_debug("%d < %d || %d + %d > %d\n", bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count, partmap->s_partition_len); @@ -393,9 +392,8 @@ static void udf_table_free_blocks(struct super_block *sb, mutex_lock(&sbi->s_alloc_mutex); partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; - if (bloc->logicalBlockNum < 0 || - (bloc->logicalBlockNum + count) > - partmap->s_partition_len) { + if (bloc->logicalBlockNum + count < count || + (bloc->logicalBlockNum + count) > partmap->s_partition_len) { udf_debug("%d < %d || %d + %d > %d\n", bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count, partmap->s_partition_len); diff --git a/fs/udf/file.c b/fs/udf/file.c index 1eb06774ed9..4b6a46ccbf4 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -218,7 +218,7 @@ const struct file_operations udf_file_operations = { .llseek = generic_file_llseek, }; -static int udf_setattr(struct dentry *dentry, struct iattr *iattr) +int udf_setattr(struct dentry *dentry, struct iattr *iattr) { struct inode *inode = dentry->d_inode; int error; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index bb863fe579a..8a3fbd177ca 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1314,7 +1314,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) break; case ICBTAG_FILE_TYPE_SYMLINK: inode->i_data.a_ops = &udf_symlink_aops; - inode->i_op = &page_symlink_inode_operations; + inode->i_op = &udf_symlink_inode_operations; inode->i_mode = S_IFLNK | S_IRWXUGO; break; case ICBTAG_FILE_TYPE_MAIN: diff --git a/fs/udf/namei.c b/fs/udf/namei.c index db423ab078b..75816025f95 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -925,7 +925,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, iinfo = UDF_I(inode); inode->i_mode = S_IFLNK | S_IRWXUGO; inode->i_data.a_ops = &udf_symlink_aops; - inode->i_op = &page_symlink_inode_operations; + inode->i_op = &udf_symlink_inode_operations; if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { struct kernel_lb_addr eloc; @@ -1393,6 +1393,7 @@ const struct export_operations udf_export_ops = { const struct inode_operations udf_dir_inode_operations = { .lookup = udf_lookup, .create = udf_create, + .setattr = udf_setattr, .link = udf_link, .unlink = udf_unlink, .symlink = udf_symlink, @@ -1401,3 +1402,9 @@ const struct inode_operations udf_dir_inode_operations = { .mknod = udf_mknod, .rename = udf_rename, }; +const struct inode_operations udf_symlink_inode_operations = { + .readlink = generic_readlink, + .follow_link = page_follow_link_light, + .put_link = page_put_link, + .setattr = udf_setattr, +}; diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index 4223ac855da..702a1148e70 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h @@ -76,6 +76,7 @@ extern const struct inode_operations udf_dir_inode_operations; extern const struct file_operations udf_dir_operations; extern const struct inode_operations udf_file_inode_operations; extern const struct file_operations udf_file_operations; +extern const struct inode_operations udf_symlink_inode_operations; extern const struct address_space_operations udf_aops; extern const struct address_space_operations udf_adinicb_aops; extern const struct address_space_operations udf_symlink_aops; @@ -131,7 +132,7 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, /* file.c */ extern int udf_ioctl(struct inode *, struct file *, unsigned int, unsigned long); - +extern int udf_setattr(struct dentry *dentry, struct iattr *iattr); /* inode.c */ extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr 
*); extern int udf_sync_inode(struct inode *); diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 05cd85317f6..fd969821575 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -820,10 +820,10 @@ xfs_reclaim_inode( * call into reclaim to find it in a clean state instead of waiting for * it now. We also don't return errors here - if the error is transient * then the next reclaim pass will flush the inode, and if the error - * is permanent then the next sync reclaim will relcaim the inode and + * is permanent then the next sync reclaim will reclaim the inode and * pass on the error. */ - if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { + if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_fs_cmn_err(CE_WARN, ip->i_mount, "inode 0x%llx background reclaim flush failed with %d", (long long)ip->i_ino, error); diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index e8fba92d7cd..2be01913628 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -745,9 +745,16 @@ xfs_log_move_tail(xfs_mount_t *mp, /* * Determine if we have a transaction that has gone to disk - * that needs to be covered. Log activity needs to be idle (no AIL and - * nothing in the iclogs). And, we need to be in the right state indicating - * something has gone out. + * that needs to be covered. To begin the transition to the idle state, + * the log first needs to be idle (no AIL and nothing in the iclogs). + * If we are then in a state where covering is needed, the caller is informed + * that dummy transactions are required to move the log into the idle state. + * + * Because this is called as part of the sync process, we should also indicate + * that dummy transactions should be issued in anything but the covered or + * idle states. This ensures that the log tail is accurately reflected in + * the log at the end of the sync, so that if a crash occurs we avoid replay + * of transactions where the metadata is already on disk.
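The covering protocol this comment describes is driven from the periodic sync code, roughly as in the following sketch; the helper name and its flags argument are assumptions, and error handling is elided:

	/* sketch of the intended caller (names assumed) */
	if (xfs_log_need_covered(mp))
		error = xfs_commit_dummy_trans(mp, 0);	/* assumed helper */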
*/ int xfs_log_need_covered(xfs_mount_t *mp) @@ -759,17 +766,24 @@ xfs_log_need_covered(xfs_mount_t *mp) return 0; spin_lock(&log->l_icloglock); - if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || - (log->l_covered_state == XLOG_STATE_COVER_NEED2)) - && !xfs_trans_ail_tail(log->l_ailp) - && xlog_iclogs_empty(log)) { - if (log->l_covered_state == XLOG_STATE_COVER_NEED) - log->l_covered_state = XLOG_STATE_COVER_DONE; - else { - ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2); - log->l_covered_state = XLOG_STATE_COVER_DONE2; + switch (log->l_covered_state) { + case XLOG_STATE_COVER_DONE: + case XLOG_STATE_COVER_DONE2: + case XLOG_STATE_COVER_IDLE: + break; + case XLOG_STATE_COVER_NEED: + case XLOG_STATE_COVER_NEED2: + if (!xfs_trans_ail_tail(log->l_ailp) && + xlog_iclogs_empty(log)) { + if (log->l_covered_state == XLOG_STATE_COVER_NEED) + log->l_covered_state = XLOG_STATE_COVER_DONE; + else + log->l_covered_state = XLOG_STATE_COVER_DONE2; } + /* FALLTHRU */ + default: needed = 1; + break; } spin_unlock(&log->l_icloglock); return needed; diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 04a6ebc27b9..2d428b088cc 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -6,6 +6,7 @@ {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ diff --git a/include/linux/ata.h b/include/linux/ata.h index b4c85e2adef..700c5b9b358 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -1025,8 +1025,8 @@ static inline int ata_ok(u8 status) static inline int lba_28_ok(u64 block, u32 n_block) { - /* check the ending block number */ - return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256); + /* check the ending block number: must be LESS THAN 0x0fffffff */ + return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256); } static inline int lba_48_ok(u64 block, u32 n_block) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index b7938987923..b796eab5ca7 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -21,9 +21,6 @@ (bit) < (size); \ (bit) = find_next_bit((addr), (size), (bit) + 1)) -/* Temporary */ -#define for_each_bit(bit, addr, size) for_each_set_bit(bit, addr, size) - static __inline__ int get_bitmask_order(unsigned int count) { int order; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ebd22dbed86..6690e8bae7b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -158,7 +158,6 @@ enum rq_flag_bits { struct request { struct list_head queuelist; struct call_single_data csd; - int cpu; struct request_queue *q; @@ -166,9 +165,11 @@ struct request { enum rq_cmd_type_bits cmd_type; unsigned long atomic_flags; + int cpu; + /* the following two fields are internal, NEVER access directly */ - sector_t __sector; /* sector cursor */ unsigned int __data_len; /* total data len */ + sector_t __sector; /* sector cursor */ struct bio *bio; struct bio *biotail; @@ -201,20 +202,20 @@ struct request { unsigned short ioprio; + int ref_count; + 
void *special; /* opaque pointer available for LLD use */ char *buffer; /* kaddr of the current segment if available */ int tag; int errors; - int ref_count; - /* * when request is used as a packet command carrier */ - unsigned short cmd_len; unsigned char __cmd[BLK_MAX_CDB]; unsigned char *cmd; + unsigned short cmd_len; unsigned int extra_len; /* length of alignment and padding */ unsigned int sense_len; @@ -921,26 +922,7 @@ extern void blk_cleanup_queue(struct request_queue *); extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void blk_queue_bounce_limit(struct request_queue *, u64); extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); - -/* Temporary compatibility wrapper */ -static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max) -{ - blk_queue_max_hw_sectors(q, max); -} - extern void blk_queue_max_segments(struct request_queue *, unsigned short); - -static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max) -{ - blk_queue_max_segments(q, max); -} - -static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max) -{ - blk_queue_max_segments(q, max); -} - - extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); @@ -1030,11 +1012,6 @@ static inline int sb_issue_discard(struct super_block *sb, extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); -#define MAX_PHYS_SEGMENTS 128 -#define MAX_HW_SEGMENTS 128 -#define SAFE_MAX_SECTORS 255 -#define MAX_SEGMENT_SIZE 65536 - enum blk_default_limits { BLK_MAX_SEGMENTS = 128, BLK_SAFE_MAX_SECTORS = 255, diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 78962272338..4341b1a97a3 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -56,7 +56,7 @@ extern const char *drbd_buildtag(void); #define REL_VERSION "8.3.7" #define API_VERSION 88 #define PRO_VERSION_MIN 86 -#define PRO_VERSION_MAX 91 +#define PRO_VERSION_MAX 92 enum drbd_io_error_p { diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h index a4d82f89599..f7431a4ca60 100644 --- a/include/linux/drbd_nl.h +++ b/include/linux/drbd_nl.h @@ -12,7 +12,7 @@ #endif NL_PACKET(primary, 1, - NL_BIT( 1, T_MAY_IGNORE, overwrite_peer) + NL_BIT( 1, T_MAY_IGNORE, primary_force) ) NL_PACKET(secondary, 2, ) @@ -63,6 +63,7 @@ NL_PACKET(net_conf, 5, NL_BIT( 41, T_MAY_IGNORE, always_asbp) NL_BIT( 61, T_MAY_IGNORE, no_cork) NL_BIT( 62, T_MANDATORY, auto_sndbuf_size) + NL_BIT( 70, T_MANDATORY, dry_run) ) NL_PACKET(disconnect, 6, ) diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 40b11013408..81f3b14d5d7 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -1,21 +1,26 @@ /* * Char device interface. * - * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #ifndef _LINUX_FIREWIRE_CDEV_H @@ -438,7 +443,7 @@ struct fw_cdev_remove_descriptor { * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE * @header_size: Header size to strip for receive contexts * @channel: Channel to bind to - * @speed: Speed to transmit at + * @speed: Speed for transmit contexts * @closure: To be returned in &fw_cdev_event_iso_interrupt * @handle: Handle to context, written back by kernel * @@ -451,6 +456,9 @@ struct fw_cdev_remove_descriptor { * If a context was successfully created, the kernel writes back a handle to the * context, which must be passed in for subsequent operations on that context. * + * For receive contexts, @header_size must be at least 4 and must be a multiple + * of 4. + * * Note that the effect of a @header_size > 4 depends on * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. */ @@ -481,10 +489,34 @@ struct fw_cdev_create_iso_context { * * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. * - * Use the FW_CDEV_ISO_ macros to fill in @control. The sy and tag fields are - * specified by IEEE 1394a and IEC 61883. - * - * FIXME - finish this documentation + * Use the FW_CDEV_ISO_ macros to fill in @control. + * + * For transmit packets, the header length must be a multiple of 4 and specifies + * the numbers of bytes in @header that will be prepended to the packet's + * payload; these bytes are copied into the kernel and will not be accessed + * after the ioctl has returned. The sy and tag fields are copied to the iso + * packet header (these fields are specified by IEEE 1394a and IEC 61883-1). + * The skip flag specifies that no packet is to be sent in a frame; when using + * this, all other fields except the interrupt flag must be zero. + * + * For receive packets, the header length must be a multiple of the context's + * header size; if the header length is larger than the context's header size, + * multiple packets are queued for this entry. The sy and tag fields are + * ignored. 
If the sync flag is set, the context drops all packets until + * a packet with a matching sy field is received (the sync value to wait for is + * specified in the &fw_cdev_start_iso structure). The payload length defines + * how many payload bytes can be received for one packet (in addition to payload + * quadlets that have been defined as headers and are stripped and returned in + * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the + * additional bytes are dropped. If fewer bytes are received, the remaining + * bytes in this part of the payload buffer will not be written to, not even by + * the next packet, i.e., packets received in consecutive frames will not + * necessarily be consecutive in memory. If an entry has queued multiple + * packets, the payload length is divided equally among them. + * + * When a packet with the interrupt flag set has been completed, the + * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued + * multiple receive packets is completed when its last packet is completed. */ struct fw_cdev_iso_packet { __u32 control; @@ -501,7 +533,7 @@ struct fw_cdev_iso_packet { * Queue a number of isochronous packets for reception or transmission. * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs, * which describe how to transmit from or receive into a contiguous region - * of a mmap()'ed payload buffer. As part of the packet descriptors, + * of a mmap()'ed payload buffer. As part of transmit packet descriptors, * a series of headers can be supplied, which will be prepended to the * payload during DMA. * @@ -620,8 +652,8 @@ struct fw_cdev_get_cycle_timer2 { * instead of allocated. * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. * - * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources - * for the lifetime of the fd or handle. + * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources + * for the lifetime of the fd or @handle. * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources * for the duration of a bus generation. * diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h index b316770a43f..9c63f06e67f 100644 --- a/include/linux/firewire-constants.h +++ b/include/linux/firewire-constants.h @@ -1,3 +1,28 @@ +/* + * IEEE 1394 constants. + * + * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + #ifndef _LINUX_FIREWIRE_CONSTANTS_H #define _LINUX_FIREWIRE_CONSTANTS_H @@ -21,7 +46,7 @@ #define EXTCODE_WRAP_ADD 0x6 #define EXTCODE_VENDOR_DEPENDENT 0x7 -/* Juju specific tcodes */ +/* Linux firewire-core (Juju) specific tcodes */ #define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP) #define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP) #define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD) @@ -36,7 +61,7 @@ #define RCODE_TYPE_ERROR 0x6 #define RCODE_ADDRESS_ERROR 0x7 -/* Juju specific rcodes */ +/* Linux firewire-core (Juju) specific rcodes */ #define RCODE_SEND_ERROR 0x10 #define RCODE_CANCELLED 0x11 #define RCODE_BUSY 0x12 diff --git a/include/linux/fs.h b/include/linux/fs.h index 10b8dedcd18..39d57bc6cc7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2212,6 +2212,7 @@ extern int generic_segment_checks(const struct iovec *iov, /* fs/block_dev.c */ extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); +extern int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync); /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 56b50514ab2..5f2f4c4d8fb 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -109,7 +109,7 @@ struct hd_struct { }; #define GENHD_FL_REMOVABLE 1 -#define GENHD_FL_DRIVERFS 2 +/* 2 is unused */ #define GENHD_FL_MEDIA_CHANGE_NOTIFY 4 #define GENHD_FL_CD 8 #define GENHD_FL_UP 16 diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 87018dc5527..9e7a12d6385 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h @@ -782,7 +782,6 @@ extern int i2o_exec_lct_get(struct i2o_controller *); #define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver) #define to_i2o_device(dev) container_of(dev, struct i2o_device, device) #define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device) -#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj)) /** * i2o_out_to_virt - Turn an I2O message to a virtual address diff --git a/include/linux/ide.h b/include/linux/ide.h index 97e6ab43518..3239d1c10ac 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1169,6 +1169,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); extern void ide_timer_expiry(unsigned long); extern irqreturn_t ide_intr(int irq, void *dev_id); extern void do_ide_request(struct request_queue *); +extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); void ide_init_disk(struct gendisk *, ide_drive_t *); diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index 3bd018baae2..c964cd7f436 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h @@ -44,6 +44,7 @@ struct matrix_keymap_data { * @active_low: gpio polarity * @wakeup: controls whether the device should be set up as wakeup * source + * @no_autorepeat: disable key autorepeat * * This structure represents platform-specific data that use used by * matrix_keypad driver to perform proper initialization. 
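As context for the new @no_autorepeat flag added in the hunk below, a minimal board-file sketch of a platform definition using it; all identifiers are hypothetical, and the row/column GPIO members are elided:

	static const uint32_t board_keymap[] = {
		KEY(0, 0, KEY_ENTER),	/* row 0, column 0 */
		KEY(1, 0, KEY_ESC),	/* row 1, column 0 */
	};

	static const struct matrix_keymap_data board_keymap_data = {
		.keymap		= board_keymap,
		.keymap_size	= ARRAY_SIZE(board_keymap),
	};

	static struct matrix_keypad_platform_data board_keypad_pdata = {
		.keymap_data	= &board_keymap_data,
		/* row_gpios/col_gpios/num_row_gpios/num_col_gpios elided */
		.active_low	= true,
		.wakeup		= true,
		.no_autorepeat	= true,	/* new field: suppress autorepeat */
	};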
@@ -64,6 +65,7 @@ struct matrix_keypad_platform_data { bool active_low; bool wakeup; + bool no_autorepeat; }; /** diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h index 6092487e295..d2e4042f8f5 100644 --- a/include/linux/iscsi_ibft.h +++ b/include/linux/iscsi_ibft.h @@ -42,9 +42,13 @@ extern struct ibft_table_header *ibft_addr; * mapped address is set in the ibft_addr variable. */ #ifdef CONFIG_ISCSI_IBFT_FIND -extern void __init reserve_ibft_region(void); +unsigned long find_ibft_region(unsigned long *sizep); #else -static inline void reserve_ibft_region(void) { } +static inline unsigned long find_ibft_region(unsigned long *sizep) +{ + *sizep = 0; + return 0; +} #endif #endif /* ISCSI_IBFT_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 7f070746336..9365227dbaf 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -426,7 +426,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte) .burst = DEFAULT_RATELIMIT_BURST, \ }; \ \ - if (!__ratelimit(&_rs)) \ + if (__ratelimit(&_rs)) \ printk(fmt, ##__VA_ARGS__); \ }) #else diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index ece0b1c3381..e117b1aee69 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -86,7 +86,8 @@ union { \ */ #define INIT_KFIFO(name) \ name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \ - sizeof(struct kfifo), name##kfifo_buffer) + sizeof(struct kfifo), \ + name##kfifo_buffer + sizeof(struct kfifo)) /** * DEFINE_KFIFO - macro to define and initialize a kfifo diff --git a/include/linux/lcm.h b/include/linux/lcm.h new file mode 100644 index 00000000000..7bf01d779b4 --- /dev/null +++ b/include/linux/lcm.h @@ -0,0 +1,8 @@ +#ifndef _LCM_H +#define _LCM_H + +#include <linux/compiler.h> + +unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; + +#endif /* _LCM_H */ diff --git a/include/linux/libata.h b/include/linux/libata.h index f8ea71e6d0e..b2f2003b92e 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -146,6 +146,7 @@ enum { ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ + ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ ATA_DFLAG_INIT_MASK = (1 << 24) - 1, ATA_DFLAG_DETACH = (1 << 24), diff --git a/include/linux/mm.h b/include/linux/mm.h index e70f21beb4b..462acaf36f3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -783,8 +783,8 @@ struct mm_walk { int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); - int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long, - struct mm_walk *); + int (*hugetlb_entry)(pte_t *, unsigned long, + unsigned long, unsigned long, struct mm_walk *); struct mm_struct *mm; void *private; }; diff --git a/include/linux/module.h b/include/linux/module.h index 8bd399a0034..515d53ae6a7 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -368,7 +368,8 @@ struct module void (*exit)(void); struct module_ref { - int count; + unsigned int incs; + unsigned int decs; } __percpu *refptr; #endif @@ -463,9 +464,9 @@ static inline void __module_get(struct module *module) { if (module) { preempt_disable(); - __this_cpu_inc(module->refptr->count); + __this_cpu_inc(module->refptr->incs); trace_module_get(module, _THIS_IP_, - 
__this_cpu_read(module->refptr->count)); + __this_cpu_read(module->refptr->incs)); preempt_enable(); } } @@ -478,11 +479,10 @@ static inline int try_module_get(struct module *module) preempt_disable(); if (likely(module_is_live(module))) { - __this_cpu_inc(module->refptr->count); + __this_cpu_inc(module->refptr->incs); trace_module_get(module, _THIS_IP_, - __this_cpu_read(module->refptr->count)); - } - else + __this_cpu_read(module->refptr->incs)); + } else ret = 0; preempt_enable(); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 717a5e54eb1..e82957acea5 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -176,6 +176,7 @@ struct nfs_server { #define NFS_CAP_ATIME (1U << 11) #define NFS_CAP_CTIME (1U << 12) #define NFS_CAP_MTIME (1U << 13) +#define NFS_CAP_POSIX_LOCK (1U << 14) /* maximum number of slots to use */ diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 30b08136fdf..aef22ae2af4 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -39,6 +39,7 @@ enum { PCG_CACHE, /* charged as cache */ PCG_USED, /* this object is in use. */ PCG_ACCT_LRU, /* page has been accounted for */ + PCG_FILE_MAPPED, /* page is accounted as "mapped" */ }; #define TESTPCGFLAG(uname, lname) \ @@ -73,6 +74,11 @@ CLEARPCGFLAG(AcctLRU, ACCT_LRU) TESTPCGFLAG(AcctLRU, ACCT_LRU) TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU) + +SETPCGFLAG(FileMapped, FILE_MAPPED) +CLEARPCGFLAG(FileMapped, FILE_MAPPED) +TESTPCGFLAG(FileMapped, FILE_MAPPED) + static inline int page_cgroup_nid(struct page_cgroup *pc) { return page_to_nid(pc->page); diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index c5da7491809..55ca73cf25e 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -121,6 +121,13 @@ do { \ * (Note, rcu_assign_pointer and rcu_dereference are not needed to control * access to data items when inserting into or looking up from the radix tree) * + * Note that the value returned by radix_tree_tag_get() may not be relied upon + * if only the RCU read lock is held. Functions to set/clear tags and to + * delete nodes running concurrently with it may affect its result such that + * two consecutive reads in the same locked section may return different + * values. If reliability is required, modification functions must also be + * excluded from concurrency. + * * radix_tree_tagged is able to be called without locking or RCU. */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 872a98e13d6..07db2feb857 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -101,10 +101,7 @@ extern struct lockdep_map rcu_sched_lock_map; # define rcu_read_release_sched() \ lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) -static inline int debug_lockdep_rcu_enabled(void) -{ - return likely(rcu_scheduler_active && debug_locks); -} +extern int debug_lockdep_rcu_enabled(void); /** * rcu_read_lock_held - might we be in RCU read-side critical section? @@ -195,12 +192,30 @@ static inline int rcu_read_lock_sched_held(void) /** * rcu_dereference_check - rcu_dereference with debug checking + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place + * + * Do an rcu_dereference(), but check that the conditions under which the + * dereference will take place are correct. Typically the conditions indicate + * the various locking conditions that should be held at that point. The check + * should return true if the conditions are satisfied. 
+ * + * For example: + * + * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || + * lockdep_is_held(&foo->lock)); * - * Do an rcu_dereference(), but check that the context is correct. - * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to - * ensure that the rcu_dereference_check() executes within an RCU - * read-side critical section. It is also possible to check for - * locks being held, for example, by using lockdep_is_held(). + * could be used to indicate to lockdep that foo->bar may only be dereferenced + * if either the RCU read lock is held, or that the lock required to replace + * the bar struct at foo->bar is held. + * + * Note that the list of conditions may also include indications of when a lock + * need not be held, for example during initialisation or destruction of the + * target struct: + * + * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || + * lockdep_is_held(&foo->lock) || + * atomic_read(&foo->usage) == 0); */ #define rcu_dereference_check(p, c) \ ({ \ @@ -209,13 +224,45 @@ static inline int rcu_read_lock_sched_held(void) rcu_dereference_raw(p); \ }) +/** + * rcu_dereference_protected - fetch RCU pointer when updates prevented + * + * Return the value of the specified RCU-protected pointer, but omit + * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This + * is useful in cases where update-side locks prevent the value of the + * pointer from changing. Please note that this primitive does -not- + * prevent the compiler from repeating this reference or combining it + * with other references, so it should not be used without protection + * of appropriate locks. + */ +#define rcu_dereference_protected(p, c) \ + ({ \ + if (debug_lockdep_rcu_enabled() && !(c)) \ + lockdep_rcu_dereference(__FILE__, __LINE__); \ + (p); \ + }) + #else /* #ifdef CONFIG_PROVE_RCU */ #define rcu_dereference_check(p, c) rcu_dereference_raw(p) +#define rcu_dereference_protected(p, c) (p) #endif /* #else #ifdef CONFIG_PROVE_RCU */ /** + * rcu_access_pointer - fetch RCU pointer with no dereferencing + * + * Return the value of the specified RCU-protected pointer, but omit the + * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful + * when the value of this pointer is accessed, but the pointer is not + * dereferenced, for example, when testing an RCU-protected pointer against + * NULL. This may also be used in cases where update-side locks prevent + * the value of the pointer from changing, but rcu_dereference_protected() + * is a lighter-weight primitive for this use case. + */ +#define rcu_access_pointer(p) ACCESS_ONCE(p) + +/** * rcu_read_lock - mark the beginning of an RCU read-side critical section. 
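Taken together, the new accessors divide the work as in this sketch, where gp is an assumed RCU-protected pointer guarded on the update side by an assumed gp_lock:

	/* reader: needs the full barrier/ACCESS_ONCE semantics */
	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		use(p);		/* use() is hypothetical */
	rcu_read_unlock();

	/* testing the pointer value only, no dereference follows */
	if (rcu_access_pointer(gp) == NULL)
		return;

	/* updater: the lock prevents concurrent changes, so the
	 * lighter-weight primitive suffices */
	spin_lock(&gp_lock);
	p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	spin_unlock(&gp_lock);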
* * When synchronize_rcu() is invoked on one CPU while other CPUs diff --git a/include/linux/slab.h b/include/linux/slab.h index 488446289ca..49d1247cd6d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -106,6 +106,7 @@ int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); unsigned int kmem_cache_size(struct kmem_cache *); const char *kmem_cache_name(struct kmem_cache *); +int kern_ptr_validate(const void *ptr, unsigned long size); int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); /* diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index ae4f039515b..92228a8fbcb 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h @@ -12,37 +12,14 @@ /* Feature bits */ #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ -#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ struct virtio_console_config { /* colums of the screens */ __u16 cols; /* rows of the screens */ __u16 rows; - /* max. number of ports this device can hold */ - __u32 max_nr_ports; - /* number of ports added so far */ - __u32 nr_ports; } __attribute__((packed)); -/* - * A message that's passed between the Host and the Guest for a - * particular port. - */ -struct virtio_console_control { - __u32 id; /* Port number */ - __u16 event; /* The kind of control event (see below) */ - __u16 value; /* Extra information for the key */ -}; - -/* Some events for control messages */ -#define VIRTIO_CONSOLE_PORT_READY 0 -#define VIRTIO_CONSOLE_CONSOLE_PORT 1 -#define VIRTIO_CONSOLE_RESIZE 2 -#define VIRTIO_CONSOLE_PORT_OPEN 3 -#define VIRTIO_CONSOLE_PORT_NAME 4 -#define VIRTIO_CONSOLE_PORT_REMOVE 5 - #ifdef __KERNEL__ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); #endif /* __KERNEL__ */ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 76e8903cd20..36520ded3e0 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -34,6 +34,9 @@ struct writeback_control { enum writeback_sync_modes sync_mode; unsigned long *older_than_this; /* If !NULL, only write back inodes older than this */ + unsigned long wb_start; /* Time writeback_inodes_wb was + called. 
This is needed to avoid + extra jobs and livelock */ long nr_to_write; /* Write this many pages, and decrement this for each page written */ long pages_skipped; /* Pages which were not written */ diff --git a/include/net/x25.h b/include/net/x25.h index 15ef9624ab7..468551ea4f1 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -183,6 +183,10 @@ extern int sysctl_x25_clear_request_timeout; extern int sysctl_x25_ack_holdback_timeout; extern int sysctl_x25_forward; +extern int x25_parse_address_block(struct sk_buff *skb, + struct x25_address *called_addr, + struct x25_address *calling_addr); + extern int x25_addr_ntoa(unsigned char *, struct x25_address *, struct x25_address *); extern int x25_addr_aton(unsigned char *, struct x25_address *, diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h index 8988edae160..2609048c1d4 100644 --- a/include/sound/ak4113.h +++ b/include/sound/ak4113.h @@ -307,7 +307,7 @@ struct ak4113 { int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read, ak4113_write_t *write, - const unsigned char pgm[AK4113_WRITABLE_REGS], + const unsigned char *pgm, void *private_data, struct ak4113 **r_ak4113); void snd_ak4113_reg_write(struct ak4113 *ak4113, unsigned char reg, unsigned char mask, unsigned char val); diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 061f16d4c87..0a0b019d41a 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -219,7 +219,6 @@ struct snd_soc_dai { struct snd_soc_codec *codec; unsigned int active; unsigned char pop_wait:1; - void *dma_data; /* DAI private data */ void *private_data; @@ -230,4 +229,21 @@ struct snd_soc_dai { struct list_head list; }; +static inline void *snd_soc_dai_get_dma_data(const struct snd_soc_dai *dai, + const struct snd_pcm_substream *ss) +{ + return (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) ? + dai->playback.dma_data : dai->capture.dma_data; +} + +static inline void snd_soc_dai_set_dma_data(struct snd_soc_dai *dai, + const struct snd_pcm_substream *ss, + void *data) +{ + if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) + dai->playback.dma_data = data; + else + dai->capture.dma_data = data; +} + #endif diff --git a/include/sound/soc.h b/include/sound/soc.h index 5d234a8c250..a57fbfcd4c8 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -375,6 +375,7 @@ struct snd_soc_pcm_stream { unsigned int channels_min; /* min channels */ unsigned int channels_max; /* max channels */ unsigned int active:1; /* stream is in use */ + void *dma_data; /* used by platform code */ }; /* SoC audio ops */ diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 5fb72733331..d870a918559 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -40,6 +40,16 @@ DECLARE_EVENT_CLASS(block_rq_with_error, __entry->nr_sector, __entry->errors) ); +/** + * block_rq_abort - abort block operation request + * @q: queue containing the block operation request + * @rq: block IO operation request + * + * Called immediately after pending block IO operation request @rq in + * queue @q is aborted. The fields in the operation request @rq + * can be examined to determine which device and sectors the pending + * operation would access. 
+ */ DEFINE_EVENT(block_rq_with_error, block_rq_abort, TP_PROTO(struct request_queue *q, struct request *rq), @@ -47,6 +57,15 @@ DEFINE_EVENT(block_rq_with_error, block_rq_abort, TP_ARGS(q, rq) ); +/** + * block_rq_requeue - place block IO request back on a queue + * @q: queue holding operation + * @rq: block IO operation request + * + * The block operation request @rq is being placed back into queue + * @q. For some reason the request was not completed and needs to be + * put back in the queue. + */ DEFINE_EVENT(block_rq_with_error, block_rq_requeue, TP_PROTO(struct request_queue *q, struct request *rq), @@ -54,6 +73,17 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue, TP_ARGS(q, rq) ); +/** + * block_rq_complete - block IO operation completed by device driver + * @q: queue containing the block operation request + * @rq: block operation request + * + * The block_rq_complete tracepoint event indicates that some portion + * of the operation request has been completed by the device driver. If + * @rq->bio is %NULL, then there is absolutely no additional work to + * do for the request. If @rq->bio is non-NULL then there is + * additional work required to complete the request. + */ DEFINE_EVENT(block_rq_with_error, block_rq_complete, TP_PROTO(struct request_queue *q, struct request *rq), @@ -95,6 +125,16 @@ DECLARE_EVENT_CLASS(block_rq, __entry->nr_sector, __entry->comm) ); +/** + * block_rq_insert - insert block operation request into queue + * @q: target queue + * @rq: block IO operation request + * + * Called immediately before block operation request @rq is inserted + * into queue @q. The fields in the operation request @rq struct can + * be examined to determine which device and sectors the pending + * operation would access. + */ DEFINE_EVENT(block_rq, block_rq_insert, TP_PROTO(struct request_queue *q, struct request *rq), @@ -102,6 +142,14 @@ DEFINE_EVENT(block_rq, block_rq_insert, TP_ARGS(q, rq) ); +/** + * block_rq_issue - issue pending block IO request to device driver + * @q: queue holding operation + * @rq: block IO operation request + * + * Called when block operation request @rq from queue @q is sent to a + * device driver for processing. + */ DEFINE_EVENT(block_rq, block_rq_issue, TP_PROTO(struct request_queue *q, struct request *rq), @@ -109,6 +157,17 @@ DEFINE_EVENT(block_rq, block_rq_issue, TP_ARGS(q, rq) ); +/** + * block_bio_bounce - used bounce buffer when processing block operation + * @q: queue holding the block operation + * @bio: block operation + * + * A bounce buffer was used to handle the block operation @bio in @q. + * This occurs when hardware limitations prevent a direct transfer of + * data between the @bio data memory area and the IO device. Use of a + * bounce buffer requires extra copying of data and decreases + * performance. + */ TRACE_EVENT(block_bio_bounce, TP_PROTO(struct request_queue *q, struct bio *bio), @@ -138,6 +197,14 @@ TRACE_EVENT(block_bio_bounce, __entry->nr_sector, __entry->comm) ); +/** + * block_bio_complete - completed all work on the block operation + * @q: queue holding the block operation + * @bio: block operation completed + * + * This tracepoint indicates there is no further work to do on this + * block IO operation @bio.
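As with all of these events, a probe whose signature mirrors the TP_PROTO can be attached through the register_trace_<name>() stubs that DEFINE_EVENT generates in this kernel generation; a minimal sketch for block_rq_complete, with module plumbing and return-value checking elided:

	static void probe_rq_complete(struct request_queue *q, struct request *rq)
	{
		/* rq->bio == NULL here means no work remains on the request */
	}

	/* attach/detach, e.g. from module init/exit */
	register_trace_block_rq_complete(probe_rq_complete);
	unregister_trace_block_rq_complete(probe_rq_complete);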
+ */ TRACE_EVENT(block_bio_complete, TP_PROTO(struct request_queue *q, struct bio *bio), @@ -193,6 +260,14 @@ DECLARE_EVENT_CLASS(block_bio, __entry->nr_sector, __entry->comm) ); +/** + * block_bio_backmerge - merging block operation to the end of an existing operation + * @q: queue holding operation + * @bio: new block operation to merge + * + * Merging block request @bio to the end of an existing block request + * in queue @q. + */ DEFINE_EVENT(block_bio, block_bio_backmerge, TP_PROTO(struct request_queue *q, struct bio *bio), @@ -200,6 +275,14 @@ DEFINE_EVENT(block_bio, block_bio_backmerge, TP_ARGS(q, bio) ); +/** + * block_bio_frontmerge - merging block operation to the beginning of an existing operation + * @q: queue holding operation + * @bio: new block operation to merge + * + * Merging block IO operation @bio to the beginning of an existing block + * operation in queue @q. + */ DEFINE_EVENT(block_bio, block_bio_frontmerge, TP_PROTO(struct request_queue *q, struct bio *bio), @@ -207,6 +290,13 @@ DEFINE_EVENT(block_bio, block_bio_frontmerge, TP_ARGS(q, bio) ); +/** + * block_bio_queue - putting new block IO operation in queue + * @q: queue holding operation + * @bio: new block operation + * + * About to place the block IO operation @bio into queue @q. + */ DEFINE_EVENT(block_bio, block_bio_queue, TP_PROTO(struct request_queue *q, struct bio *bio), @@ -243,6 +333,15 @@ DECLARE_EVENT_CLASS(block_get_rq, __entry->nr_sector, __entry->comm) ); +/** + * block_getrq - get a free request entry in queue for block IO operations + * @q: queue for operations + * @bio: pending block IO operation + * @rw: low bit indicates a read (%0) or a write (%1) + * + * A request struct for queue @q has been allocated to handle the + * block IO operation @bio. + */ DEFINE_EVENT(block_get_rq, block_getrq, TP_PROTO(struct request_queue *q, struct bio *bio, int rw), @@ -250,6 +349,17 @@ DEFINE_EVENT(block_get_rq, block_getrq, TP_ARGS(q, bio, rw) ); +/** + * block_sleeprq - waiting to get a free request entry in queue for block IO operation + * @q: queue for operation + * @bio: pending block IO operation + * @rw: low bit indicates a read (%0) or a write (%1) + * + * In the case where a request struct cannot be provided for queue @q + * the process needs to wait for a request struct to become + * available. This tracepoint event is generated each time the + * process goes to sleep waiting for a request struct to become available. + */ DEFINE_EVENT(block_get_rq, block_sleeprq, TP_PROTO(struct request_queue *q, struct bio *bio, int rw), @@ -257,6 +367,14 @@ DEFINE_EVENT(block_get_rq, block_sleeprq, TP_ARGS(q, bio, rw) ); +/** + * block_plug - keep operation requests in the request queue + * @q: request queue to plug + * + * Plug the request queue @q. Do not allow block operation requests + * to be sent to the device driver. Instead, accumulate requests in + * the queue to improve throughput performance of the block device. + */ TRACE_EVENT(block_plug, TP_PROTO(struct request_queue *q), @@ -293,6 +411,13 @@ DECLARE_EVENT_CLASS(block_unplug, TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) ); +/** + * block_unplug_timer - timed release of operation requests in queue to the device driver + * @q: request queue to unplug + * + * Unplug the request queue @q because a timer expired and allow block + * operation requests to be sent to the device driver.
+ */ DEFINE_EVENT(block_unplug, block_unplug_timer, TP_PROTO(struct request_queue *q), @@ -300,6 +425,13 @@ DEFINE_EVENT(block_unplug, block_unplug_timer, TP_ARGS(q) ); +/** + * block_unplug_io - release of operation requests in the request queue + * @q: request queue to unplug + * + * Unplug the request queue @q because the device driver is scheduled to work + * on elements in the request queue. + */ DEFINE_EVENT(block_unplug, block_unplug_io, TP_PROTO(struct request_queue *q), @@ -307,6 +439,17 @@ DEFINE_EVENT(block_unplug, block_unplug_io, TP_ARGS(q) ); +/** + * block_split - split a single bio struct into two bio structs + * @q: queue containing the bio + * @bio: block operation being split + * @new_sector: The starting sector for the new bio + * + * The bio request @bio in request queue @q needs to be split into two + * bio requests. The newly created @bio request starts at + * @new_sector. This split may be required due to hardware limitations + * such as an operation crossing device boundaries in a RAID system. + */ TRACE_EVENT(block_split, TP_PROTO(struct request_queue *q, struct bio *bio, @@ -337,6 +480,16 @@ TRACE_EVENT(block_split, __entry->comm) ); +/** + * block_remap - map request for a partition to the raw device + * @q: queue holding the operation + * @bio: revised operation + * @dev: device for the operation + * @from: original sector for the operation + * + * An operation for a partition on a block device has been mapped to the + * raw block device. + */ TRACE_EVENT(block_remap, TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, @@ -370,6 +523,17 @@ TRACE_EVENT(block_remap, (unsigned long long)__entry->old_sector) ); +/** + * block_rq_remap - map request for a block operation request + * @q: queue holding the operation + * @rq: block IO operation request + * @dev: device for the operation + * @from: original sector for the operation + * + * The block operation request @rq in @q has been remapped. The block + * operation request @rq holds the current information and @from holds + * the original sector. + */ TRACE_EVENT(block_rq_remap, TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, diff --git a/kernel/exit.c b/kernel/exit.c index cce59cb5ee6..7f2683a10ac 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -953,7 +953,8 @@ NORET_TYPE void do_exit(long code) acct_update_integrals(tsk); /* sync mm's RSS info before statistics gathering */ - sync_mm_rss(tsk, tsk->mm); + if (tsk->mm) + sync_mm_rss(tsk, tsk->mm); group_dead = atomic_dec_and_test(&tsk->signal->live); if (group_dead) { hrtimer_cancel(&tsk->signal->real_timer); diff --git a/kernel/fork.c b/kernel/fork.c index 4799c5f0e6d..44b0791b0a2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1052,6 +1052,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->prev_utime = cputime_zero; p->prev_stime = cputime_zero; #endif +#if defined(SPLIT_RSS_COUNTING) + memset(&p->rss_stat, 0, sizeof(p->rss_stat)); +#endif p->default_timer_slack_ns = current->timer_slack_ns; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 398fda155f6..704e488730a 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -757,6 +757,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) if (new->flags & IRQF_ONESHOT) desc->status |= IRQ_ONESHOT; + /* + * Force MSI interrupts to run with interrupts + * disabled. The multi vector cards can cause stack + * overflows due to nested interrupts when enough of + * them are directed to a core and fire at the same + * time.
+ */ + if (desc->msi_desc) + new->flags |= IRQF_DISABLED; + if (!(desc->status & IRQ_NOAUTOEN)) { desc->depth = 0; desc->status &= ~IRQ_DISABLED; diff --git a/kernel/module.c b/kernel/module.c index 9f8d23d8b3a..1016b75b026 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -521,11 +521,13 @@ static void module_unload_init(struct module *mod) int cpu; INIT_LIST_HEAD(&mod->modules_which_use_me); - for_each_possible_cpu(cpu) - per_cpu_ptr(mod->refptr, cpu)->count = 0; + for_each_possible_cpu(cpu) { + per_cpu_ptr(mod->refptr, cpu)->incs = 0; + per_cpu_ptr(mod->refptr, cpu)->decs = 0; + } /* Hold reference count during initialization. */ - __this_cpu_write(mod->refptr->count, 1); + __this_cpu_write(mod->refptr->incs, 1); /* Backwards compatibility macros put refcount during init. */ mod->waiter = current; } @@ -664,12 +666,28 @@ static int try_stop_module(struct module *mod, int flags, int *forced) unsigned int module_refcount(struct module *mod) { - unsigned int total = 0; + unsigned int incs = 0, decs = 0; int cpu; for_each_possible_cpu(cpu) - total += per_cpu_ptr(mod->refptr, cpu)->count; - return total; + decs += per_cpu_ptr(mod->refptr, cpu)->decs; + /* + * ensure the incs are added up after the decs. + * module_put ensures incs are visible before decs with smp_wmb. + * + * This 2-count scheme avoids the situation where the refcount + * for CPU0 is read, then CPU0 increments the module refcount, + * then CPU1 drops that refcount, then the refcount for CPU1 is + * read. We would record a decrement but not its corresponding + * increment so we would see a low count (disaster). + * + * Rare situation? But module_refcount can be preempted, and we + * might be tallying up 4096+ CPUs. So it is not impossible. + */ + smp_rmb(); + for_each_possible_cpu(cpu) + incs += per_cpu_ptr(mod->refptr, cpu)->incs; + return incs - decs; } EXPORT_SYMBOL(module_refcount); @@ -846,10 +864,11 @@ void module_put(struct module *module) { if (module) { preempt_disable(); - __this_cpu_dec(module->refptr->count); + smp_wmb(); /* see comment in module_refcount */ + __this_cpu_inc(module->refptr->decs); trace_module_put(module, _RET_IP_, - __this_cpu_read(module->refptr->count)); + __this_cpu_read(module->refptr->decs)); /* Maybe they're waiting for us to drop reference? */ if (unlikely(!module_is_live(module))) wake_up_process(module->waiter); diff --git a/kernel/power/user.c b/kernel/power/user.c index 4d2289626a8..a8c96212bc1 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -420,7 +420,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, * User space encodes device types as two-byte values, * so we need to recode them */ - swdev = old_decode_dev(swap_area.dev); + swdev = new_decode_dev(swap_area.dev); if (swdev) { offset = swap_area.offset; data->swap = swap_type_of(swdev, offset, NULL); diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 63fe2543398..03a7ea1579f 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -69,6 +69,13 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active); #ifdef CONFIG_DEBUG_LOCK_ALLOC +int debug_lockdep_rcu_enabled(void) +{ + return rcu_scheduler_active && debug_locks && + current->lockdep_recursion == 0; +} +EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); + /** * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? 
* diff --git a/kernel/sched.c b/kernel/sched.c index a3dff1f3f9b..6af210a7de7 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4903,7 +4903,7 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, int ret; cpumask_var_t mask; - if (len < nr_cpu_ids) + if ((len * BITS_PER_BYTE) < nr_cpu_ids) return -EINVAL; if (len & (sizeof(unsigned long)-1)) return -EINVAL; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1fafb4b99c9..935248bdbc4 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -356,7 +356,7 @@ config SLUB_STATS config DEBUG_KMEMLEAK bool "Kernel memory leak detector" depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ - (X86 || ARM || PPC || S390 || SUPERH) + (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE) select DEBUG_FS if SYSFS select STACKTRACE if STACKTRACE_SUPPORT diff --git a/lib/Makefile b/lib/Makefile index 2e152aed719..0d4015205c6 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - string_helpers.o gcd.o list_sort.o + string_helpers.o gcd.o lcm.o list_sort.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/dma-debug.c b/lib/dma-debug.c index ba8b67039d1..01e64270e24 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf, * Now parse out the first token and use it as the name for the * driver to filter for. */ - for (i = 0; i < NAME_MAX_LEN; ++i) { + for (i = 0; i < NAME_MAX_LEN - 1; ++i) { current_driver_name[i] = buf[i]; if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) break; diff --git a/lib/lcm.c b/lib/lcm.c new file mode 100644 index 00000000000..157cd88a6ff --- /dev/null +++ b/lib/lcm.c @@ -0,0 +1,15 @@ +#include <linux/kernel.h> +#include <linux/gcd.h> +#include <linux/module.h> + +/* Lowest common multiple */ +unsigned long lcm(unsigned long a, unsigned long b) +{ + if (a && b) + return (a * b) / gcd(a, b); + else if (b) + return b; + + return a; +} +EXPORT_SYMBOL_GPL(lcm); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 0871582aa29..2a087e0f986 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -555,6 +555,10 @@ EXPORT_SYMBOL(radix_tree_tag_clear); * * 0: tag not present or not set * 1: tag set + * + * Note that the return value of this function may not be relied on, even if + * the RCU lock is held, unless tag modification and node deletion are excluded + * from concurrency. 
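The lcm() helper added above is aimed at code such as the block layer's limit stacking, where a combined optimal IO size must honour both devices; a sketch of that use, with the queue_limits field names assumed:

	/* lcm(512, 4096) == 4096, lcm(0, 4096) == 4096, lcm(0, 0) == 0 */
	t->io_opt = lcm(t->io_opt, b->io_opt);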
*/ int radix_tree_tag_get(struct radix_tree_root *root, unsigned long index, unsigned int tag) @@ -595,12 +599,8 @@ int radix_tree_tag_get(struct radix_tree_root *root, */ if (!tag_get(node, tag, offset)) saw_unset_tag = 1; - if (height == 1) { - int ret = tag_get(node, tag, offset); - - BUG_ON(ret && saw_unset_tag); - return !!ret; - } + if (height == 1) + return !!tag_get(node, tag, offset); node = rcu_dereference_raw(node->slots[offset]); shift -= RADIX_TREE_MAP_SHIFT; height--; diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 09f5ce1810d..027a03f4c56 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c @@ -16,9 +16,14 @@ /* * __ratelimit - rate limiting * @rs: ratelimit_state data + * @func: name of calling function * - * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks - * in every @rs->ratelimit_jiffies + * This enforces a rate limit: not more than @rs->burst callbacks + * in every @rs->interval + * + * RETURNS: + * 0 means callbacks will be suppressed. + * 1 means go ahead and do it. */ int ___ratelimit(struct ratelimit_state *rs, const char *func) { @@ -35,7 +40,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) * the entity that is holding the lock already: */ if (!spin_trylock_irqsave(&rs->lock, flags)) - return 1; + return 0; if (!rs->begin) rs->begin = jiffies; diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index ccf95bff798..ffc9fc7f3b0 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c @@ -143,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem) { struct rwsem_waiter waiter; struct task_struct *tsk; + unsigned long flags; - spin_lock_irq(&sem->wait_lock); + spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity >= 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity++; - spin_unlock_irq(&sem->wait_lock); + spin_unlock_irqrestore(&sem->wait_lock, flags); goto out; } @@ -164,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem) list_add_tail(&waiter.list, &sem->wait_list); /* we don't need to touch the semaphore struct anymore */ - spin_unlock_irq(&sem->wait_lock); + spin_unlock_irqrestore(&sem->wait_lock, flags); /* wait to be given the lock */ for (;;) { @@ -209,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) { struct rwsem_waiter waiter; struct task_struct *tsk; + unsigned long flags; - spin_lock_irq(&sem->wait_lock); + spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity == 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity = -1; - spin_unlock_irq(&sem->wait_lock); + spin_unlock_irqrestore(&sem->wait_lock, flags); goto out; } @@ -230,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) list_add_tail(&waiter.list, &sem->wait_list); /* we don't need to touch the semaphore struct anymore */ - spin_unlock_irq(&sem->wait_lock); + spin_unlock_irqrestore(&sem->wait_lock, flags); /* wait to be given the lock */ for (;;) { diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 24112e5a578..7376b7c55ff 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -408,12 +408,12 @@ enum format_type { }; struct printf_spec { - u16 type; - s16 field_width; /* width of output field */ + u8 type; /* format_type enum */ u8 flags; /* flags to number() */ - u8 base; - s8 precision; /* # of digits/chars */ - u8 qualifier; + u8 base; /* number base, 8, 10 or 16 only */ + u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */ + s16 field_width; /* width of output field */ + s16 precision; /* # of digits/chars */ }; 
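With the documented return value, the usual calling idiom goes through the __ratelimit() wrapper, as in this minimal sketch:

	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (__ratelimit(&rs))	/* nonzero: still within the burst/interval budget */
		printk(KERN_WARNING "noisy event\n");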
static char *number(char *buf, char *end, unsigned long long num, diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 0e8ca034770..f13e067e146 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -227,6 +227,9 @@ static struct device_attribute bdi_dev_attrs[] = { static __init int bdi_class_init(void) { bdi_class = class_create(THIS_MODULE, "bdi"); + if (IS_ERR(bdi_class)) + return PTR_ERR(bdi_class); + bdi_class->dev_attrs = bdi_dev_attrs; bdi_debug_init(); return 0; diff --git a/mm/bootmem.c b/mm/bootmem.c index eff22422057..58c66cc5056 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -304,9 +304,22 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) unsigned long __init free_all_bootmem(void) { #ifdef CONFIG_NO_BOOTMEM - return free_all_memory_core_early(NODE_DATA(0)->node_id); + /* + * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id + * because in some cases, such as when Node0 has no RAM installed, + * low RAM will be on Node1. + * Using MAX_NUMNODES makes sure all ranges in early_node_map[] + * will be used instead of only the Node0-related ones. + */ + return free_all_memory_core_early(MAX_NUMNODES); #else - return free_all_bootmem_core(NODE_DATA(0)->bdata); + unsigned long total_pages = 0; + bootmem_data_t *bdata; + + list_for_each_entry(bdata, &bdata_list, list) + total_pages += free_all_bootmem_core(bdata); + + return total_pages; #endif } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9ed760dc744..f4ede99c8b9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1359,16 +1359,19 @@ void mem_cgroup_update_file_mapped(struct page *page, int val) lock_page_cgroup(pc); mem = pc->mem_cgroup; - if (!mem) - goto done; - - if (!PageCgroupUsed(pc)) + if (!mem || !PageCgroupUsed(pc)) goto done; /* * Preemption is already disabled.
We can use __this_cpu_xxx */ - __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], val); + if (val > 0) { + __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); + SetPageCgroupFileMapped(pc); + } else { + __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); + ClearPageCgroupFileMapped(pc); + } done: unlock_page_cgroup(pc); @@ -1801,16 +1804,13 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, static void __mem_cgroup_move_account(struct page_cgroup *pc, struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge) { - struct page *page; - VM_BUG_ON(from == to); VM_BUG_ON(PageLRU(pc->page)); VM_BUG_ON(!PageCgroupLocked(pc)); VM_BUG_ON(!PageCgroupUsed(pc)); VM_BUG_ON(pc->mem_cgroup != from); - page = pc->page; - if (page_mapped(page) && !PageAnon(page)) { + if (PageCgroupFileMapped(pc)) { /* Update mapped_file data for mem_cgroup */ preempt_disable(); __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); diff --git a/mm/memory.c b/mm/memory.c index 1d2ea39260e..833952d8b74 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -125,13 +125,12 @@ core_initcall(init_zero_pfn); #if defined(SPLIT_RSS_COUNTING) -void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm) +static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm) { int i; for (i = 0; i < NR_MM_COUNTERS; i++) { if (task->rss_stat.count[i]) { - BUG_ON(!mm); add_mm_counter(mm, i, task->rss_stat.count[i]); task->rss_stat.count[i] = 0; } diff --git a/mm/mmap.c b/mm/mmap.c index 75557c639ad..f90ea92f755 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -507,11 +507,12 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start, struct address_space *mapping = NULL; struct prio_tree_root *root = NULL; struct file *file = vma->vm_file; - struct anon_vma *anon_vma = NULL; long adjust_next = 0; int remove_next = 0; if (next && !insert) { + struct vm_area_struct *exporter = NULL; + if (end >= next->vm_end) { /* * vma expands, overlapping all the next, and @@ -519,7 +520,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start, */ again: remove_next = 1 + (end > next->vm_end); end = next->vm_end; - anon_vma = next->anon_vma; + exporter = next; importer = vma; } else if (end > next->vm_start) { /* @@ -527,7 +528,7 @@ again: remove_next = 1 + (end > next->vm_end); * mprotect case 5 shifting the boundary up. */ adjust_next = (end - next->vm_start) >> PAGE_SHIFT; - anon_vma = next->anon_vma; + exporter = next; importer = vma; } else if (end < vma->vm_end) { /* @@ -536,28 +537,19 @@ again: remove_next = 1 + (end > next->vm_end); * mprotect case 4 shifting the boundary down. */ adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); - anon_vma = next->anon_vma; + exporter = vma; importer = next; } - } - /* - * When changing only vma->vm_end, we don't really need anon_vma lock. - */ - if (vma->anon_vma && (insert || importer || start != vma->vm_start)) - anon_vma = vma->anon_vma; - if (anon_vma) { /* * Easily overlooked: when mprotect shifts the boundary, * make sure the expanding vma has anon_vma set if the * shrinking vma had, to cover any anon pages imported. */ - if (importer && !importer->anon_vma) { - /* Block reverse map lookups until things are set up. 
*/ - if (anon_vma_clone(importer, vma)) { + if (exporter && exporter->anon_vma && !importer->anon_vma) { + if (anon_vma_clone(importer, exporter)) return -ENOMEM; - } - importer->anon_vma = anon_vma; + importer->anon_vma = exporter->anon_vma; } } @@ -825,6 +817,61 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, } /* + * Rough compatibility check to quickly see if it's even worth looking + * at sharing an anon_vma. + * + * They need to have the same vm_file, and the flags can only differ + * in things that mprotect may change. + * + * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that + * we can merge the two vma's. For example, we refuse to merge a vma if + * there is a vm_ops->close() function, because that indicates that the + * driver is doing some kind of reference counting. But that doesn't + * really matter for the anon_vma sharing case. + */ +static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) +{ + return a->vm_end == b->vm_start && + mpol_equal(vma_policy(a), vma_policy(b)) && + a->vm_file == b->vm_file && + !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) && + b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); +} + +/* + * Do some basic sanity checking to see if we can re-use the anon_vma + * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be + * the same as 'old', the other will be the new one that is trying + * to share the anon_vma. + * + * NOTE! This runs with mm_sem held for reading, so it is possible that + * the anon_vma of 'old' is concurrently in the process of being set up + * by another page fault trying to merge _that_. But that's ok: if it + * is being set up, that automatically means that it will be a singleton + * acceptable for merging, so we can do all of this optimistically. But + * we do that ACCESS_ONCE() to make sure that we never re-load the pointer. + * + * IOW: that the "list_is_singular()" test on the anon_vma_chain only + * matters for the 'stable anon_vma' case (ie the thing we want to avoid + * is to return an anon_vma that is "complex" due to having gone through + * a fork). + * + * We also make sure that the two vma's are compatible (adjacent, + * and with the same memory policies). That's all stable, even with just + * a read lock on the mm_sem. + */ +static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) +{ + if (anon_vma_compatible(a, b)) { + struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma); + + if (anon_vma && list_is_singular(&old->anon_vma_chain)) + return anon_vma; + } + return NULL; +} + +/* + * find_mergeable_anon_vma is used by anon_vma_prepare, to check * neighbouring vmas for a suitable anon_vma, before it goes off * to allocate a new anon_vma. It checks because a repetitive @@ -834,28 +881,16 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, */ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) { + struct anon_vma *anon_vma; struct vm_area_struct *near; - unsigned long vm_flags; near = vma->vm_next; if (!near) goto try_prev; - /* - * Since only mprotect tries to remerge vmas, match flags - * which might be mprotected into each other later on. - * Neither mlock nor madvise tries to remerge at present, - * so leave their flags as obstructing a merge.
- */ - vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); - vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); - - if (near->anon_vma && vma->vm_end == near->vm_start && - mpol_equal(vma_policy(vma), vma_policy(near)) && - can_vma_merge_before(near, vm_flags, - NULL, vma->vm_file, vma->vm_pgoff + - ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT))) - return near->anon_vma; + anon_vma = reusable_anon_vma(near, vma, near); + if (anon_vma) + return anon_vma; try_prev: /* * It is potentially slow to have to call find_vma_prev here. @@ -868,14 +903,9 @@ try_prev: if (!near) goto none; - vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); - vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); - - if (near->anon_vma && near->vm_end == vma->vm_start && - mpol_equal(vma_policy(near), vma_policy(vma)) && - can_vma_merge_after(near, vm_flags, - NULL, vma->vm_file, vma->vm_pgoff)) - return near->anon_vma; + anon_vma = reusable_anon_vma(near, near, vma); + if (anon_vma) + return anon_vma; none: /* * There's no absolute need to look only at touching neighbours: diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 7b47a57b664..8b1a2ce21ee 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -80,6 +80,37 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, return err; } +#ifdef CONFIG_HUGETLB_PAGE +static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr, + unsigned long end) +{ + unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h); + return boundary < end ? boundary : end; +} + +static int walk_hugetlb_range(struct vm_area_struct *vma, + unsigned long addr, unsigned long end, + struct mm_walk *walk) +{ + struct hstate *h = hstate_vma(vma); + unsigned long next; + unsigned long hmask = huge_page_mask(h); + pte_t *pte; + int err = 0; + + do { + next = hugetlb_entry_end(h, addr, end); + pte = huge_pte_offset(walk->mm, addr & hmask); + if (pte && walk->hugetlb_entry) + err = walk->hugetlb_entry(pte, hmask, addr, next, walk); + if (err) + return err; + } while (addr = next, addr != end); + + return 0; +} +#endif + /** * walk_page_range - walk a memory map's page tables with a callback * @mm: memory map to walk @@ -128,20 +159,16 @@ int walk_page_range(unsigned long addr, unsigned long end, vma = find_vma(walk->mm, addr); #ifdef CONFIG_HUGETLB_PAGE if (vma && is_vm_hugetlb_page(vma)) { - pte_t *pte; - struct hstate *hs; - if (vma->vm_end < next) next = vma->vm_end; - hs = hstate_vma(vma); - pte = huge_pte_offset(walk->mm, - addr & huge_page_mask(hs)); - if (pte && !huge_pte_none(huge_ptep_get(pte)) - && walk->hugetlb_entry) - err = walk->hugetlb_entry(pte, addr, - next, walk); + /* + * Hugepage is very tightly coupled with vma, so + * walk through hugetlb entries within a given vma. 
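+ * For example, with 2MB hugepages each step covers one hugepage: + * hugetlb_entry_end() rounds addr down to a huge_page_size() boundary, + * advances by one hugepage, and clamps the final step at 'end'.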
+ */ + err = walk_hugetlb_range(vma, addr, next, walk); if (err) break; + pgd = pgd_offset(walk->mm, next); continue; } #endif diff --git a/mm/readahead.c b/mm/readahead.c index 999b54bb462..dfa9a1a03a1 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -503,7 +503,7 @@ void page_cache_sync_readahead(struct address_space *mapping, return; /* be dumb */ - if (filp->f_mode & FMODE_RANDOM) { + if (filp && (filp->f_mode & FMODE_RANDOM)) { force_page_cache_readahead(mapping, filp, offset, req_size); return; } diff --git a/mm/rmap.c b/mm/rmap.c index eaa7a09eb72..526704e8215 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -182,7 +182,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) { struct anon_vma_chain *avc, *pavc; - list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) { + list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { avc = anon_vma_chain_alloc(); if (!avc) goto enomem_failure; @@ -730,13 +730,29 @@ void page_move_anon_rmap(struct page *page, * @page: the page to add the mapping to * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped + * @exclusive: the page is exclusively owned by the current process */ static void __page_set_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address) + struct vm_area_struct *vma, unsigned long address, int exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); + + /* + * If the page isn't exclusively mapped into this vma, + * we must use the _oldest_ possible anon_vma for the + * page mapping! + * + * So take the last AVC chain entry in the vma, which is + * the deepest ancestor, and use the anon_vma from that. + */ + if (!exclusive) { + struct anon_vma_chain *avc; + avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma); + anon_vma = avc->anon_vma; + } + anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page->mapping = (struct address_space *) anon_vma; page->index = linear_page_index(vma, address); @@ -791,7 +807,7 @@ void page_add_anon_rmap(struct page *page, VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); if (first) - __page_set_anon_rmap(page, vma, address); + __page_set_anon_rmap(page, vma, address, 0); else __page_check_anon_rmap(page, vma, address); } @@ -813,7 +829,7 @@ void page_add_new_anon_rmap(struct page *page, SetPageSwapBacked(page); atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ __inc_zone_page_state(page, NR_ANON_PAGES); - __page_set_anon_rmap(page, vma, address); + __page_set_anon_rmap(page, vma, address, 1); if (page_evictable(page, vma)) lru_cache_add_lru(page, LRU_ACTIVE_ANON); else diff --git a/mm/slab.c b/mm/slab.c index a9f325b28be..bac0f4fcc21 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3602,21 +3602,10 @@ EXPORT_SYMBOL(kmem_cache_alloc_notrace); */ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) { - unsigned long addr = (unsigned long)ptr; - unsigned long min_addr = PAGE_OFFSET; - unsigned long align_mask = BYTES_PER_WORD - 1; unsigned long size = cachep->buffer_size; struct page *page; - if (unlikely(addr < min_addr)) - goto out; - if (unlikely(addr > (unsigned long)high_memory - size)) - goto out; - if (unlikely(addr & align_mask)) - goto out; - if (unlikely(!kern_addr_valid(addr))) - goto out; - if (unlikely(!kern_addr_valid(addr + size - 1))) + if (unlikely(!kern_ptr_validate(ptr, size))) goto out; page = virt_to_page(ptr); if (unlikely(!PageSlab(page))) diff --git a/mm/slub.c b/mm/slub.c 
index b364844a106..7d6c8b1ccf6 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2386,6 +2386,9 @@ int kmem_ptr_validate(struct kmem_cache *s, const void *object) { struct page *page; + if (!kern_ptr_validate(object, s->size)) + return 0; + page = get_object_page(object); if (!page || s != page->slab) diff --git a/mm/util.c b/mm/util.c index 834db7be240..f5712e8964b 100644 --- a/mm/util.c +++ b/mm/util.c @@ -186,6 +186,27 @@ void kzfree(const void *p) } EXPORT_SYMBOL(kzfree); +int kern_ptr_validate(const void *ptr, unsigned long size) +{ + unsigned long addr = (unsigned long)ptr; + unsigned long min_addr = PAGE_OFFSET; + unsigned long align_mask = sizeof(void *) - 1; + + if (unlikely(addr < min_addr)) + goto out; + if (unlikely(addr > (unsigned long)high_memory - size)) + goto out; + if (unlikely(addr & align_mask)) + goto out; + if (unlikely(!kern_addr_valid(addr))) + goto out; + if (unlikely(!kern_addr_valid(addr + size - 1))) + goto out; + return 1; +out: + return 0; +} + /* * strndup_user - duplicate an existing string from user space * @s: The string to duplicate diff --git a/mm/vmscan.c b/mm/vmscan.c index e0e5f15bb72..3ff3311447f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1535,13 +1535,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, unsigned long ap, fp; struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); - /* If we have no swap space, do not bother scanning anon pages. */ - if (!sc->may_swap || (nr_swap_pages <= 0)) { - percent[0] = 0; - percent[1] = 100; - return; - } - anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + @@ -1639,20 +1632,22 @@ static void shrink_zone(int priority, struct zone *zone, unsigned long nr_reclaimed = sc->nr_reclaimed; unsigned long nr_to_reclaim = sc->nr_to_reclaim; struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); + int noswap = 0; - get_scan_ratio(zone, sc, percent); + /* If we have no swap space, do not bother scanning anon pages. 
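+ * Setting noswap and forcing percent[] to 0/100 here makes the + * per-LRU loop below compute a zero scan target for the anon lists + * even when priority is 0.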
*/ + if (!sc->may_swap || (nr_swap_pages <= 0)) { + noswap = 1; + percent[0] = 0; + percent[1] = 100; + } else + get_scan_ratio(zone, sc, percent); for_each_evictable_lru(l) { int file = is_file_lru(l); unsigned long scan; - if (percent[file] == 0) { - nr[l] = 0; - continue; - } - scan = zone_nr_lru_pages(zone, sc, l); - if (priority) { + if (priority || noswap) { scan >>= priority; scan = (scan * percent[file]) / 100; } diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 7794a2e2adc..99d68c34e4f 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -1002,7 +1002,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al BT_DBG("sk %p", sk); - if (!addr || addr->sa_family != AF_BLUETOOTH) + if (!addr || alen < sizeof(addr->sa_family) || + addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 7f439765403..8ed3c37684f 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -397,7 +397,8 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a BT_DBG("sk %p", sk); - if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc)) + if (alen < sizeof(struct sockaddr_rc) || + addr->sa_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index e5b16b76b22..ca6b2ad1c3f 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -499,7 +499,8 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen BT_DBG("sk %p", sk); - if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco)) + if (alen < sizeof(struct sockaddr_sco) || + addr->sa_family != AF_BLUETOOTH) return -EINVAL; if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 6980625537c..f29ada827a6 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -723,7 +723,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br, if (!pskb_may_pull(skb, len)) return -EINVAL; - grec = (void *)(skb->data + len); + grec = (void *)(skb->data + len - sizeof(*grec)); group = grec->grec_mca; type = grec->grec_type; diff --git a/net/can/bcm.c b/net/can/bcm.c index a2dee522b43..907dc871fac 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1479,6 +1479,9 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); + if (len < sizeof(*addr)) + return -EINVAL; + if (bo->bound) return -EISCONN; diff --git a/net/can/raw.c b/net/can/raw.c index 3a7dffb6519..da99cf153b3 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -445,7 +445,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, return -EFAULT; } } else if (count == 1) { - if (copy_from_user(&sfilter, optval, optlen)) + if (copy_from_user(&sfilter, optval, sizeof(sfilter))) return -EFAULT; } diff --git a/net/core/dev.c b/net/core/dev.c index 1c8a0ce473a..92584bfef09 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1989,8 +1989,12 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, if (dev->real_num_tx_queues > 1) queue_index = skb_tx_hash(dev, skb); - if (sk && sk->sk_dst_cache) - sk_tx_queue_set(sk, queue_index); + if (sk) { + struct dst_entry *dst = rcu_dereference(sk->sk_dst_cache); + + if (dst && skb_dst(skb) == dst) + sk_tx_queue_set(sk, queue_index); + } } } diff --git 
a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index 79886d546ba..c7da600750b 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c @@ -127,6 +127,9 @@ static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr, { struct sock *sk = sock->sk; + if (addr_len < sizeof(uaddr->sa_family)) + return -EINVAL; + if (uaddr->sa_family == AF_UNSPEC) return sk->sk_prot->disconnect(sk, flags); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 2ed85714540..f7135742238 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -531,6 +531,8 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, { struct sock *sk = sock->sk; + if (addr_len < sizeof(uaddr->sa_family)) + return -EINVAL; if (uaddr->sa_family == AF_UNSPEC) return sk->sk_prot->disconnect(sk, flags); @@ -574,6 +576,9 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int err; long timeo; + if (addr_len < sizeof(uaddr->sa_family)) + return -EINVAL; + lock_sock(sk); if (uaddr->sa_family == AF_UNSPEC) { diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 59a838795e3..c98f115fb0f 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -209,7 +209,9 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i) { struct node *ret = tnode_get_child(tn, i); - return rcu_dereference(ret); + return rcu_dereference_check(ret, + rcu_read_lock_held() || + lockdep_rtnl_is_held()); } static inline int tnode_child_length(const struct tnode *tn) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index c65f18e0936..d1bcc9f21d4 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -120,7 +120,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb) newskb->pkt_type = PACKET_LOOPBACK; newskb->ip_summed = CHECKSUM_UNNECESSARY; WARN_ON(!skb_dst(newskb)); - netif_rx(newskb); + netif_rx_ni(newskb); return 0; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 7a1f1d78893..0f8caf64caa 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1369,6 +1369,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_eat_skb(sk, skb, 0); if (!desc->count) break; + tp->copied_seq = seq; } tp->copied_seq = seq; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 954bbfb39df..8fef859db35 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -472,8 +472,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, if (hslot->count < hslot2->count) goto begin; - result = udp4_lib_lookup2(net, INADDR_ANY, sport, - daddr, hnum, dif, + result = udp4_lib_lookup2(net, saddr, sport, + INADDR_ANY, hnum, dif, hslot2, slot2); } rcu_read_unlock(); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 16c4391f952..65f9c379df3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -108,7 +108,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) newskb->ip_summed = CHECKSUM_UNNECESSARY; WARN_ON(!skb_dst(newskb)); - netif_rx(newskb); + netif_rx_ni(newskb); return 0; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c177aea88c0..90824852f59 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -259,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net, if (hslot->count < hslot2->count) goto begin; - result = udp6_lib_lookup2(net, &in6addr_any, sport, - daddr, hnum, dif, + result = udp6_lib_lookup2(net, saddr, sport, + &in6addr_any, hnum, dif, hslot2, slot2); } rcu_read_unlock(); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 06c33b68d8e..b887e484ae0 100644 
--- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -225,11 +225,11 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, switch (sdata->vif.type) { case NL80211_IFTYPE_AP: sdata->vif.bss_conf.enable_beacon = - !!rcu_dereference(sdata->u.ap.beacon); + !!sdata->u.ap.beacon; break; case NL80211_IFTYPE_ADHOC: sdata->vif.bss_conf.enable_beacon = - !!rcu_dereference(sdata->u.ibss.presp); + !!sdata->u.ibss.presp; break; case NL80211_IFTYPE_MESH_POINT: sdata->vif.bss_conf.enable_beacon = true; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 58e3e3a61d9..859ee5f3d94 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -750,9 +750,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_ACTION: - if (skb->len < IEEE80211_MIN_ACTION_SIZE) - return RX_DROP_MONITOR; - /* fall through */ case IEEE80211_STYPE_PROBE_RESP: case IEEE80211_STYPE_BEACON: skb_queue_tail(&ifmsh->skb_queue, skb); diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 122c11380ff..fefc45c4b4e 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -392,7 +392,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, if (SN_GT(mpath->sn, orig_sn) || (mpath->sn == orig_sn && action == MPATH_PREQ && - new_metric > mpath->metric)) { + new_metric >= mpath->metric)) { process = false; fresh_info = false; } @@ -612,7 +612,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, cpu_to_le32(orig_sn), 0, target_addr, - cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount, + cpu_to_le32(target_sn), next_hop, hopcount, ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), 0, sdata); rcu_read_unlock(); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index f0accf622cd..04ea07f0e78 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1974,6 +1974,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) goto handled; } break; + case MESH_PLINK_CATEGORY: + case MESH_PATH_SEL_CATEGORY: + if (ieee80211_vif_is_mesh(&sdata->vif)) + return ieee80211_mesh_rx_mgmt(sdata, rx->skb); + break; } /* diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 56422d89435..fb12cec4d33 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -93,12 +93,18 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct sta_info *sta; - sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); + sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], + rcu_read_lock_held() || + lockdep_is_held(&local->sta_lock) || + lockdep_is_held(&local->sta_mtx)); while (sta) { if (sta->sdata == sdata && memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) break; - sta = rcu_dereference(sta->hnext); + sta = rcu_dereference_check(sta->hnext, + rcu_read_lock_held() || + lockdep_is_held(&local->sta_lock) || + lockdep_is_held(&local->sta_mtx)); } return sta; } @@ -113,13 +119,19 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct sta_info *sta; - sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); + sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], + rcu_read_lock_held() || + lockdep_is_held(&local->sta_lock) || + lockdep_is_held(&local->sta_mtx)); while (sta) { if ((sta->sdata == sdata || sta->sdata->bss == sdata->bss) && memcmp(sta->sta.addr, addr, 
ETH_ALEN) == 0) break; - sta = rcu_dereference(sta->hnext); + sta = rcu_dereference_check(sta->hnext, + rcu_read_lock_held() || + lockdep_is_held(&local->sta_lock) || + lockdep_is_held(&local->sta_mtx)); } return sta; } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index cbe53ed4fb0..cfc473e1b05 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1991,6 +1991,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, void ieee80211_tx_pending(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *)data; + struct ieee80211_sub_if_data *sdata; unsigned long flags; int i; bool txok; @@ -2029,6 +2030,11 @@ void ieee80211_tx_pending(unsigned long data) if (!txok) break; } + + if (skb_queue_empty(&local->pending[i])) + list_for_each_entry_rcu(sdata, &local->interfaces, list) + netif_tx_wake_queue( + netdev_get_tx_queue(sdata->dev, i)); } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index c453226f06b..53af5704743 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -279,13 +279,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, /* someone still has this queue stopped */ return; - if (!skb_queue_empty(&local->pending[queue])) + if (skb_queue_empty(&local->pending[queue])) { + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) + netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); + rcu_read_unlock(); + } else tasklet_schedule(&local->tx_pending_tasklet); - - rcu_read_lock(); - list_for_each_entry_rcu(sdata, &local->interfaces, list) - netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); - rcu_read_unlock(); } void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, @@ -1097,9 +1097,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) */ res = drv_start(local); if (res) { - WARN(local->suspended, "Harware became unavailable " - "upon resume. This is could be a software issue" - "prior to suspend or a hardware issue\n"); + WARN(local->suspended, "Hardware became unavailable " + "upon resume. This could be a software issue " + "prior to suspend or a hardware issue.\n"); return res; } diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index 016ab9c75eb..d37b7f80fa3 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -51,9 +51,12 @@ struct netlbl_domhsh_tbl { }; /* Domain hash table */ -/* XXX - updates should be so rare that having one spinlock for the entire - * hash table should be okay */ +/* updates should be so rare that having one spinlock for the entire hash table + * should be okay */ static DEFINE_SPINLOCK(netlbl_domhsh_lock); +#define netlbl_domhsh_rcu_deref(p) \ + rcu_dereference_check(p, rcu_read_lock_held() || \ + lockdep_is_held(&netlbl_domhsh_lock)) static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL; static struct netlbl_dom_map *netlbl_domhsh_def = NULL; @@ -107,7 +110,8 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) * Description: * This is the hashing function for the domain hash table, it returns the * correct bucket number for the domain. The caller is responsibile for - * calling the rcu_read_[un]lock() functions. + * ensuring that the hash table is protected with either a RCU read lock or the + * hash table lock. 
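+ * (The netlbl_domhsh_rcu_deref() macro defined above encodes exactly + * this requirement as an rcu_dereference_check() expression that + * lockdep can verify.)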
* */ static u32 netlbl_domhsh_hash(const char *key) @@ -121,7 +125,7 @@ static u32 netlbl_domhsh_hash(const char *key) for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; - return val & (rcu_dereference(netlbl_domhsh)->size - 1); + return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1); } /** @@ -131,7 +135,8 @@ static u32 netlbl_domhsh_hash(const char *key) * Description: * Searches the domain hash table and returns a pointer to the hash table * entry if found, otherwise NULL is returned. The caller is responsibile for - * the rcu hash table locks (i.e. the caller much call rcu_read_[un]lock()). + * ensuring that the hash table is protected with either a RCU read lock or the + * hash table lock. * */ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) @@ -142,7 +147,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) if (domain != NULL) { bkt = netlbl_domhsh_hash(domain); - bkt_list = &rcu_dereference(netlbl_domhsh)->tbl[bkt]; + bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt]; list_for_each_entry_rcu(iter, bkt_list, list) if (iter->valid && strcmp(iter->domain, domain) == 0) return iter; @@ -160,8 +165,8 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) * Searches the domain hash table and returns a pointer to the hash table * entry if an exact match is found, if an exact match is not present in the * hash table then the default entry is returned if valid otherwise NULL is - * returned. The caller is responsibile for the rcu hash table locks - * (i.e. the caller much call rcu_read_[un]lock()). + * returned. The caller is responsible for ensuring that the hash table is + * protected with either a RCU read lock or the hash table lock. * */ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) @@ -170,7 +175,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) entry = netlbl_domhsh_search(domain); if (entry == NULL) { - entry = rcu_dereference(netlbl_domhsh_def); + entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def); if (entry != NULL && !entry->valid) entry = NULL; } @@ -307,8 +312,11 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, struct netlbl_af6list *tmp6; #endif /* IPv6 */ + /* XXX - we can remove this RCU read lock as the spinlock protects the + * entire function, but before we do we need to fixup the + * netlbl_af[4,6]list RCU functions to do "the right thing" with + * respect to rcu_dereference() when only a spinlock is held.
*/ rcu_read_lock(); - spin_lock(&netlbl_domhsh_lock); if (entry->domain != NULL) entry_old = netlbl_domhsh_search(entry->domain); diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index d7ea2cf390b..a3d64aabe2f 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -115,6 +115,9 @@ struct netlbl_unlhsh_walk_arg { /* updates should be so rare that having one spinlock for the entire * hash table should be okay */ static DEFINE_SPINLOCK(netlbl_unlhsh_lock); +#define netlbl_unlhsh_rcu_deref(p) \ + rcu_dereference_check(p, rcu_read_lock_held() || \ + lockdep_is_held(&netlbl_unlhsh_lock)) static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL; static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL; @@ -236,15 +239,13 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry) * Description: * This is the hashing function for the unlabeled hash table, it returns the * bucket number for the given device/interface. The caller is responsible for - * calling the rcu_read_[un]lock() functions. + * ensuring that the hash table is protected with either a RCU read lock or + * the hash table lock. * */ static u32 netlbl_unlhsh_hash(int ifindex) { - /* this is taken _almost_ directly from - * security/selinux/netif.c:sel_netif_hasfn() as they do pretty much - * the same thing */ - return ifindex & (rcu_dereference(netlbl_unlhsh)->size - 1); + return ifindex & (netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->size - 1); } /** @@ -254,7 +255,8 @@ static u32 netlbl_unlhsh_hash(int ifindex) * Description: * Searches the unlabeled connection hash table and returns a pointer to the * interface entry which matches @ifindex, otherwise NULL is returned. The - * caller is responsible for calling the rcu_read_[un]lock() functions. + * caller is responsible for ensuring that the hash table is protected with + * either a RCU read lock or the hash table lock. * */ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) @@ -264,7 +266,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) struct netlbl_unlhsh_iface *iter; bkt = netlbl_unlhsh_hash(ifindex); - bkt_list = &rcu_dereference(netlbl_unlhsh)->tbl[bkt]; + bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]; list_for_each_entry_rcu(iter, bkt_list, list) if (iter->valid && iter->ifindex == ifindex) return iter; @@ -273,33 +275,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) } /** - * netlbl_unlhsh_search_iface_def - Search for a matching interface entry - * @ifindex: the network interface - * - * Description: - * Searches the unlabeled connection hash table and returns a pointer to the - * interface entry which matches @ifindex. If an exact match can not be found - * and there is a valid default entry, the default entry is returned, otherwise - * NULL is returned. The caller is responsible for calling the - * rcu_read_[un]lock() functions. 
- * - */ -static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex) -{ - struct netlbl_unlhsh_iface *entry; - - entry = netlbl_unlhsh_search_iface(ifindex); - if (entry != NULL) - return entry; - - entry = rcu_dereference(netlbl_unlhsh_def); - if (entry != NULL && entry->valid) - return entry; - - return NULL; -} - -/** * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table * @iface: the associated interface entry * @addr: IPv4 address in network byte order @@ -309,8 +284,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex) * Description: * Add a new address entry into the unlabeled connection hash table using the * interface entry specified by @iface. On success zero is returned, otherwise - * a negative value is returned. The caller is responsible for calling the - * rcu_read_[un]lock() functions. + * a negative value is returned. * */ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, @@ -350,8 +324,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, * Description: * Add a new address entry into the unlabeled connection hash table using the * interface entry specified by @iface. On success zero is returned, otherwise - * a negative value is returned. The caller is responsible for calling the - * rcu_read_[un]lock() functions. + * a negative value is returned. * */ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, @@ -392,8 +365,7 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, * Description: * Add a new, empty, interface entry into the unlabeled connection hash table. * On success a pointer to the new interface entry is returned, on failure NULL - * is returned. The caller is responsible for calling the rcu_read_[un]lock() - * functions. + * is returned. * */ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) @@ -416,10 +388,10 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) if (netlbl_unlhsh_search_iface(ifindex) != NULL) goto add_iface_failure; list_add_tail_rcu(&iface->list, - &rcu_dereference(netlbl_unlhsh)->tbl[bkt]); + &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]); } else { INIT_LIST_HEAD(&iface->list); - if (rcu_dereference(netlbl_unlhsh_def) != NULL) + if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL) goto add_iface_failure; rcu_assign_pointer(netlbl_unlhsh_def, iface); } @@ -549,8 +521,7 @@ unlhsh_add_return: * * Description: * Remove an IP address entry from the unlabeled connection hash table. - * Returns zero on success, negative values on failure. The caller is - * responsible for calling the rcu_read_[un]lock() functions. + * Returns zero on success, negative values on failure. * */ static int netlbl_unlhsh_remove_addr4(struct net *net, @@ -612,8 +583,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net, * * Description: * Remove an IP address entry from the unlabeled connection hash table. - * Returns zero on success, negative values on failure. The caller is - * responsible for calling the rcu_read_[un]lock() functions. + * Returns zero on success, negative values on failure. 
* */ static int netlbl_unlhsh_remove_addr6(struct net *net, @@ -1548,8 +1518,10 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb, struct netlbl_unlhsh_iface *iface; rcu_read_lock(); - iface = netlbl_unlhsh_search_iface_def(skb->skb_iif); + iface = netlbl_unlhsh_search_iface(skb->skb_iif); if (iface == NULL) + iface = rcu_dereference(netlbl_unlhsh_def); + if (iface == NULL || !iface->valid) goto unlabel_getattr_nolabel; switch (family) { case PF_INET: { diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index acbbae1e89b..795424396af 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -683,6 +683,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; + if (alen < sizeof(addr->sa_family)) + return -EINVAL; + if (addr->sa_family == AF_UNSPEC) { sk->sk_state = NETLINK_UNCONNECTED; nlk->dst_pid = 0; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index cc90363d7e7..243946d4809 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2169,8 +2169,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCSIFFLAGS: - if (!net_eq(sock_net(sk), &init_net)) - return -ENOIOCTLCMD; return inet_dgram_ops.ioctl(sock, cmd, arg); #endif diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index fd90eb89842..edea15a54e5 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -679,7 +679,10 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, int ret; dprintk("svcrdma: Creating RDMA socket\n"); - + if (sa->sa_family != AF_INET) { + dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family); + return ERR_PTR(-EAFNOSUPPORT); + } cma_xprt = rdma_create_xprt(serv, 1); if (!cma_xprt) return ERR_PTR(-ENOMEM); diff --git a/net/wireless/reg.c b/net/wireless/reg.c index b7604b823f4..422da20d1e5 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -325,7 +325,7 @@ struct reg_regdb_search_request { }; static LIST_HEAD(reg_regdb_search_list); -static DEFINE_SPINLOCK(reg_regdb_search_lock); +static DEFINE_MUTEX(reg_regdb_search_mutex); static void reg_regdb_search(struct work_struct *work) { @@ -333,7 +333,7 @@ static void reg_regdb_search(struct work_struct *work) const struct ieee80211_regdomain *curdom, *regdom; int i, r; - spin_lock(®_regdb_search_lock); + mutex_lock(®_regdb_search_mutex); while (!list_empty(®_regdb_search_list)) { request = list_first_entry(®_regdb_search_list, struct reg_regdb_search_request, @@ -347,18 +347,16 @@ static void reg_regdb_search(struct work_struct *work) r = reg_copy_regd(®dom, curdom); if (r) break; - spin_unlock(®_regdb_search_lock); mutex_lock(&cfg80211_mutex); set_regdom(regdom); mutex_unlock(&cfg80211_mutex); - spin_lock(®_regdb_search_lock); break; } } kfree(request); } - spin_unlock(®_regdb_search_lock); + mutex_unlock(®_regdb_search_mutex); } static DECLARE_WORK(reg_regdb_work, reg_regdb_search); @@ -376,9 +374,9 @@ static void reg_regdb_query(const char *alpha2) memcpy(request->alpha2, alpha2, 2); - spin_lock(®_regdb_search_lock); + mutex_lock(®_regdb_search_mutex); list_add_tail(&request->list, ®_regdb_search_list); - spin_unlock(®_regdb_search_lock); + mutex_unlock(®_regdb_search_mutex); schedule_work(®_regdb_work); } diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index e56f711bacc..cbddd0cb83f 100644 --- 
a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -83,6 +83,41 @@ struct compat_x25_subscrip_struct { }; #endif + +int x25_parse_address_block(struct sk_buff *skb, + struct x25_address *called_addr, + struct x25_address *calling_addr) +{ + unsigned char len; + int needed; + int rc; + + if (skb->len < 1) { + /* packet has no address block */ + rc = 0; + goto empty; + } + + len = *skb->data; + needed = 1 + (len >> 4) + (len & 0x0f); + + if (skb->len < needed) { + /* packet is too short to hold the addresses it claims + to hold */ + rc = -1; + goto empty; + } + + return x25_addr_ntoa(skb->data, called_addr, calling_addr); + +empty: + *called_addr->x25_addr = 0; + *calling_addr->x25_addr = 0; + + return rc; +} + + int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, struct x25_address *calling_addr) { @@ -554,7 +589,8 @@ static int x25_create(struct net *net, struct socket *sock, int protocol, x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; - x25->facilities.throughput = X25_DEFAULT_THROUGHPUT; + x25->facilities.throughput = 0; /* by default don't negotiate + throughput */ x25->facilities.reverse = X25_DEFAULT_REVERSE; x25->dte_facilities.calling_len = 0; x25->dte_facilities.called_len = 0; @@ -922,16 +958,26 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, /* * Extract the X.25 addresses and convert them to ASCII strings, * and remove them. + * + * Address block is mandatory in call request packets */ - addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr); + addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr); + if (addr_len <= 0) + goto out_clear_request; skb_pull(skb, addr_len); /* * Get the length of the facilities, skip past them for the moment * get the call user data because this is needed to determine * the correct listener + * + * Facilities length is mandatory in call request packets */ + if (skb->len < 1) + goto out_clear_request; len = skb->data[0] + 1; + if (skb->len < len) + goto out_clear_request; skb_pull(skb,len); /* @@ -1415,9 +1461,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) if (facilities.winsize_in < 1 || facilities.winsize_in > 127) break; - if (facilities.throughput < 0x03 || - facilities.throughput > 0xDD) - break; + if (facilities.throughput) { + int out = facilities.throughput & 0xf0; + int in = facilities.throughput & 0x0f; + if (!out) + facilities.throughput |= + X25_DEFAULT_THROUGHPUT << 4; + else if (out < 0x30 || out > 0xD0) + break; + if (!in) + facilities.throughput |= + X25_DEFAULT_THROUGHPUT; + else if (in < 0x03 || in > 0x0D) + break; + } if (facilities.reverse && (facilities.reverse & 0x81) != 0x81) break; diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index a21f6646eb3..771bab00754 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) { unsigned char *p = skb->data; - unsigned int len = *p++; + unsigned int len; *vc_fac_mask = 0; @@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); + if (skb->len < 1) + return 0; + + len = *p++; + + if (len >= skb->len) + return -1; + while (len > 
0) { switch (*p & X25_FAC_CLASS_MASK) { case X25_FAC_CLASS_A: @@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, memcpy(new, ours, sizeof(*new)); len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); + if (len < 0) + return len; /* * They want reverse charging, we won't accept it. @@ -259,9 +269,18 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, new->reverse = theirs.reverse; if (theirs.throughput) { - if (theirs.throughput < ours->throughput) { - SOCK_DEBUG(sk, "X.25: throughput negotiated down\n"); - new->throughput = theirs.throughput; + int theirs_in = theirs.throughput & 0x0f; + int theirs_out = theirs.throughput & 0xf0; + int ours_in = ours->throughput & 0x0f; + int ours_out = ours->throughput & 0xf0; + if (!ours_in || theirs_in < ours_in) { + SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n"); + new->throughput = (new->throughput & 0xf0) | theirs_in; + } + if (!ours_out || theirs_out < ours_out) { + SOCK_DEBUG(sk, + "X.25: outbound throughput negotiated\n"); + new->throughput = (new->throughput & 0x0f) | theirs_out; } } diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index a31b3b9e596..372ac226e64 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -90,6 +90,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) { struct x25_address source_addr, dest_addr; + int len; switch (frametype) { case X25_CALL_ACCEPTED: { @@ -107,11 +108,17 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp * Parse the data in the frame. */ skb_pull(skb, X25_STD_MIN_LEN); - skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr)); - skb_pull(skb, - x25_parse_facilities(skb, &x25->facilities, + + len = x25_parse_address_block(skb, &source_addr, + &dest_addr); + if (len > 0) + skb_pull(skb, len); + + len = x25_parse_facilities(skb, &x25->facilities, &x25->dte_facilities, - &x25->vc_facil_mask)); + &x25->vc_facil_mask); + if (len > 0) + skb_pull(skb, len); /* * Copy any Call User Data. */ diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h index 8da6a842808..cd4f734e274 100644 --- a/security/selinux/ss/avtab.h +++ b/security/selinux/ss/avtab.h @@ -82,7 +82,7 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified void avtab_cache_init(void); void avtab_cache_destroy(void); -#define MAX_AVTAB_HASH_BITS 13 +#define MAX_AVTAB_HASH_BITS 11 #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) #define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) #define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c index 656e474dca4..91acc9a243e 100644 --- a/sound/arm/aaci.c +++ b/sound/arm/aaci.c @@ -863,7 +863,6 @@ static int __devinit aaci_probe_ac97(struct aaci *aaci) struct snd_ac97 *ac97; int ret; - writel(0, aaci->base + AC97_POWERDOWN); /* * Assert AACIRESET for 2us */ @@ -1047,7 +1046,11 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id) writel(0x1fff, aaci->base + AACI_INTCLR); writel(aaci->maincr, aaci->base + AACI_MAINCR); - + /* + * Fix: ac97 read back fail errors by reading + * from any arbitrary aaci register. 
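+ * The value read back is deliberately discarded; performing the + * read itself is what matters, and AACI_CSCH1 was picked arbitrarily.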
+ */ + readl(aaci->base + AACI_CSCH1); ret = aaci_probe_ac97(aaci); if (ret) goto out; diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c index fff62cc8607..971a84a4fa7 100644 --- a/sound/i2c/other/ak4113.c +++ b/sound/i2c/other/ak4113.c @@ -70,7 +70,7 @@ static int snd_ak4113_dev_free(struct snd_device *device) } int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read, - ak4113_write_t *write, const unsigned char pgm[5], + ak4113_write_t *write, const unsigned char *pgm, void *private_data, struct ak4113 **r_ak4113) { struct ak4113 *chip; diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index 8dab82d7d19..668a5ec0449 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c @@ -2184,10 +2184,9 @@ static int __devinit snd_echo_probe(struct pci_dev *pci, goto ctl_error; #endif - if ((err = snd_card_register(card)) < 0) { - snd_card_free(card); + err = snd_card_register(card); + if (err < 0) goto ctl_error; - } snd_printk(KERN_INFO "Card registered: %s\n", card->longname); pci_set_drvdata(pci, chip); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 4bb90675f70..f669442b7c8 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2272,6 +2272,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), + SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB), SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), {} }; @@ -2362,6 +2363,7 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = { SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */ + SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */ {} }; diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index e6d1bdff1b6..af34606c30c 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -1896,6 +1896,14 @@ static int patch_ad1981(struct hda_codec *codec) case AD1981_THINKPAD: spec->mixers[0] = ad1981_thinkpad_mixers; spec->input_mux = &ad1981_thinkpad_capture_source; + /* set the upper-limit for mixer amp to 0dB for avoiding the + * possible damage by overloading + */ + snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT, + (0x17 << AC_AMPCAP_OFFSET_SHIFT) | + (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) | + (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) | + (1 << AC_AMPCAP_MUTE_SHIFT)); break; case AD1981_TOSHIBA: spec->mixers[0] = ad1981_hp_mixers; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 9a23444e9e7..aad1627f56f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -230,6 +230,7 @@ enum { ALC888_ACER_ASPIRE_7730G, ALC883_MEDION, ALC883_MEDION_MD2, + ALC883_MEDION_WIM2160, ALC883_LAPTOP_EAPD, ALC883_LENOVO_101E_2ch, ALC883_LENOVO_NB0763, @@ -1389,22 +1390,31 @@ struct alc_fixup { static void alc_pick_fixup(struct hda_codec *codec, const struct snd_pci_quirk *quirk, - const struct alc_fixup *fix) + const struct alc_fixup *fix, + int pre_init) { const struct alc_pincfg *cfg; quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk); if (!quirk) return; - fix += quirk->value; cfg = fix->pins; - if (cfg) { + if (pre_init && cfg) { +#ifdef 
CONFIG_SND_DEBUG_VERBOSE + snd_printdd(KERN_INFO "hda_codec: %s: Apply pincfg for %s\n", + codec->chip_name, quirk->name); +#endif for (; cfg->nid; cfg++) snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); } - if (fix->verbs) + if (!pre_init && fix->verbs) { +#ifdef CONFIG_SND_DEBUG_VERBOSE + snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-verbs for %s\n", + codec->chip_name, quirk->name); +#endif add_verb(codec->spec, fix->verbs); + } } static int alc_read_coef_idx(struct hda_codec *codec, @@ -1621,6 +1631,11 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = { */ static struct hda_verb alc888_acer_aspire_6530g_verbs[] = { +/* Route to built-in subwoofer as well as speakers */ + {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, + {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, + {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, + {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, /* Bias voltage on for external mic port */ {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80}, /* Front Mic: set to PIN_IN (empty by default) */ @@ -1632,10 +1647,12 @@ static struct hda_verb alc888_acer_aspire_6530g_verbs[] = { /* Enable speaker output */ {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, + {0x14, AC_VERB_SET_EAPD_BTLENABLE, 2}, /* Enable headphone output */ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP}, {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, + {0x15, AC_VERB_SET_EAPD_BTLENABLE, 2}, { } }; @@ -4801,6 +4818,25 @@ static void alc880_auto_init_analog_input(struct hda_codec *codec) } } +static void alc880_auto_init_input_src(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + int c; + + for (c = 0; c < spec->num_adc_nids; c++) { + unsigned int mux_idx; + const struct hda_input_mux *imux; + mux_idx = c >= spec->num_mux_defs ? 
0 : c; + imux = &spec->input_mux[mux_idx]; + if (!imux->num_items && mux_idx > 0) + imux = &spec->input_mux[0]; + if (imux) + snd_hda_codec_write(codec, spec->adc_nids[c], 0, + AC_VERB_SET_CONNECT_SEL, + imux->items[0].index); + } +} + /* parse the BIOS configuration and set up the alc_spec */ /* return 1 if successful, 0 if the proper config is not found, * or a negative error code @@ -4879,6 +4915,7 @@ static void alc880_auto_init(struct hda_codec *codec) alc880_auto_init_multi_out(codec); alc880_auto_init_extra_out(codec); alc880_auto_init_analog_input(codec); + alc880_auto_init_input_src(codec); if (spec->unsol_event) alc_inithook(codec); } @@ -4984,6 +5021,70 @@ static void set_capture_mixer(struct hda_codec *codec) } } +/* fill adc_nids (and capsrc_nids) containing all active input pins */ +static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids, + int num_nids) +{ + struct alc_spec *spec = codec->spec; + int n; + hda_nid_t fallback_adc = 0, fallback_cap = 0; + + for (n = 0; n < num_nids; n++) { + hda_nid_t adc, cap; + hda_nid_t conn[HDA_MAX_NUM_INPUTS]; + int nconns, i, j; + + adc = nids[n]; + if (get_wcaps_type(get_wcaps(codec, adc)) != AC_WID_AUD_IN) + continue; + cap = adc; + nconns = snd_hda_get_connections(codec, cap, conn, + ARRAY_SIZE(conn)); + if (nconns == 1) { + cap = conn[0]; + nconns = snd_hda_get_connections(codec, cap, conn, + ARRAY_SIZE(conn)); + } + if (nconns <= 0) + continue; + if (!fallback_adc) { + fallback_adc = adc; + fallback_cap = cap; + } + for (i = 0; i < AUTO_PIN_LAST; i++) { + hda_nid_t nid = spec->autocfg.input_pins[i]; + if (!nid) + continue; + for (j = 0; j < nconns; j++) { + if (conn[j] == nid) + break; + } + if (j >= nconns) + break; + } + if (i >= AUTO_PIN_LAST) { + int num_adcs = spec->num_adc_nids; + spec->private_adc_nids[num_adcs] = adc; + spec->private_capsrc_nids[num_adcs] = cap; + spec->num_adc_nids++; + spec->adc_nids = spec->private_adc_nids; + if (adc != cap) + spec->capsrc_nids = spec->private_capsrc_nids; + } + } + if (!spec->num_adc_nids) { + printk(KERN_WARNING "hda_codec: %s: no valid ADC found;" + " using fallback 0x%x\n", + codec->chip_name, fallback_adc); + spec->private_adc_nids[0] = fallback_adc; + spec->adc_nids = spec->private_adc_nids; + if (fallback_adc != fallback_cap) { + spec->private_capsrc_nids[0] = fallback_cap; + spec->capsrc_nids = spec->private_adc_nids; + } + } +} + #ifdef CONFIG_SND_HDA_INPUT_BEEP #define set_beep_amp(spec, nid, idx, dir) \ ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir)) @@ -6326,6 +6427,8 @@ static void alc260_auto_init_analog_input(struct hda_codec *codec) } } +#define alc260_auto_init_input_src alc880_auto_init_input_src + /* * generic initialization of ADC, input mixers and output mixers */ @@ -6412,6 +6515,7 @@ static void alc260_auto_init(struct hda_codec *codec) struct alc_spec *spec = codec->spec; alc260_auto_init_multi_out(codec); alc260_auto_init_analog_input(codec); + alc260_auto_init_input_src(codec); if (spec->unsol_event) alc_inithook(codec); } @@ -8384,6 +8488,42 @@ static struct snd_kcontrol_new alc883_medion_md2_mixer[] = { { } /* end */ }; +static struct snd_kcontrol_new alc883_medion_wim2160_mixer[] = { + HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), + HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), + HDA_CODEC_MUTE("Speaker Playback Switch", 0x15, 0x0, HDA_OUTPUT), + HDA_CODEC_MUTE("Headphone Playback Switch", 0x1a, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Line Playback Volume", 0x08, 0x0, HDA_INPUT), + 
HDA_CODEC_MUTE("Line Playback Switch", 0x08, 0x0, HDA_INPUT), + { } /* end */ +}; + +static struct hda_verb alc883_medion_wim2160_verbs[] = { + /* Unmute front mixer */ + {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, + {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, + + /* Set speaker pin to front mixer */ + {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, + + /* Init headphone pin */ + {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, + {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, + {0x1a, AC_VERB_SET_CONNECT_SEL, 0x00}, + {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, + + { } /* end */ +}; + +/* toggle speaker-output according to the hp-jack state */ +static void alc883_medion_wim2160_setup(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + + spec->autocfg.hp_pins[0] = 0x1a; + spec->autocfg.speaker_pins[0] = 0x15; +} + static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), @@ -8398,9 +8538,7 @@ static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = { HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), - HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT), - HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT), HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), @@ -9095,6 +9233,7 @@ static const char *alc882_models[ALC882_MODEL_LAST] = { [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g", [ALC883_MEDION] = "medion", [ALC883_MEDION_MD2] = "medion-md2", + [ALC883_MEDION_WIM2160] = "medion-wim2160", [ALC883_LAPTOP_EAPD] = "laptop-eapd", [ALC883_LENOVO_101E_2ch] = "lenovo-101e", [ALC883_LENOVO_NB0763] = "lenovo-nb0763", @@ -9211,6 +9350,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = { SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), + SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG), SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720), SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R), @@ -9749,6 +9889,21 @@ static struct alc_config_preset alc882_presets[] = { .setup = alc883_medion_md2_setup, .init_hook = alc_automute_amp, }, + [ALC883_MEDION_WIM2160] = { + .mixers = { alc883_medion_wim2160_mixer }, + .init_verbs = { alc883_init_verbs, alc883_medion_wim2160_verbs }, + .num_dacs = ARRAY_SIZE(alc883_dac_nids), + .dac_nids = alc883_dac_nids, + .dig_out_nid = ALC883_DIGOUT_NID, + .num_adc_nids = ARRAY_SIZE(alc883_adc_nids), + .adc_nids = alc883_adc_nids, + .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), + .channel_mode = alc883_3ST_2ch_modes, + .input_mux = &alc883_capture_source, + .unsol_event = alc_automute_amp_unsol_event, + .setup = alc883_medion_wim2160_setup, + .init_hook = alc_automute_amp, + }, [ALC883_LAPTOP_EAPD] = { .mixers = { alc883_base_mixer }, .init_verbs = { alc883_init_verbs, alc882_eapd_verbs }, @@ -10041,13 +10196,12 @@ static void alc882_auto_set_output_and_unmute(struct hda_codec *codec, int idx; alc_set_pin_output(codec, nid, pin_type); + if (dac_idx >= spec->multiout.num_dacs) + 
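/* out of range: no DAC is assigned to this output, nothing to connect */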
return; if (spec->multiout.dac_nids[dac_idx] == 0x25) idx = 4; - else { - if (spec->multiout.num_dacs >= dac_idx) - return; + else idx = spec->multiout.dac_nids[dac_idx] - 2; - } snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx); } @@ -10295,7 +10449,8 @@ static int patch_alc882(struct hda_codec *codec) board_config = ALC882_AUTO; } - alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups); + if (board_config == ALC882_AUTO) + alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 1); if (board_config == ALC882_AUTO) { /* automatic parse from the BIOS config */ @@ -10368,6 +10523,9 @@ static int patch_alc882(struct hda_codec *codec) set_capture_mixer(codec); set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); + if (board_config == ALC882_AUTO) + alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 0); + spec->vmaster_nid = 0x0c; codec->patch_ops = alc_patch_ops; @@ -12459,11 +12617,11 @@ static void alc268_aspire_one_speaker_automute(struct hda_codec *codec) unsigned char bits; present = snd_hda_jack_detect(codec, 0x15); - bits = present ? AMP_IN_MUTE(0) : 0; + bits = present ? HDA_AMP_MUTE : 0; snd_hda_codec_amp_stereo(codec, 0x0f, HDA_INPUT, 0, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0f, HDA_INPUT, 1, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); } static void alc268_acer_lc_unsol_event(struct hda_codec *codec, @@ -12748,6 +12906,7 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid, dac = 0x02; break; case 0x15: + case 0x21: /* ALC269vb has this pin, too */ dac = 0x03; break; default: @@ -13333,9 +13492,9 @@ static hda_nid_t alc269vb_capsrc_nids[1] = { 0x22, }; -/* NOTE: ADC2 (0x07) is connected from a recording *MIXER* (0x24), - * not a mux! - */ +static hda_nid_t alc269_adc_candidates[] = { + 0x08, 0x09, 0x07, +}; #define alc269_modes alc260_modes #define alc269_capture_source alc880_lg_lw_capture_source @@ -13482,11 +13641,11 @@ static void alc269_quanta_fl1_speaker_automute(struct hda_codec *codec) unsigned char bits; present = snd_hda_jack_detect(codec, 0x15); - bits = present ? AMP_IN_MUTE(0) : 0; + bits = present ? HDA_AMP_MUTE : 0; snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x0c); @@ -13511,11 +13670,11 @@ static void alc269_lifebook_speaker_automute(struct hda_codec *codec) /* Check port replicator headphone socket */ present |= snd_hda_jack_detect(codec, 0x1a); - bits = present ? AMP_IN_MUTE(0) : 0; + bits = present ? HDA_AMP_MUTE : 0; snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x0c); @@ -13646,11 +13805,11 @@ static void alc269_speaker_automute(struct hda_codec *codec) unsigned char bits; present = snd_hda_jack_detect(codec, nid); - bits = present ? AMP_IN_MUTE(0) : 0; + bits = present ? 
HDA_AMP_MUTE : 0; snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); } /* unsolicited event for HP jack sensing */ @@ -13667,19 +13826,19 @@ static void alc269_laptop_unsol_event(struct hda_codec *codec, } } -static void alc269_laptop_dmic_setup(struct hda_codec *codec) +static void alc269_laptop_amic_setup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; spec->autocfg.hp_pins[0] = 0x15; spec->autocfg.speaker_pins[0] = 0x14; spec->ext_mic.pin = 0x18; spec->ext_mic.mux_idx = 0; - spec->int_mic.pin = 0x12; - spec->int_mic.mux_idx = 5; + spec->int_mic.pin = 0x19; + spec->int_mic.mux_idx = 1; spec->auto_mic = 1; } -static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) +static void alc269_laptop_dmic_setup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; spec->autocfg.hp_pins[0] = 0x15; @@ -13687,14 +13846,14 @@ static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) spec->ext_mic.pin = 0x18; spec->ext_mic.mux_idx = 0; spec->int_mic.pin = 0x12; - spec->int_mic.mux_idx = 6; + spec->int_mic.mux_idx = 5; spec->auto_mic = 1; } -static void alc269_laptop_amic_setup(struct hda_codec *codec) +static void alc269vb_laptop_amic_setup(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; - spec->autocfg.hp_pins[0] = 0x15; + spec->autocfg.hp_pins[0] = 0x21; spec->autocfg.speaker_pins[0] = 0x14; spec->ext_mic.pin = 0x18; spec->ext_mic.mux_idx = 0; @@ -13703,6 +13862,18 @@ static void alc269_laptop_amic_setup(struct hda_codec *codec) spec->auto_mic = 1; } +static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + spec->autocfg.hp_pins[0] = 0x21; + spec->autocfg.speaker_pins[0] = 0x14; + spec->ext_mic.pin = 0x18; + spec->ext_mic.mux_idx = 0; + spec->int_mic.pin = 0x12; + spec->int_mic.mux_idx = 6; + spec->auto_mic = 1; +} + static void alc269_laptop_inithook(struct hda_codec *codec) { alc269_speaker_automute(codec); @@ -13842,7 +14013,6 @@ static int alc269_parse_auto_config(struct hda_codec *codec) struct alc_spec *spec = codec->spec; int err; static hda_nid_t alc269_ignore[] = { 0x1d, 0 }; - hda_nid_t real_capsrc_nids; err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, alc269_ignore); @@ -13866,18 +14036,19 @@ static int alc269_parse_auto_config(struct hda_codec *codec) if ((alc_read_coef_idx(codec, 0) & 0x00f0) == 0x0010) { add_verb(spec, alc269vb_init_verbs); - real_capsrc_nids = alc269vb_capsrc_nids[0]; alc_ssid_check(codec, 0, 0x1b, 0x14, 0x21); } else { add_verb(spec, alc269_init_verbs); - real_capsrc_nids = alc269_capsrc_nids[0]; alc_ssid_check(codec, 0x15, 0x1b, 0x14, 0); } spec->num_mux_defs = 1; spec->input_mux = &spec->private_imux[0]; + fillup_priv_adc_nids(codec, alc269_adc_candidates, + ARRAY_SIZE(alc269_adc_candidates)); + /* set default input source */ - snd_hda_codec_write_cache(codec, real_capsrc_nids, + snd_hda_codec_write_cache(codec, spec->capsrc_nids[0], 0, AC_VERB_SET_CONNECT_SEL, spec->input_mux->items[0].index); @@ -13907,6 +14078,27 @@ static void alc269_auto_init(struct hda_codec *codec) alc_inithook(codec); } +enum { + ALC269_FIXUP_SONY_VAIO, +}; + +static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = { + {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD}, + {} +}; + +static const struct alc_fixup alc269_fixups[] = { + [ALC269_FIXUP_SONY_VAIO] = { + .verbs = alc269_sony_vaio_fixup_verbs + }, +}; + +static struct 
snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), + {} +}; + + /* * configuration and preset */ @@ -13966,7 +14158,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = { ALC269_DMIC), SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC), SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC), - SND_PCI_QUIRK(0x104d, 0x9071, "SONY XTB", ALC269_DMIC), + SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_AUTO), SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK), SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC), SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU), @@ -14040,7 +14232,7 @@ static struct alc_config_preset alc269_presets[] = { .num_channel_mode = ARRAY_SIZE(alc269_modes), .channel_mode = alc269_modes, .unsol_event = alc269_laptop_unsol_event, - .setup = alc269_laptop_amic_setup, + .setup = alc269vb_laptop_amic_setup, .init_hook = alc269_laptop_inithook, }, [ALC269VB_DMIC] = { @@ -14120,6 +14312,9 @@ static int patch_alc269(struct hda_codec *codec) board_config = ALC269_AUTO; } + if (board_config == ALC269_AUTO) + alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 1); + if (board_config == ALC269_AUTO) { /* automatic parse from the BIOS config */ err = alc269_parse_auto_config(codec); @@ -14156,20 +14351,25 @@ static int patch_alc269(struct hda_codec *codec) spec->stream_digital_playback = &alc269_pcm_digital_playback; spec->stream_digital_capture = &alc269_pcm_digital_capture; - if (!is_alc269vb) { - spec->adc_nids = alc269_adc_nids; - spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids); - spec->capsrc_nids = alc269_capsrc_nids; - } else { - spec->adc_nids = alc269vb_adc_nids; - spec->num_adc_nids = ARRAY_SIZE(alc269vb_adc_nids); - spec->capsrc_nids = alc269vb_capsrc_nids; + if (!spec->adc_nids) { /* wasn't filled automatically? 
use default */ + if (!is_alc269vb) { + spec->adc_nids = alc269_adc_nids; + spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids); + spec->capsrc_nids = alc269_capsrc_nids; + } else { + spec->adc_nids = alc269vb_adc_nids; + spec->num_adc_nids = ARRAY_SIZE(alc269vb_adc_nids); + spec->capsrc_nids = alc269vb_capsrc_nids; + } } if (!spec->cap_mixer) set_capture_mixer(codec); set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); + if (board_config == ALC269_AUTO) + alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 0); + spec->vmaster_nid = 0x02; codec->patch_ops = alc_patch_ops; @@ -15258,7 +15458,8 @@ static int patch_alc861(struct hda_codec *codec) board_config = ALC861_AUTO; } - alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups); + if (board_config == ALC861_AUTO) + alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 1); if (board_config == ALC861_AUTO) { /* automatic parse from the BIOS config */ @@ -15295,6 +15496,9 @@ static int patch_alc861(struct hda_codec *codec) spec->vmaster_nid = 0x03; + if (board_config == ALC861_AUTO) + alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 0); + codec->patch_ops = alc_patch_ops; if (board_config == ALC861_AUTO) { spec->init_hook = alc861_auto_init; @@ -16229,7 +16433,8 @@ static int patch_alc861vd(struct hda_codec *codec) board_config = ALC861VD_AUTO; } - alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups); + if (board_config == ALC861VD_AUTO) + alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 1); if (board_config == ALC861VD_AUTO) { /* automatic parse from the BIOS config */ @@ -16277,6 +16482,9 @@ static int patch_alc861vd(struct hda_codec *codec) spec->vmaster_nid = 0x02; + if (board_config == ALC861VD_AUTO) + alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 0); + codec->patch_ops = alc_patch_ops; if (board_config == ALC861VD_AUTO) @@ -17115,9 +17323,9 @@ static void alc663_m51va_speaker_automute(struct hda_codec *codec) present = snd_hda_jack_detect(codec, 0x21); bits = present ? HDA_AMP_MUTE : 0; snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); } static void alc663_21jd_two_speaker_automute(struct hda_codec *codec) @@ -17128,13 +17336,13 @@ static void alc663_21jd_two_speaker_automute(struct hda_codec *codec) present = snd_hda_jack_detect(codec, 0x21); bits = present ? HDA_AMP_MUTE : 0; snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 0, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 1, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); } static void alc663_15jd_two_speaker_automute(struct hda_codec *codec) @@ -17145,13 +17353,13 @@ static void alc663_15jd_two_speaker_automute(struct hda_codec *codec) present = snd_hda_jack_detect(codec, 0x15); bits = present ? 
HDA_AMP_MUTE : 0; snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 0, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); snd_hda_codec_amp_stereo(codec, 0x0e, HDA_INPUT, 1, - AMP_IN_MUTE(0), bits); + HDA_AMP_MUTE, bits); } static void alc662_f5z_speaker_automute(struct hda_codec *codec) @@ -17190,14 +17398,14 @@ static void alc663_two_hp_m2_speaker_automute(struct hda_codec *codec) if (present1 || present2) { snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, - AMP_IN_MUTE(0), AMP_IN_MUTE(0)); + HDA_AMP_MUTE, HDA_AMP_MUTE); snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, - AMP_IN_MUTE(0), AMP_IN_MUTE(0)); + HDA_AMP_MUTE, HDA_AMP_MUTE); } else { snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0, - AMP_IN_MUTE(0), 0); + HDA_AMP_MUTE, 0); snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 1, - AMP_IN_MUTE(0), 0); + HDA_AMP_MUTE, 0); } } diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 9ddc37300f6..73453814e09 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -476,7 +476,7 @@ static struct snd_kcontrol_new *via_clone_control(struct via_spec *spec, knew->name = kstrdup(tmpl->name, GFP_KERNEL); if (!knew->name) return NULL; - return 0; + return knew; } static void via_free_kctls(struct hda_codec *codec) @@ -1215,14 +1215,13 @@ static struct snd_kcontrol_new via_hp_mixer[2] = { }, }; -static int via_hp_build(struct via_spec *spec) +static int via_hp_build(struct hda_codec *codec) { + struct via_spec *spec = codec->spec; struct snd_kcontrol_new *knew; hda_nid_t nid; - - knew = via_clone_control(spec, &via_hp_mixer[0]); - if (knew == NULL) - return -ENOMEM; + int nums; + hda_nid_t conn[HDA_MAX_CONNECTIONS]; switch (spec->codec_type) { case VT1718S: @@ -1239,6 +1238,14 @@ static int via_hp_build(struct via_spec *spec) break; } + nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS); + if (nums <= 1) + return 0; + + knew = via_clone_control(spec, &via_hp_mixer[0]); + if (knew == NULL) + return -ENOMEM; + knew->subdevice = HDA_SUBDEV_NID_FLAG | nid; knew->private_value = nid; @@ -2561,7 +2568,7 @@ static int vt1708_parse_auto_config(struct hda_codec *codec) spec->input_mux = &spec->private_imux[0]; if (spec->hp_mux) - via_hp_build(spec); + via_hp_build(codec); via_smart51_build(spec); return 1; @@ -3087,7 +3094,7 @@ static int vt1709_parse_auto_config(struct hda_codec *codec) spec->input_mux = &spec->private_imux[0]; if (spec->hp_mux) - via_hp_build(spec); + via_hp_build(codec); via_smart51_build(spec); return 1; @@ -3654,7 +3661,7 @@ static int vt1708B_parse_auto_config(struct hda_codec *codec) spec->input_mux = &spec->private_imux[0]; if (spec->hp_mux) - via_hp_build(spec); + via_hp_build(codec); via_smart51_build(spec); return 1; @@ -4140,7 +4147,7 @@ static int vt1708S_parse_auto_config(struct hda_codec *codec) spec->input_mux = &spec->private_imux[0]; if (spec->hp_mux) - via_hp_build(spec); + via_hp_build(codec); via_smart51_build(spec); return 1; @@ -4510,7 +4517,7 @@ static int vt1702_parse_auto_config(struct hda_codec *codec) spec->input_mux = &spec->private_imux[0]; if (spec->hp_mux) - via_hp_build(spec); + via_hp_build(codec); return 1; } @@ -4930,7 +4937,7 @@ static int vt1718S_parse_auto_config(struct hda_codec *codec) spec->input_mux = &spec->private_imux[0]; if (spec->hp_mux) - via_hp_build(spec); + via_hp_build(codec); 
via_smart51_build(spec); @@ -5425,7 +5432,7 @@ static int vt1716S_parse_auto_config(struct hda_codec *codec) spec->input_mux = &spec->private_imux[0]; if (spec->hp_mux) - via_hp_build(spec); + via_hp_build(codec); via_smart51_build(spec); @@ -5781,7 +5788,7 @@ static int vt2002P_parse_auto_config(struct hda_codec *codec) spec->input_mux = &spec->private_imux[0]; if (spec->hp_mux) - via_hp_build(spec); + via_hp_build(codec); return 1; } @@ -6000,12 +6007,12 @@ static int vt1812_auto_create_multi_out_ctls(struct via_spec *spec, /* Line-Out: PortE */ err = via_add_control(spec, VIA_CTL_WIDGET_VOL, - "Master Front Playback Volume", + "Front Playback Volume", HDA_COMPOSE_AMP_VAL(0x8, 3, 0, HDA_OUTPUT)); if (err < 0) return err; err = via_add_control(spec, VIA_CTL_WIDGET_BIND_PIN_MUTE, - "Master Front Playback Switch", + "Front Playback Switch", HDA_COMPOSE_AMP_VAL(0x28, 3, 0, HDA_OUTPUT)); if (err < 0) return err; @@ -6130,7 +6137,7 @@ static int vt1812_parse_auto_config(struct hda_codec *codec) spec->input_mux = &spec->private_imux[0]; if (spec->hp_mux) - via_hp_build(spec); + via_hp_build(codec); return 1; } diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c index 55e9315d4cc..3be8f97c8bc 100644 --- a/sound/pci/mixart/mixart.c +++ b/sound/pci/mixart/mixart.c @@ -1162,13 +1162,15 @@ static long snd_mixart_BA0_read(struct snd_info_entry *entry, void *file_private unsigned long count, unsigned long pos) { struct mixart_mgr *mgr = entry->private_data; + unsigned long maxsize; - count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ - if(count <= 0) + if (pos >= MIXART_BA0_SIZE) return 0; - if(pos + count > MIXART_BA0_SIZE) - count = (long)(MIXART_BA0_SIZE - pos); - if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count)) + maxsize = MIXART_BA0_SIZE - pos; + if (count > maxsize) + count = maxsize; + count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ + if (copy_to_user_fromio(buf, MIXART_MEM(mgr, pos), count)) return -EFAULT; return count; } @@ -1181,13 +1183,15 @@ static long snd_mixart_BA1_read(struct snd_info_entry *entry, void *file_private unsigned long count, unsigned long pos) { struct mixart_mgr *mgr = entry->private_data; + unsigned long maxsize; - count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ - if(count <= 0) + if (pos >= MIXART_BA1_SIZE) return 0; - if(pos + count > MIXART_BA1_SIZE) - count = (long)(MIXART_BA1_SIZE - pos); - if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count)) + maxsize = MIXART_BA1_SIZE - pos; + if (count > maxsize) + count = maxsize; + count = count & ~3; /* make sure the read size is a multiple of 4 bytes */ + if (copy_to_user_fromio(buf, MIXART_REG(mgr, pos), count)) return -EFAULT; return count; } diff --git a/sound/soc/atmel/atmel-pcm.c b/sound/soc/atmel/atmel-pcm.c index 9ef6b96373f..3e6628c8e66 100644 --- a/sound/soc/atmel/atmel-pcm.c +++ b/sound/soc/atmel/atmel-pcm.c @@ -180,7 +180,7 @@ static int atmel_pcm_hw_params(struct snd_pcm_substream *substream, snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); runtime->dma_bytes = params_buffer_bytes(params); - prtd->params = rtd->dai->cpu_dai->dma_data; + prtd->params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); prtd->params->dma_intr_handler = atmel_pcm_dma_irq; prtd->dma_buffer = runtime->dma_addr; diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c index e588e63f18d..0b59806905d 100644 --- a/sound/soc/atmel/atmel_ssc_dai.c +++ b/sound/soc/atmel/atmel_ssc_dai.c @@ 
-363,12 +363,12 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream, ssc_p->dma_params[dir] = dma_params; /* - * The cpu_dai->dma_data field is only used to communicate the - * appropriate DMA parameters to the pcm driver hw_params() + * The snd_soc_pcm_stream->dma_data field is only used to communicate + * the appropriate DMA parameters to the pcm driver hw_params() * function. It should not be used for other purposes * as it is common to all substreams. */ - rtd->dai->cpu_dai->dma_data = dma_params; + snd_soc_dai_set_dma_data(rtd->dai->cpu_dai, substream, dma_params); channels = params_channels(params); diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c index fd101d450d5..1f5e57a4bb7 100644 --- a/sound/soc/codecs/ac97.c +++ b/sound/soc/codecs/ac97.c @@ -81,9 +81,11 @@ static int ac97_write(struct snd_soc_codec *codec, unsigned int reg, static int ac97_soc_probe(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); + struct snd_soc_card *card = socdev->card; struct snd_soc_codec *codec; struct snd_ac97_bus *ac97_bus; struct snd_ac97_template ac97_template; + int i; int ret = 0; printk(KERN_INFO "AC97 SoC Audio Codec %s\n", AC97_VERSION); @@ -103,12 +105,6 @@ static int ac97_soc_probe(struct platform_device *pdev) INIT_LIST_HEAD(&codec->dapm_widgets); INIT_LIST_HEAD(&codec->dapm_paths); - ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); - if (ret < 0) { - printk(KERN_ERR "ASoC: failed to init gen ac97 glue\n"); - goto err; - } - /* register pcms */ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); if (ret < 0) @@ -124,6 +120,13 @@ static int ac97_soc_probe(struct platform_device *pdev) if (ret < 0) goto bus_err; + for (i = 0; i < card->num_links; i++) { + if (card->dai_link[i].codec_dai->ac97_control) { + snd_ac97_dev_add_pdata(codec->ac97, + card->dai_link[i].cpu_dai->ac97_pdata); + } + } + return 0; bus_err: diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index a34cbcf7904..002e289d125 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c @@ -23,7 +23,6 @@ #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/version.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/firmware.h> diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 8d1c63754be..9da0724cd47 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -3008,34 +3008,39 @@ static int wm8994_set_bias_level(struct snd_soc_codec *codec, break; case SND_SOC_BIAS_OFF: - /* Switch over to startup biases */ - snd_soc_update_bits(codec, WM8994_ANTIPOP_2, - WM8994_BIAS_SRC | WM8994_STARTUP_BIAS_ENA | - WM8994_VMID_BUF_ENA | - WM8994_VMID_RAMP_MASK, - WM8994_BIAS_SRC | WM8994_STARTUP_BIAS_ENA | - WM8994_VMID_BUF_ENA | - (1 << WM8994_VMID_RAMP_SHIFT)); - - /* Disable main biases */ - snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1, - WM8994_BIAS_ENA | WM8994_VMID_SEL_MASK, 0); + if (codec->bias_level == SND_SOC_BIAS_STANDBY) { + /* Switch over to startup biases */ + snd_soc_update_bits(codec, WM8994_ANTIPOP_2, + WM8994_BIAS_SRC | + WM8994_STARTUP_BIAS_ENA | + WM8994_VMID_BUF_ENA | + WM8994_VMID_RAMP_MASK, + WM8994_BIAS_SRC | + WM8994_STARTUP_BIAS_ENA | + WM8994_VMID_BUF_ENA | + (1 << WM8994_VMID_RAMP_SHIFT)); - /* Discharge line */ - snd_soc_update_bits(codec, WM8994_ANTIPOP_1, - WM8994_LINEOUT1_DISCH | - WM8994_LINEOUT2_DISCH, - WM8994_LINEOUT1_DISCH | - WM8994_LINEOUT2_DISCH); + /* Disable main biases */ + 
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1, + WM8994_BIAS_ENA | + WM8994_VMID_SEL_MASK, 0); - msleep(5); + /* Discharge line */ + snd_soc_update_bits(codec, WM8994_ANTIPOP_1, + WM8994_LINEOUT1_DISCH | + WM8994_LINEOUT2_DISCH, + WM8994_LINEOUT1_DISCH | + WM8994_LINEOUT2_DISCH); - /* Switch off startup biases */ - snd_soc_update_bits(codec, WM8994_ANTIPOP_2, - WM8994_BIAS_SRC | WM8994_STARTUP_BIAS_ENA | - WM8994_VMID_BUF_ENA | - WM8994_VMID_RAMP_MASK, 0); + msleep(5); + /* Switch off startup biases */ + snd_soc_update_bits(codec, WM8994_ANTIPOP_2, + WM8994_BIAS_SRC | + WM8994_STARTUP_BIAS_ENA | + WM8994_VMID_BUF_ENA | + WM8994_VMID_RAMP_MASK, 0); + } break; } codec->bias_level = level; @@ -3402,7 +3407,7 @@ struct snd_soc_dai wm8994_dai[] = { .rates = WM8994_RATES, .formats = WM8994_FORMATS, }, - .playback = { + .capture = { .stream_name = "AIF3 Capture", .channels_min = 2, .channels_max = 2, @@ -3731,11 +3736,12 @@ static int wm8994_codec_probe(struct platform_device *pdev) case 3: wm8994->hubs.dcs_codes = -5; wm8994->hubs.hp_startup_mode = 1; + wm8994->hubs.dcs_readback_mode = 1; break; default: + wm8994->hubs.dcs_readback_mode = 1; break; } - /* Remember if AIFnLRCLK is configured as a GPIO. This should be * configured on init - if a system wants to do this dynamically diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 486bdd21a98..e1f225a3ac4 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -62,21 +62,27 @@ static const char *speaker_mode_text[] = { static const struct soc_enum speaker_mode = SOC_ENUM_SINGLE(WM8993_SPKMIXR_ATTENUATION, 8, 2, speaker_mode_text); -static void wait_for_dc_servo(struct snd_soc_codec *codec) +static void wait_for_dc_servo(struct snd_soc_codec *codec, unsigned int op) { unsigned int reg; int count = 0; + unsigned int val; + + val = op | WM8993_DCS_ENA_CHAN_0 | WM8993_DCS_ENA_CHAN_1; + + /* Trigger the command */ + snd_soc_write(codec, WM8993_DC_SERVO_0, val); dev_dbg(codec->dev, "Waiting for DC servo...\n"); do { count++; msleep(1); - reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_0); + reg = snd_soc_read(codec, WM8993_DC_SERVO_0); dev_dbg(codec->dev, "DC servo: %x\n", reg); - } while (reg & WM8993_DCS_DATAPATH_BUSY && count < 400); + } while (reg & op && count < 400); - if (reg & WM8993_DCS_DATAPATH_BUSY) + if (reg & op) dev_err(codec->dev, "Timed out waiting for DC Servo\n"); } @@ -86,51 +92,58 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec) static void calibrate_dc_servo(struct snd_soc_codec *codec) { struct wm_hubs_data *hubs = codec->private_data; - u16 reg, dcs_cfg; + u16 reg, reg_l, reg_r, dcs_cfg; /* Set for 32 series updates */ snd_soc_update_bits(codec, WM8993_DC_SERVO_1, WM8993_DCS_SERIES_NO_01_MASK, 32 << WM8993_DCS_SERIES_NO_01_SHIFT); - - /* Enable the DC servo. Write all bits to avoid triggering startup - * or write calibration. - */ - snd_soc_update_bits(codec, WM8993_DC_SERVO_0, - 0xFFFF, - WM8993_DCS_ENA_CHAN_0 | - WM8993_DCS_ENA_CHAN_1 | - WM8993_DCS_TRIG_SERIES_1 | - WM8993_DCS_TRIG_SERIES_0); - - wait_for_dc_servo(codec); + wait_for_dc_servo(codec, + WM8993_DCS_TRIG_SERIES_0 | WM8993_DCS_TRIG_SERIES_1); /* Apply correction to DC servo result */ if (hubs->dcs_codes) { dev_dbg(codec->dev, "Applying %d code DC servo correction\n", hubs->dcs_codes); + /* Different chips in the family support different + * readback methods. 
+ */ + switch (hubs->dcs_readback_mode) { + case 0: + reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1) + & WM8993_DCS_INTEG_CHAN_0_MASK; + reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2) + & WM8993_DCS_INTEG_CHAN_1_MASK; + break; + case 1: + reg = snd_soc_read(codec, WM8993_DC_SERVO_3); + reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK) + >> WM8993_DCS_DAC_WR_VAL_1_SHIFT; + reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK; + break; + default: + WARN(1, "Unknown DCS readback method"); + break; + } + /* HPOUT1L */ - reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1) & - WM8993_DCS_INTEG_CHAN_0_MASK;; - reg += hubs->dcs_codes; - dcs_cfg = reg << WM8993_DCS_DAC_WR_VAL_1_SHIFT; + if (reg_l + hubs->dcs_codes > 0 && + reg_l + hubs->dcs_codes < 0xff) + reg_l += hubs->dcs_codes; + dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT; /* HPOUT1R */ - reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2) & - WM8993_DCS_INTEG_CHAN_1_MASK; - reg += hubs->dcs_codes; - dcs_cfg |= reg; + if (reg_r + hubs->dcs_codes > 0 && + reg_r + hubs->dcs_codes < 0xff) + reg_r += hubs->dcs_codes; + dcs_cfg |= reg_r; /* Do it */ snd_soc_write(codec, WM8993_DC_SERVO_3, dcs_cfg); - snd_soc_update_bits(codec, WM8993_DC_SERVO_0, - WM8993_DCS_TRIG_DAC_WR_0 | - WM8993_DCS_TRIG_DAC_WR_1, - WM8993_DCS_TRIG_DAC_WR_0 | - WM8993_DCS_TRIG_DAC_WR_1); - - wait_for_dc_servo(codec); + wait_for_dc_servo(codec, + WM8993_DCS_TRIG_DAC_WR_0 | + WM8993_DCS_TRIG_DAC_WR_1); } } @@ -141,10 +154,16 @@ static int wm8993_put_dc_servo(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm_hubs_data *hubs = codec->private_data; int ret; ret = snd_soc_put_volsw_2r(kcontrol, ucontrol); + /* If we're applying an offset correction then updating the + * calibration would be likely to introduce further offsets. 
*/ + if (hubs->dcs_codes) + return ret; + /* Only need to do this if the outputs are active */ if (snd_soc_read(codec, WM8993_POWER_MANAGEMENT_1) & (WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA)) diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h index 420104fe9c9..e51c1668358 100644 --- a/sound/soc/codecs/wm_hubs.h +++ b/sound/soc/codecs/wm_hubs.h @@ -21,6 +21,7 @@ extern const unsigned int wm_hubs_spkmix_tlv[]; /* This *must* be the first element of the codec->private_data struct */ struct wm_hubs_data { int dcs_codes; + int dcs_readback_mode; int hp_startup_mode; }; diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c index 62af7e025e7..adadcd3aa1b 100644 --- a/sound/soc/davinci/davinci-i2s.c +++ b/sound/soc/davinci/davinci-i2s.c @@ -586,7 +586,8 @@ static int davinci_i2s_probe(struct platform_device *pdev) dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel = res->start; davinci_i2s_dai.private_data = dev; - davinci_i2s_dai.dma_data = dev->dma_params; + davinci_i2s_dai.capture.dma_data = dev->dma_params; + davinci_i2s_dai.playback.dma_data = dev->dma_params; ret = snd_soc_register_dai(&davinci_i2s_dai); if (ret != 0) goto err_free_mem; diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 6c80cc35eca..79f0f4ad242 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c @@ -918,7 +918,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev) dma_data->channel = res->start; davinci_mcasp_dai[pdata->op_mode].private_data = dev; - davinci_mcasp_dai[pdata->op_mode].dma_data = dev->dma_params; + davinci_mcasp_dai[pdata->op_mode].capture.dma_data = dev->dma_params; + davinci_mcasp_dai[pdata->op_mode].playback.dma_data = dev->dma_params; davinci_mcasp_dai[pdata->op_mode].dev = &pdev->dev; ret = snd_soc_register_dai(&davinci_mcasp_dai[pdata->op_mode]); diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c index 80c7fdf2f52..2dc406f42fe 100644 --- a/sound/soc/davinci/davinci-pcm.c +++ b/sound/soc/davinci/davinci-pcm.c @@ -649,8 +649,10 @@ static int davinci_pcm_open(struct snd_pcm_substream *substream) struct snd_pcm_hardware *ppcm; int ret = 0; struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct davinci_pcm_dma_params *pa = rtd->dai->cpu_dai->dma_data; + struct davinci_pcm_dma_params *pa; struct davinci_pcm_dma_params *params; + + pa = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); if (!pa) return -ENODEV; params = &pa[substream->stream]; diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c index 86668ab3f4d..2b31ac673ea 100644 --- a/sound/soc/imx/imx-pcm-dma-mx2.c +++ b/sound/soc/imx/imx-pcm-dma-mx2.c @@ -71,7 +71,12 @@ static void imx_ssi_dma_callback(int channel, void *data) static void snd_imx_dma_err_callback(int channel, void *data, int err) { - pr_err("DMA error callback called\n"); + struct snd_pcm_substream *substream = data; + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct imx_pcm_dma_params *dma_params = + snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); + struct snd_pcm_runtime *runtime = substream->runtime; + struct imx_pcm_runtime_data *iprtd = runtime->private_data; + int ret; pr_err("DMA timeout on channel %d -%s%s%s%s\n", channel, @@ -79,16 +84,26 @@ static void snd_imx_dma_err_callback(int channel, void *data, int err) err & IMX_DMA_ERR_REQUEST ? " request" : "", err & IMX_DMA_ERR_TRANSFER ? " transfer" : "", err & IMX_DMA_ERR_BUFFER ? 
" buffer" : ""); + + imx_dma_disable(iprtd->dma); + ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count, + IMX_DMA_LENGTH_LOOP, dma_params->dma_addr, + substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? + DMA_MODE_WRITE : DMA_MODE_READ); + if (!ret) + imx_dma_enable(iprtd->dma); } static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data; + struct imx_pcm_dma_params *dma_params; struct snd_pcm_runtime *runtime = substream->runtime; struct imx_pcm_runtime_data *iprtd = runtime->private_data; int ret; + dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); + iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH); if (iprtd->dma < 0) { pr_err("Failed to claim the audio DMA\n"); @@ -193,10 +208,12 @@ static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data; + struct imx_pcm_dma_params *dma_params; struct imx_pcm_runtime_data *iprtd = runtime->private_data; int err; + dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); + iprtd->substream = substream; iprtd->buf = (unsigned int *)substream->dma_buffer.area; iprtd->period_cnt = 0; diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c index f96a373699c..6b518e07eea 100644 --- a/sound/soc/imx/imx-pcm-fiq.c +++ b/sound/soc/imx/imx-pcm-fiq.c @@ -39,23 +39,24 @@ struct imx_pcm_runtime_data { unsigned long offset; unsigned long last_offset; unsigned long size; - struct timer_list timer; - int poll_time; + struct hrtimer hrt; + int poll_time_ns; + struct snd_pcm_substream *substream; + atomic_t running; }; -static inline void imx_ssi_set_next_poll(struct imx_pcm_runtime_data *iprtd) +static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt) { - iprtd->timer.expires = jiffies + iprtd->poll_time; -} - -static void imx_ssi_timer_callback(unsigned long data) -{ - struct snd_pcm_substream *substream = (void *)data; + struct imx_pcm_runtime_data *iprtd = + container_of(hrt, struct imx_pcm_runtime_data, hrt); + struct snd_pcm_substream *substream = iprtd->substream; struct snd_pcm_runtime *runtime = substream->runtime; - struct imx_pcm_runtime_data *iprtd = runtime->private_data; struct pt_regs regs; unsigned long delta; + if (!atomic_read(&iprtd->running)) + return HRTIMER_NORESTART; + get_fiq_regs(&regs); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) @@ -72,16 +73,14 @@ static void imx_ssi_timer_callback(unsigned long data) /* If we've transferred at least a period then report it and * reset our poll time */ - if (delta >= runtime->period_size) { + if (delta >= iprtd->period) { snd_pcm_period_elapsed(substream); iprtd->last_offset = iprtd->offset; - - imx_ssi_set_next_poll(iprtd); } - /* Restart the timer; if we didn't report we'll run on the next tick */ - add_timer(&iprtd->timer); + hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns)); + return HRTIMER_RESTART; } static struct fiq_handler fh = { @@ -99,8 +98,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream, iprtd->period = params_period_bytes(params) ; iprtd->offset = 0; iprtd->last_offset = 0; - iprtd->poll_time = HZ / (params_rate(params) / params_period_size(params)); - + iprtd->poll_time_ns = 1000000000 / params_rate(params) * + params_period_size(params); 
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; @@ -135,8 +134,9 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - imx_ssi_set_next_poll(iprtd); - add_timer(&iprtd->timer); + atomic_set(&iprtd->running, 1); + hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns), + HRTIMER_MODE_REL); if (++fiq_enable == 1) enable_fiq(imx_pcm_fiq); @@ -145,11 +145,11 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - del_timer(&iprtd->timer); + atomic_set(&iprtd->running, 0); + if (--fiq_enable == 0) disable_fiq(imx_pcm_fiq); - break; default: return -EINVAL; @@ -180,7 +180,7 @@ static struct snd_pcm_hardware snd_imx_hardware = { .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, .period_bytes_min = 128, .period_bytes_max = 16 * 1024, - .periods_min = 2, + .periods_min = 4, .periods_max = 255, .fifo_size = 0, }; @@ -194,9 +194,11 @@ static int snd_imx_open(struct snd_pcm_substream *substream) iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL); runtime->private_data = iprtd; - init_timer(&iprtd->timer); - iprtd->timer.data = (unsigned long)substream; - iprtd->timer.function = imx_ssi_timer_callback; + iprtd->substream = substream; + + atomic_set(&iprtd->running, 0); + hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + iprtd->hrt.function = snd_hrtimer_callback; ret = snd_pcm_hw_constraint_integer(substream->runtime, SNDRV_PCM_HW_PARAM_PERIODS); @@ -212,7 +214,8 @@ static int snd_imx_close(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = substream->runtime; struct imx_pcm_runtime_data *iprtd = runtime->private_data; - del_timer_sync(&iprtd->timer); + hrtimer_cancel(&iprtd->hrt); + kfree(iprtd); return 0; diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c index 6546b06cbd2..80b4fee2442 100644 --- a/sound/soc/imx/imx-ssi.c +++ b/sound/soc/imx/imx-ssi.c @@ -235,17 +235,20 @@ static int imx_ssi_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct imx_ssi *ssi = cpu_dai->private_data; + struct imx_pcm_dma_params *dma_data; u32 reg, sccr; /* Tx/Rx config */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { reg = SSI_STCCR; - cpu_dai->dma_data = &ssi->dma_params_tx; + dma_data = &ssi->dma_params_tx; } else { reg = SSI_SRCCR; - cpu_dai->dma_data = &ssi->dma_params_rx; + dma_data = &ssi->dma_params_rx; } + snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); + sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK; /* DAI data (word) size */ @@ -653,7 +656,8 @@ static int imx_ssi_probe(struct platform_device *pdev) dai->private_data = ssi; if ((cpu_is_mx27() || cpu_is_mx21()) && - !(ssi->flags & IMX_SSI_USE_AC97)) { + !(ssi->flags & IMX_SSI_USE_AC97) && + (ssi->flags & IMX_SSI_DMA)) { ssi->flags |= IMX_SSI_DMA; platform = imx_ssi_dma_mx2_init(pdev, ssi); } else diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index e814a9591f7..8ad9dc90100 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c @@ -297,7 +297,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream, omap_mcbsp_dai_dma_params[id][substream->stream].sync_mode = sync_mode; omap_mcbsp_dai_dma_params[id][substream->stream].data_type = OMAP_DMA_DATA_TYPE_S16; - cpu_dai->dma_data = &omap_mcbsp_dai_dma_params[id][substream->stream]; + + 
snd_soc_dai_set_dma_data(cpu_dai, substream, + &omap_mcbsp_dai_dma_params[id][substream->stream]); if (mcbsp_data->configured) { /* McBSP already configured by another stream */ diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c index 25f19e4728b..b7f4f7e015f 100644 --- a/sound/soc/omap/omap-mcpdm.c +++ b/sound/soc/omap/omap-mcpdm.c @@ -150,7 +150,8 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream, int stream = substream->stream; int channels, err, link_mask = 0; - cpu_dai->dma_data = &omap_mcpdm_dai_dma_params[stream]; + snd_soc_dai_set_dma_data(cpu_dai, substream, + &omap_mcpdm_dai_dma_params[stream]); channels = params_channels(params); switch (channels) { diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c index ba8acbb0a7f..1e521904ea6 100644 --- a/sound/soc/omap/omap-pcm.c +++ b/sound/soc/omap/omap-pcm.c @@ -61,12 +61,11 @@ static void omap_pcm_dma_irq(int ch, u16 stat, void *data) struct omap_runtime_data *prtd = runtime->private_data; unsigned long flags; - if ((cpu_is_omap1510()) && - (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)) { + if ((cpu_is_omap1510())) { /* * OMAP1510 doesn't fully support DMA progress counter * and there is no software emulation implemented yet, - * so have to maintain our own playback progress counter + * so have to maintain our own progress counters * that can be used by omap_pcm_pointer() instead. */ spin_lock_irqsave(&prtd->lock, flags); @@ -101,9 +100,11 @@ static int omap_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct omap_runtime_data *prtd = runtime->private_data; - struct omap_pcm_dma_data *dma_data = rtd->dai->cpu_dai->dma_data; + struct omap_pcm_dma_data *dma_data; int err = 0; + dma_data = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); + /* return if this is a bufferless transfer e.g. 
* codec <--> BT codec or GSM modem -- lg FIXME */ if (!dma_data) @@ -190,8 +191,7 @@ static int omap_pcm_prepare(struct snd_pcm_substream *substream) dma_params.frame_count = runtime->periods; omap_set_dma_params(prtd->dma_ch, &dma_params); - if ((cpu_is_omap1510()) && - (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)) + if ((cpu_is_omap1510())) omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ | OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ); else @@ -249,14 +249,15 @@ static snd_pcm_uframes_t omap_pcm_pointer(struct snd_pcm_substream *substream) dma_addr_t ptr; snd_pcm_uframes_t offset; - if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { + if (cpu_is_omap1510()) { + offset = prtd->period_index * runtime->period_size; + } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { ptr = omap_get_dma_dst_pos(prtd->dma_ch); offset = bytes_to_frames(runtime, ptr - runtime->dma_addr); - } else if (!(cpu_is_omap1510())) { + } else { ptr = omap_get_dma_src_pos(prtd->dma_ch); offset = bytes_to_frames(runtime, ptr - runtime->dma_addr); - } else - offset = prtd->period_index * runtime->period_size; + } if (offset >= runtime->buffer_size) offset = 0; diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c index d5fc52d0a3c..544fd9566f4 100644 --- a/sound/soc/pxa/pxa-ssp.c +++ b/sound/soc/pxa/pxa-ssp.c @@ -122,10 +122,9 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream, ssp_disable(ssp); } - if (cpu_dai->dma_data) { - kfree(cpu_dai->dma_data); - cpu_dai->dma_data = NULL; - } + kfree(snd_soc_dai_get_dma_data(cpu_dai, substream)); + snd_soc_dai_set_dma_data(cpu_dai, substream, NULL); + return ret; } @@ -142,10 +141,8 @@ static void pxa_ssp_shutdown(struct snd_pcm_substream *substream, clk_disable(ssp->clk); } - if (cpu_dai->dma_data) { - kfree(cpu_dai->dma_data); - cpu_dai->dma_data = NULL; - } + kfree(snd_soc_dai_get_dma_data(cpu_dai, substream)); + snd_soc_dai_set_dma_data(cpu_dai, substream, NULL); } #ifdef CONFIG_PM @@ -570,19 +567,23 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream, u32 sspsp; int width = snd_pcm_format_physical_width(params_format(params)); int ttsa = ssp_read_reg(ssp, SSTSA) & 0xf; + struct pxa2xx_pcm_dma_params *dma_data; + + dma_data = snd_soc_dai_get_dma_data(dai, substream); /* generate correct DMA params */ - if (cpu_dai->dma_data) - kfree(cpu_dai->dma_data); + kfree(dma_data); /* Network mode with one active slot (ttsa == 1) can be used * to force 16-bit frame width on the wire (for S16_LE), even * with two channels. Use 16-bit DMA transfers for this case. 
*/ - cpu_dai->dma_data = ssp_get_dma_params(ssp, + dma_data = ssp_get_dma_params(ssp, ((chn == 2) && (ttsa != 1)) || (width == 32), substream->stream == SNDRV_PCM_STREAM_PLAYBACK); + snd_soc_dai_set_dma_data(dai, substream, dma_data); + /* we can only change the settings if the port is not in use */ if (ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) return 0; diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c index e9ae7b3a7e0..d314115e3dd 100644 --- a/sound/soc/pxa/pxa2xx-ac97.c +++ b/sound/soc/pxa/pxa2xx-ac97.c @@ -122,11 +122,14 @@ static int pxa2xx_ac97_hw_params(struct snd_pcm_substream *substream, { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; + struct pxa2xx_pcm_dma_params *dma_data; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - cpu_dai->dma_data = &pxa2xx_ac97_pcm_stereo_out; + dma_data = &pxa2xx_ac97_pcm_stereo_out; else - cpu_dai->dma_data = &pxa2xx_ac97_pcm_stereo_in; + dma_data = &pxa2xx_ac97_pcm_stereo_in; + + snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); return 0; } @@ -137,11 +140,14 @@ static int pxa2xx_ac97_hw_aux_params(struct snd_pcm_substream *substream, { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; + struct pxa2xx_pcm_dma_params *dma_data; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - cpu_dai->dma_data = &pxa2xx_ac97_pcm_aux_mono_out; + dma_data = &pxa2xx_ac97_pcm_aux_mono_out; else - cpu_dai->dma_data = &pxa2xx_ac97_pcm_aux_mono_in; + dma_data = &pxa2xx_ac97_pcm_aux_mono_in; + + snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); return 0; } @@ -156,7 +162,8 @@ static int pxa2xx_ac97_hw_mic_params(struct snd_pcm_substream *substream, if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) return -ENODEV; else - cpu_dai->dma_data = &pxa2xx_ac97_pcm_mic_mono_in; + snd_soc_dai_set_dma_data(cpu_dai, substream, + &pxa2xx_ac97_pcm_mic_mono_in); return 0; } diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c index 6b8f655d1ad..c1a5275721e 100644 --- a/sound/soc/pxa/pxa2xx-i2s.c +++ b/sound/soc/pxa/pxa2xx-i2s.c @@ -164,6 +164,7 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream, { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; + struct pxa2xx_pcm_dma_params *dma_data; BUG_ON(IS_ERR(clk_i2s)); clk_enable(clk_i2s); @@ -171,9 +172,11 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream, pxa_i2s_wait(); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - cpu_dai->dma_data = &pxa2xx_i2s_pcm_stereo_out; + dma_data = &pxa2xx_i2s_pcm_stereo_out; else - cpu_dai->dma_data = &pxa2xx_i2s_pcm_stereo_in; + dma_data = &pxa2xx_i2s_pcm_stereo_in; + + snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); /* is port used by another stream */ if (!(SACR0 & SACR0_ENB)) { diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c index d38e39575f5..adc7e6f15f9 100644 --- a/sound/soc/pxa/pxa2xx-pcm.c +++ b/sound/soc/pxa/pxa2xx-pcm.c @@ -25,9 +25,11 @@ static int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_runtime *runtime = substream->runtime; struct pxa2xx_runtime_data *prtd = runtime->private_data; struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct pxa2xx_pcm_dma_params *dma = rtd->dai->cpu_dai->dma_data; + struct pxa2xx_pcm_dma_params *dma; int ret; + dma = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); + /* return if this is a bufferless transfer e.g. 
* codec <--> BT codec or GSM modem -- lg FIXME */ if (!dma) diff --git a/sound/soc/s3c24xx/s3c-ac97.c b/sound/soc/s3c24xx/s3c-ac97.c index ee8ed9d7e70..ecf4fd04ae9 100644 --- a/sound/soc/s3c24xx/s3c-ac97.c +++ b/sound/soc/s3c24xx/s3c-ac97.c @@ -224,11 +224,14 @@ static int s3c_ac97_hw_params(struct snd_pcm_substream *substream, { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; + struct s3c_dma_params *dma_data; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - cpu_dai->dma_data = &s3c_ac97_pcm_out; + dma_data = &s3c_ac97_pcm_out; else - cpu_dai->dma_data = &s3c_ac97_pcm_in; + dma_data = &s3c_ac97_pcm_in; + + snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); return 0; } @@ -238,8 +241,8 @@ static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd, { u32 ac_glbctrl; struct snd_soc_pcm_runtime *rtd = substream->private_data; - int channel = ((struct s3c_dma_params *) - rtd->dai->cpu_dai->dma_data)->channel; + struct s3c_dma_params *dma_data = + snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) @@ -265,7 +268,7 @@ static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd, writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL); - s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED); + s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); return 0; } @@ -280,7 +283,7 @@ static int s3c_ac97_hw_mic_params(struct snd_pcm_substream *substream, if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) return -ENODEV; else - cpu_dai->dma_data = &s3c_ac97_mic_in; + snd_soc_dai_set_dma_data(cpu_dai, substream, &s3c_ac97_mic_in); return 0; } @@ -290,8 +293,8 @@ static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream, { u32 ac_glbctrl; struct snd_soc_pcm_runtime *rtd = substream->private_data; - int channel = ((struct s3c_dma_params *) - rtd->dai->cpu_dai->dma_data)->channel; + struct s3c_dma_params *dma_data = + snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL); ac_glbctrl &= ~S3C_AC97_GLBCTRL_MICINTM_MASK; @@ -311,7 +314,7 @@ static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream, writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL); - s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED); + s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); return 0; } diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/s3c24xx/s3c-dma.c index 7725e26d6c9..1b61c23ff30 100644 --- a/sound/soc/s3c24xx/s3c-dma.c +++ b/sound/soc/s3c24xx/s3c-dma.c @@ -145,10 +145,12 @@ static int s3c_dma_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_runtime *runtime = substream->runtime; struct s3c24xx_runtime_data *prtd = runtime->private_data; struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct s3c_dma_params *dma = rtd->dai->cpu_dai->dma_data; unsigned long totbytes = params_buffer_bytes(params); + struct s3c_dma_params *dma = + snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); int ret = 0; + pr_debug("Entered %s\n", __func__); /* return if this is a bufferless transfer e.g. 
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.c b/sound/soc/s3c24xx/s3c-i2s-v2.c index e994d8374fe..88515946b6c 100644 --- a/sound/soc/s3c24xx/s3c-i2s-v2.c +++ b/sound/soc/s3c24xx/s3c-i2s-v2.c @@ -339,14 +339,17 @@ static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai_link *dai = rtd->dai; struct s3c_i2sv2_info *i2s = to_info(dai->cpu_dai); + struct s3c_dma_params *dma_data; u32 iismod; pr_debug("Entered %s\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - dai->cpu_dai->dma_data = i2s->dma_playback; + dma_data = i2s->dma_playback; else - dai->cpu_dai->dma_data = i2s->dma_capture; + dma_data = i2s->dma_capture; + + snd_soc_dai_set_dma_data(dai->cpu_dai, substream, dma_data); /* Working copies of register */ iismod = readl(i2s->regs + S3C2412_IISMOD); @@ -394,8 +397,8 @@ static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd, int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE); unsigned long irqs; int ret = 0; - int channel = ((struct s3c_dma_params *) - rtd->dai->cpu_dai->dma_data)->channel; + struct s3c_dma_params *dma_data = + snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); pr_debug("Entered %s\n", __func__); @@ -431,7 +434,7 @@ static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd, * of the auto reload mechanism of S3C24XX. * This call won't bother S3C64XX. */ - s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED); + s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); break; diff --git a/sound/soc/s3c24xx/s3c-pcm.c b/sound/soc/s3c24xx/s3c-pcm.c index a98f40c3cd2..326f0a9e7e3 100644 --- a/sound/soc/s3c24xx/s3c-pcm.c +++ b/sound/soc/s3c24xx/s3c-pcm.c @@ -178,6 +178,7 @@ static int s3c_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai_link *dai = rtd->dai; struct s3c_pcm_info *pcm = to_info(dai->cpu_dai); + struct s3c_dma_params *dma_data; void __iomem *regs = pcm->regs; struct clk *clk; int sclk_div, sync_div; @@ -187,9 +188,11 @@ static int s3c_pcm_hw_params(struct snd_pcm_substream *substream, dev_dbg(pcm->dev, "Entered %s\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - dai->cpu_dai->dma_data = pcm->dma_playback; + dma_data = pcm->dma_playback; else - dai->cpu_dai->dma_data = pcm->dma_capture; + dma_data = pcm->dma_capture; + + snd_soc_dai_set_dma_data(dai->cpu_dai, substream, dma_data); /* Strictly check for sample size */ switch (params_format(params)) { diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.c b/sound/soc/s3c24xx/s3c24xx-i2s.c index 0bc5950b9f0..c3ac890a398 100644 --- a/sound/soc/s3c24xx/s3c24xx-i2s.c +++ b/sound/soc/s3c24xx/s3c24xx-i2s.c @@ -242,14 +242,17 @@ static int s3c24xx_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct s3c_dma_params *dma_data; u32 iismod; pr_debug("Entered %s\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - rtd->dai->cpu_dai->dma_data = &s3c24xx_i2s_pcm_stereo_out; + dma_data = &s3c24xx_i2s_pcm_stereo_out; else - rtd->dai->cpu_dai->dma_data = &s3c24xx_i2s_pcm_stereo_in; + dma_data = &s3c24xx_i2s_pcm_stereo_in; + + snd_soc_dai_set_dma_data(rtd->dai->cpu_dai, substream, dma_data); /* Working copies of register */ iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD); @@ -258,13 +261,11 @@ static int s3c24xx_i2s_hw_params(struct snd_pcm_substream *substream, switch 
(params_format(params)) { case SNDRV_PCM_FORMAT_S8: iismod &= ~S3C2410_IISMOD_16BIT; - ((struct s3c_dma_params *) - rtd->dai->cpu_dai->dma_data)->dma_size = 1; + dma_data->dma_size = 1; break; case SNDRV_PCM_FORMAT_S16_LE: iismod |= S3C2410_IISMOD_16BIT; - ((struct s3c_dma_params *) - rtd->dai->cpu_dai->dma_data)->dma_size = 2; + dma_data->dma_size = 2; break; default: return -EINVAL; @@ -280,8 +281,8 @@ static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd, { int ret = 0; struct snd_soc_pcm_runtime *rtd = substream->private_data; - int channel = ((struct s3c_dma_params *) - rtd->dai->cpu_dai->dma_data)->channel; + struct s3c_dma_params *dma_data = + snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); pr_debug("Entered %s\n", __func__); @@ -300,7 +301,7 @@ static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd, else s3c24xx_snd_txctrl(1); - s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED); + s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: diff --git a/sound/soc/s6000/s6000-i2s.c b/sound/soc/s6000/s6000-i2s.c index 0664fac7612..5b9ac1759bd 100644 --- a/sound/soc/s6000/s6000-i2s.c +++ b/sound/soc/s6000/s6000-i2s.c @@ -519,7 +519,8 @@ static int __devinit s6000_i2s_probe(struct platform_device *pdev) s6000_i2s_dai.dev = &pdev->dev; s6000_i2s_dai.private_data = dev; - s6000_i2s_dai.dma_data = &dev->dma_params; + s6000_i2s_dai.capture.dma_data = &dev->dma_params; + s6000_i2s_dai.playback.dma_data = &dev->dma_params; dev->sifbase = sifmem->start; dev->scbbase = mmio; diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c index 1d61109e09f..9c7f7f00ceb 100644 --- a/sound/soc/s6000/s6000-pcm.c +++ b/sound/soc/s6000/s6000-pcm.c @@ -58,13 +58,15 @@ static void s6000_pcm_enqueue_dma(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = substream->runtime; struct s6000_runtime_data *prtd = runtime->private_data; struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; + struct s6000_pcm_dma_params *par; int channel; unsigned int period_size; unsigned int dma_offset; dma_addr_t dma_pos; dma_addr_t src, dst; + par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); + period_size = snd_pcm_lib_period_bytes(substream); dma_offset = prtd->period * period_size; dma_pos = runtime->dma_addr + dma_offset; @@ -101,7 +103,8 @@ static irqreturn_t s6000_pcm_irq(int irq, void *data) { struct snd_pcm *pcm = data; struct snd_soc_pcm_runtime *runtime = pcm->private_data; - struct s6000_pcm_dma_params *params = runtime->dai->cpu_dai->dma_data; + struct s6000_pcm_dma_params *params = + snd_soc_dai_get_dma_data(runtime->dai->cpu_dai, + pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream); struct s6000_runtime_data *prtd; unsigned int has_xrun; int i, ret = IRQ_NONE; @@ -172,11 +175,13 @@ static int s6000_pcm_start(struct snd_pcm_substream *substream) { struct s6000_runtime_data *prtd = substream->runtime->private_data; struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; + struct s6000_pcm_dma_params *par; unsigned long flags; int srcinc; u32 dma; + par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); + spin_lock_irqsave(&prtd->lock, flags); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { @@ -212,10 +217,12 @@ static int s6000_pcm_stop(struct snd_pcm_substream *substream) { struct 
s6000_runtime_data *prtd = substream->runtime->private_data; struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; + struct s6000_pcm_dma_params *par; unsigned long flags; u32 channel; + par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) channel = par->dma_out; else @@ -236,9 +243,11 @@ static int s6000_pcm_stop(struct snd_pcm_substream *substream) static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; + struct s6000_pcm_dma_params *par; int ret; + par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); + ret = par->trigger(substream, cmd, 0); if (ret < 0) return ret; @@ -275,13 +284,15 @@ static int s6000_pcm_prepare(struct snd_pcm_substream *substream) static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; + struct s6000_pcm_dma_params *par; struct snd_pcm_runtime *runtime = substream->runtime; struct s6000_runtime_data *prtd = runtime->private_data; unsigned long flags; unsigned int offset; dma_addr_t count; + par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); + spin_lock_irqsave(&prtd->lock, flags); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) @@ -305,11 +316,12 @@ static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream) static int s6000_pcm_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; + struct s6000_pcm_dma_params *par; struct snd_pcm_runtime *runtime = substream->runtime; struct s6000_runtime_data *prtd; int ret; + par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware); ret = snd_pcm_hw_constraint_step(runtime, 0, @@ -364,7 +376,7 @@ static int s6000_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; + struct s6000_pcm_dma_params *par; int ret; ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); @@ -373,6 +385,8 @@ static int s6000_pcm_hw_params(struct snd_pcm_substream *substream, return ret; } + par = snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); + if (par->same_rate) { spin_lock(&par->lock); if (par->rate == -1 || @@ -392,7 +406,8 @@ static int s6000_pcm_hw_params(struct snd_pcm_substream *substream, static int s6000_pcm_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; - struct s6000_pcm_dma_params *par = soc_runtime->dai->cpu_dai->dma_data; + struct s6000_pcm_dma_params *par = + snd_soc_dai_get_dma_data(soc_runtime->dai->cpu_dai, substream); spin_lock(&par->lock); par->in_use &= ~(1 << substream->stream); @@ -417,7 +432,8 @@ static struct snd_pcm_ops s6000_pcm_ops = { static void s6000_pcm_free(struct snd_pcm *pcm) { struct snd_soc_pcm_runtime *runtime = pcm->private_data; - struct s6000_pcm_dma_params *params = runtime->dai->cpu_dai->dma_data; 
+ struct s6000_pcm_dma_params *params = + snd_soc_dai_get_dma_data(runtime->dai->cpu_dai, + pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream); free_irq(params->irq, pcm); snd_pcm_lib_preallocate_free_for_all(pcm); @@ -429,9 +445,11 @@ static int s6000_pcm_new(struct snd_card *card, struct snd_soc_dai *dai, struct snd_pcm *pcm) { struct snd_soc_pcm_runtime *runtime = pcm->private_data; - struct s6000_pcm_dma_params *params = runtime->dai->cpu_dai->dma_data; + struct s6000_pcm_dma_params *params; int res; + params = snd_soc_dai_get_dma_data(runtime->dai->cpu_dai, + pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream); + if (!card->dev->dma_mask) card->dev->dma_mask = &s6000_pcm_dmamask; if (!card->dev->coherent_dma_mask) diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 2320153bd92..ad7f9528d75 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1549,7 +1549,8 @@ int snd_soc_new_pcms(struct snd_soc_device *socdev, int idx, const char *xid) mutex_unlock(&codec->mutex); return ret; } - if (card->dai_link[i].codec_dai->ac97_control) { + /* Check for codec->ac97 to handle the ac97.c fun */ + if (card->dai_link[i].codec_dai->ac97_control && codec->ac97) { snd_ac97_dev_add_pdata(codec->ac97, card->dai_link[i].cpu_dai->ac97_pdata); } diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c index 2c59afd9961..9e28b20cb2c 100644 --- a/sound/usb/usbmidi.c +++ b/sound/usb/usbmidi.c @@ -986,6 +986,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream) DEFINE_WAIT(wait); long timeout = msecs_to_jiffies(50); + if (ep->umidi->disconnected) + return; /* * The substream buffer is empty, but some data might still be in the * currently active URBs, so we have to wait for those to complete. @@ -1123,14 +1125,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi, * Frees an output endpoint. * May be called when ep hasn't been initialized completely. */ -static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep) +static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep) { unsigned int i; for (i = 0; i < OUTPUT_URBS; ++i) - if (ep->urbs[i].urb) + if (ep->urbs[i].urb) { free_urb_and_buffer(ep->umidi, ep->urbs[i].urb, ep->max_transfer); + ep->urbs[i].urb = NULL; + } +} + +static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep) +{ + snd_usbmidi_out_endpoint_clear(ep); kfree(ep); } @@ -1262,15 +1271,18 @@ void snd_usbmidi_disconnect(struct list_head* p) usb_kill_urb(ep->out->urbs[j].urb); if (umidi->usb_protocol_ops->finish_out_endpoint) umidi->usb_protocol_ops->finish_out_endpoint(ep->out); + ep->out->active_urbs = 0; + if (ep->out->drain_urbs) { + ep->out->drain_urbs = 0; + wake_up(&ep->out->drain_wait); + } } if (ep->in) for (j = 0; j < INPUT_URBS; ++j) usb_kill_urb(ep->in->urbs[j]); /* free endpoints here; later call can result in Oops */ - if (ep->out) { - snd_usbmidi_out_endpoint_delete(ep->out); - ep->out = NULL; - } + if (ep->out) + snd_usbmidi_out_endpoint_clear(ep->out); if (ep->in) { snd_usbmidi_in_endpoint_delete(ep->in); ep->in = NULL;
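
Note on the recurring ASoC conversion above: most of the sound/soc hunks replace direct use of the single snd_soc_dai.dma_data pointer, which playback and capture had to share, with per-direction storage in snd_soc_pcm_stream, reached through the snd_soc_dai_set_dma_data()/snd_soc_dai_get_dma_data() helpers. The sketch below is a minimal illustration of that pattern, assuming the 2.6.34-era ASoC API used in these hunks; the foo_* names are hypothetical stand-ins for driver-specific types such as pxa2xx_pcm_dma_params or s3c_dma_params.

#include <sound/soc.h>

/* Hypothetical per-direction DMA descriptor. */
struct foo_dma_params {
	int channel;
};

static struct foo_dma_params foo_dma_out = { .channel = 0 };
static struct foo_dma_params foo_dma_in = { .channel = 1 };

/* CPU DAI side: publish this direction's parameters from hw_params(). */
static int foo_dai_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params,
			     struct snd_soc_dai *dai)
{
	struct foo_dma_params *dma_data;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		dma_data = &foo_dma_out;
	else
		dma_data = &foo_dma_in;

	/* Stored in dai->playback.dma_data or dai->capture.dma_data,
	 * so the two directions no longer clobber each other. */
	snd_soc_dai_set_dma_data(dai, substream, dma_data);
	return 0;
}

/* Platform/PCM side: fetch the same pointer back for this substream. */
static int foo_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct foo_dma_params *dma_data =
		snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);

	if (!dma_data)	/* bufferless link, e.g. codec <--> BT/GSM modem */
		return 0;
	/* ...program the DMA controller from dma_data->channel... */
	return 0;
}

Probe-time producers that have no substream yet (davinci-i2s, davinci-mcasp, s6000-i2s above) assign dai.playback.dma_data and dai.capture.dma_data directly; consumers without a substream in scope, such as s6000_pcm_free(), have to pick one via pcm->streams[].substream.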