489 files changed, 15345 insertions, 5820 deletions
@@ -2209,7 +2209,7 @@ S: (address available on request) S: USA N: Ian McDonald -E: iam4@cs.waikato.ac.nz +E: ian.mcdonald@jandi.co.nz E: imcdnzl@gmail.com W: http://wand.net.nz/~iam4 W: http://imcdnzl.blogspot.com diff --git a/Documentation/connector/ucon.c b/Documentation/connector/ucon.c new file mode 100644 index 00000000000..d738cde2a8d --- /dev/null +++ b/Documentation/connector/ucon.c @@ -0,0 +1,206 @@ +/* + * ucon.c + * + * Copyright (c) 2004+ Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <asm/types.h> + +#include <sys/types.h> +#include <sys/socket.h> +#include <sys/poll.h> + +#include <linux/netlink.h> +#include <linux/rtnetlink.h> + +#include <arpa/inet.h> + +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <string.h> +#include <errno.h> +#include <time.h> + +#include <linux/connector.h> + +#define DEBUG +#define NETLINK_CONNECTOR 11 + +#ifdef DEBUG +#define ulog(f, a...) fprintf(stdout, f, ##a) +#else +#define ulog(f, a...) do {} while (0) +#endif + +static int need_exit; +static __u32 seq; + +static int netlink_send(int s, struct cn_msg *msg) +{ + struct nlmsghdr *nlh; + unsigned int size; + int err; + char buf[128]; + struct cn_msg *m; + + size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len); + + nlh = (struct nlmsghdr *)buf; + nlh->nlmsg_seq = seq++; + nlh->nlmsg_pid = getpid(); + nlh->nlmsg_type = NLMSG_DONE; + nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh)); + nlh->nlmsg_flags = 0; + + m = NLMSG_DATA(nlh); +#if 0 + ulog("%s: [%08x.%08x] len=%u, seq=%u, ack=%u.\n", + __func__, msg->id.idx, msg->id.val, msg->len, msg->seq, msg->ack); +#endif + memcpy(m, msg, sizeof(*m) + msg->len); + + err = send(s, nlh, size, 0); + if (err == -1) + ulog("Failed to send: %s [%d].\n", + strerror(errno), errno); + + return err; +} + +int main(int argc, char *argv[]) +{ + int s; + char buf[1024]; + int len; + struct nlmsghdr *reply; + struct sockaddr_nl l_local; + struct cn_msg *data; + FILE *out; + time_t tm; + struct pollfd pfd; + + if (argc < 2) + out = stdout; + else { + out = fopen(argv[1], "a+"); + if (!out) { + ulog("Unable to open %s for writing: %s\n", + argv[1], strerror(errno)); + out = stdout; + } + } + + memset(buf, 0, sizeof(buf)); + + s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); + if (s == -1) { + perror("socket"); + return -1; + } + + l_local.nl_family = AF_NETLINK; + l_local.nl_groups = 0x123; /* bitmask of requested groups */ + l_local.nl_pid = 0; + + if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) { + perror("bind"); + close(s); + return -1; + } + +#if 0 + { + int on = 0x57; /* Additional group number */ + setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &on, sizeof(on)); + } +#endif + if (0) { + int i, j; + + memset(buf, 0, sizeof(buf)); + + data = (struct cn_msg *)buf; + + 
data->id.idx = 0x123; + data->id.val = 0x456; + data->seq = seq++; + data->ack = 0; + data->len = 0; + + for (j=0; j<10; ++j) { + for (i=0; i<1000; ++i) { + len = netlink_send(s, data); + } + + ulog("%d messages have been sent to %08x.%08x.\n", i, data->id.idx, data->id.val); + } + + return 0; + } + + + pfd.fd = s; + + while (!need_exit) { + pfd.events = POLLIN; + pfd.revents = 0; + switch (poll(&pfd, 1, -1)) { + case 0: + need_exit = 1; + break; + case -1: + if (errno != EINTR) { + need_exit = 1; + break; + } + continue; + } + if (need_exit) + break; + + memset(buf, 0, sizeof(buf)); + len = recv(s, buf, sizeof(buf), 0); + if (len == -1) { + perror("recv buf"); + close(s); + return -1; + } + reply = (struct nlmsghdr *)buf; + + switch (reply->nlmsg_type) { + case NLMSG_ERROR: + fprintf(out, "Error message received.\n"); + fflush(out); + break; + case NLMSG_DONE: + data = (struct cn_msg *)NLMSG_DATA(reply); + + time(&tm); + fprintf(out, "%.24s : [%x.%x] [%08u.%08u].\n", + ctime(&tm), data->id.idx, data->id.val, data->seq, data->ack); + fflush(out); + break; + default: + break; + } + } + + close(s); + return 0; +} diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt index 159e2a0c3e8..76b44290c15 100644 --- a/Documentation/cpusets.txt +++ b/Documentation/cpusets.txt @@ -217,6 +217,12 @@ exclusive cpuset. Also, the use of a Linux virtual file system (vfs) to represent the cpuset hierarchy provides for a familiar permission and name space for cpusets, with a minimum of additional kernel code. +The cpus file in the root (top_cpuset) cpuset is read-only. +It automatically tracks the value of cpu_online_map, using a CPU +hotplug notifier. If and when memory nodes can be hotplugged, +we expect to make the mems file in the root cpuset read-only +as well, and have it track the value of node_online_map. + 1.4 What are exclusive cpusets ? -------------------------------- diff --git a/Documentation/fb/imacfb.txt b/Documentation/fb/imacfb.txt new file mode 100644 index 00000000000..759028545a7 --- /dev/null +++ b/Documentation/fb/imacfb.txt @@ -0,0 +1,31 @@ + +What is imacfb? +=============== + +This is a generic EFI platform driver for Intel based Apple computers. +Imacfb is only for EFI booted Intel Macs. + +Supported Hardware +================== + +iMac 17"/20" +Macbook +Macbook Pro 15"/17" +MacMini + +How to use it? +============== + +Imacfb does not have any kind of autodetection of your machine. +You have to add the fillowing kernel parameters in your elilo.conf: + Macbook : + video=imacfb:macbook + MacMini : + video=imacfb:mini + Macbook Pro 15", iMac 17" : + video=imacfb:i17 + Macbook Pro 17", iMac 20" : + video=imacfb:i20 + +-- +Edgar Hucek <gimli@dark-green.com> diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX index 66fdc0744fe..16dec61d767 100644 --- a/Documentation/filesystems/00-INDEX +++ b/Documentation/filesystems/00-INDEX @@ -62,8 +62,8 @@ ramfs-rootfs-initramfs.txt - info on the 'in memory' filesystems ramfs, rootfs and initramfs. reiser4.txt - info on the Reiser4 filesystem based on dancing tree algorithms. -relayfs.txt - - info on relayfs, for efficient streaming from kernel to user space. +relay.txt + - info on relay, for efficient streaming from kernel to user space. romfs.txt - description of the ROMFS filesystem. 
smbfs.txt
diff --git a/Documentation/filesystems/relay.txt b/Documentation/filesystems/relay.txt
new file mode 100644
index 00000000000..d6788dae034
--- /dev/null
+++ b/Documentation/filesystems/relay.txt
@@ -0,0 +1,479 @@
+relay interface (formerly relayfs)
+==================================
+
+The relay interface provides a means for kernel applications to
+efficiently log and transfer large quantities of data from the kernel
+to userspace via user-defined 'relay channels'.
+
+A 'relay channel' is a kernel->user data relay mechanism implemented
+as a set of per-cpu kernel buffers ('channel buffers'), each
+represented as a regular file ('relay file') in user space. Kernel
+clients write into the channel buffers using efficient write
+functions; these automatically log into the current cpu's channel
+buffer. User space applications mmap() or read() from the relay files
+and retrieve the data as it becomes available. The relay files
+themselves are files created in a host filesystem, e.g. debugfs, and
+are associated with the channel buffers using the API described below.
+
+The format of the data logged into the channel buffers is completely
+up to the kernel client; the relay interface does however provide
+hooks which allow kernel clients to impose some structure on the
+buffer data. The relay interface doesn't implement any form of data
+filtering - this also is left to the kernel client. The purpose is to
+keep things as simple as possible.
+
+This document provides an overview of the relay interface API. The
+details of the function parameters are documented along with the
+functions in the relay interface code - please see that for details.
+
+Semantics
+=========
+
+Each relay channel has one buffer per CPU, each buffer has one or more
+sub-buffers. Messages are written to the first sub-buffer until it is
+too full to contain a new message, in which case it is written to
+the next (if available). Messages are never split across sub-buffers.
+At this point, userspace can be notified so it empties the first
+sub-buffer, while the kernel continues writing to the next.
+
+When notified that a sub-buffer is full, the kernel knows how many
+bytes of it are padding, i.e. unused space occurring because a complete
+message couldn't fit into a sub-buffer. Userspace can use this
+knowledge to copy only valid data.
+
+After copying it, userspace can notify the kernel that a sub-buffer
+has been consumed.
+
+A relay channel can operate in a mode where it will overwrite data not
+yet collected by userspace, and not wait for it to be consumed.
+
+The relay channel itself does not provide for communication of such
+data between userspace and kernel, allowing the kernel side to remain
+simple and not impose a single interface on userspace. It does
+provide a set of examples and a separate helper though, described
+below.
+
+The read() interface both removes padding and internally consumes the
+read sub-buffers; thus in cases where read(2) is being used to drain
+the channel buffers, special-purpose communication between kernel and
+user isn't necessary for basic operation.
+
+One of the major goals of the relay interface is to provide a low
+overhead mechanism for conveying kernel data to userspace. While the
+read() interface is easy to use, it's not as efficient as the mmap()
+approach; the example code attempts to make the tradeoff between the
+two approaches as small as possible.
+ +klog and relay-apps example code +================================ + +The relay interface itself is ready to use, but to make things easier, +a couple simple utility functions and a set of examples are provided. + +The relay-apps example tarball, available on the relay sourceforge +site, contains a set of self-contained examples, each consisting of a +pair of .c files containing boilerplate code for each of the user and +kernel sides of a relay application. When combined these two sets of +boilerplate code provide glue to easily stream data to disk, without +having to bother with mundane housekeeping chores. + +The 'klog debugging functions' patch (klog.patch in the relay-apps +tarball) provides a couple of high-level logging functions to the +kernel which allow writing formatted text or raw data to a channel, +regardless of whether a channel to write into exists or not, or even +whether the relay interface is compiled into the kernel or not. These +functions allow you to put unconditional 'trace' statements anywhere +in the kernel or kernel modules; only when there is a 'klog handler' +registered will data actually be logged (see the klog and kleak +examples for details). + +It is of course possible to use the relay interface from scratch, +i.e. without using any of the relay-apps example code or klog, but +you'll have to implement communication between userspace and kernel, +allowing both to convey the state of buffers (full, empty, amount of +padding). The read() interface both removes padding and internally +consumes the read sub-buffers; thus in cases where read(2) is being +used to drain the channel buffers, special-purpose communication +between kernel and user isn't necessary for basic operation. Things +such as buffer-full conditions would still need to be communicated via +some channel though. + +klog and the relay-apps examples can be found in the relay-apps +tarball on http://relayfs.sourceforge.net + +The relay interface user space API +================================== + +The relay interface implements basic file operations for user space +access to relay channel buffer data. Here are the file operations +that are available and some comments regarding their behavior: + +open() enables user to open an _existing_ channel buffer. + +mmap() results in channel buffer being mapped into the caller's + memory space. Note that you can't do a partial mmap - you + must map the entire file, which is NRBUF * SUBBUFSIZE. + +read() read the contents of a channel buffer. The bytes read are + 'consumed' by the reader, i.e. they won't be available + again to subsequent reads. If the channel is being used + in no-overwrite mode (the default), it can be read at any + time even if there's an active kernel writer. If the + channel is being used in overwrite mode and there are + active channel writers, results may be unpredictable - + users should make sure that all logging to the channel has + ended before using read() with overwrite mode. Sub-buffer + padding is automatically removed and will not be seen by + the reader. + +sendfile() transfer data from a channel buffer to an output file + descriptor. Sub-buffer padding is automatically removed + and will not be seen by the reader. + +poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are + notified when sub-buffer boundaries are crossed. + +close() decrements the channel buffer's refcount. When the refcount + reaches 0, i.e. when no process or kernel client has the + buffer open, the channel buffer is freed. 
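A minimal read()-based consumer, assuming debugfs is mounted on /debug and the kernel client created a per-cpu relay file named "cpu0" (both are assumptions borrowed from the examples elsewhere in this document, not requirements of the interface), might look like the following sketch:

/*
 * relay-read.c - minimal sketch of a read()-based relay consumer.
 * The /debug mount point and the "cpu0" file name are assumptions.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	const char *path = argc > 1 ? argv[1] : "/debug/cpu0";
	struct pollfd pfd;
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd == -1) {
		perror("open");
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* poll() returns when a sub-buffer boundary is crossed; read()
	 * consumes the data and silently skips sub-buffer padding. */
	while (poll(&pfd, 1, -1) > 0) {
		n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		fwrite(buf, 1, n, stdout);
	}

	close(fd);
	return 0;
}

Because read() consumes what it returns and strips padding itself, a loop like this is all that is needed to drain a channel running in the default no-overwrite mode; one such reader would be started per per-cpu file.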
+
+In order for a user application to make use of relay files, the
+host filesystem must be mounted. For example,
+
+	mount -t debugfs debugfs /debug
+
+NOTE:	the host filesystem doesn't need to be mounted for kernel
+	clients to create or use channels - it only needs to be
+	mounted when user space applications need access to the buffer
+	data.
+
+
+The relay interface kernel API
+==============================
+
+Here's a summary of the API the relay interface provides to in-kernel clients:
+
+TBD(curr. line MT:/API/)
+  channel management functions:
+
+    relay_open(base_filename, parent, subbuf_size, n_subbufs,
+               callbacks)
+    relay_close(chan)
+    relay_flush(chan)
+    relay_reset(chan)
+
+  channel management typically called on instigation of userspace:
+
+    relay_subbufs_consumed(chan, cpu, subbufs_consumed)
+
+  write functions:
+
+    relay_write(chan, data, length)
+    __relay_write(chan, data, length)
+    relay_reserve(chan, length)
+
+  callbacks:
+
+    subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
+    buf_mapped(buf, filp)
+    buf_unmapped(buf, filp)
+    create_buf_file(filename, parent, mode, buf, is_global)
+    remove_buf_file(dentry)
+
+  helper functions:
+
+    relay_buf_full(buf)
+    subbuf_start_reserve(buf, length)
+
+
+Creating a channel
+------------------
+
+relay_open() is used to create a channel, along with its per-cpu
+channel buffers. Each channel buffer will have an associated file
+created for it in the host filesystem, which can be mmapped or
+read from in user space. The files are named basename0...basenameN-1
+where N is the number of online cpus, and by default will be created
+in the root of the filesystem (if the parent param is NULL). If you
+want a directory structure to contain your relay files, you should
+create it using the host filesystem's directory creation function,
+e.g. debugfs_create_dir(), and pass the parent directory to
+relay_open(). Users are responsible for cleaning up any directory
+structure they create when the channel is closed - again the host
+filesystem's directory removal functions should be used for that,
+e.g. debugfs_remove().
+
+In order for a channel to be created and the host filesystem's files
+associated with its channel buffers, the user must provide definitions
+for two callback functions, create_buf_file() and remove_buf_file().
+create_buf_file() is called once for each per-cpu buffer from
+relay_open() and allows the user to create the file which will be used
+to represent the corresponding channel buffer. The callback should
+return the dentry of the file created to represent the channel buffer.
+remove_buf_file() must also be defined; it's responsible for deleting
+the file(s) created in create_buf_file() and is called during
+relay_close().
+
+Here are some typical definitions for these callbacks, in this case
+using debugfs:
+
+/*
+ * create_buf_file() callback. Creates relay file in debugfs.
+ */
+static struct dentry *create_buf_file_handler(const char *filename,
+                                              struct dentry *parent,
+                                              int mode,
+                                              struct rchan_buf *buf,
+                                              int *is_global)
+{
+	return debugfs_create_file(filename, mode, parent, buf,
+	                           &relay_file_operations);
+}
+
+/*
+ * remove_buf_file() callback. Removes relay file from debugfs.
+ */ +static int remove_buf_file_handler(struct dentry *dentry) +{ + debugfs_remove(dentry); + + return 0; +} + +/* + * relay interface callbacks + */ +static struct rchan_callbacks relay_callbacks = +{ + .create_buf_file = create_buf_file_handler, + .remove_buf_file = remove_buf_file_handler, +}; + +And an example relay_open() invocation using them: + + chan = relay_open("cpu", NULL, SUBBUF_SIZE, N_SUBBUFS, &relay_callbacks); + +If the create_buf_file() callback fails, or isn't defined, channel +creation and thus relay_open() will fail. + +The total size of each per-cpu buffer is calculated by multiplying the +number of sub-buffers by the sub-buffer size passed into relay_open(). +The idea behind sub-buffers is that they're basically an extension of +double-buffering to N buffers, and they also allow applications to +easily implement random-access-on-buffer-boundary schemes, which can +be important for some high-volume applications. The number and size +of sub-buffers is completely dependent on the application and even for +the same application, different conditions will warrant different +values for these parameters at different times. Typically, the right +values to use are best decided after some experimentation; in general, +though, it's safe to assume that having only 1 sub-buffer is a bad +idea - you're guaranteed to either overwrite data or lose events +depending on the channel mode being used. + +The create_buf_file() implementation can also be defined in such a way +as to allow the creation of a single 'global' buffer instead of the +default per-cpu set. This can be useful for applications interested +mainly in seeing the relative ordering of system-wide events without +the need to bother with saving explicit timestamps for the purpose of +merging/sorting per-cpu files in a postprocessing step. + +To have relay_open() create a global buffer, the create_buf_file() +implementation should set the value of the is_global outparam to a +non-zero value in addition to creating the file that will be used to +represent the single buffer. In the case of a global buffer, +create_buf_file() and remove_buf_file() will be called only once. The +normal channel-writing functions, e.g. relay_write(), can still be +used - writes from any cpu will transparently end up in the global +buffer - but since it is a global buffer, callers should make sure +they use the proper locking for such a buffer, either by wrapping +writes in a spinlock, or by copying a write function from relay.h and +creating a local version that internally does the proper locking. + +Channel 'modes' +--------------- + +relay channels can be used in either of two modes - 'overwrite' or +'no-overwrite'. The mode is entirely determined by the implementation +of the subbuf_start() callback, as described below. The default if no +subbuf_start() callback is defined is 'no-overwrite' mode. If the +default mode suits your needs, and you plan to use the read() +interface to retrieve channel data, you can ignore the details of this +section, as it pertains mainly to mmap() implementations. + +In 'overwrite' mode, also known as 'flight recorder' mode, writes +continuously cycle around the buffer and will never fail, but will +unconditionally overwrite old data regardless of whether it's actually +been consumed. In no-overwrite mode, writes will fail, i.e. data will +be lost, if the number of unconsumed sub-buffers equals the total +number of sub-buffers in the channel. 
It should be clear that if
+there is no consumer or if the consumer can't consume sub-buffers fast
+enough, data will be lost in either case; the only difference is
+whether data is lost from the beginning or the end of a buffer.
+
+As explained above, a relay channel is made up of one or more
+per-cpu channel buffers, each implemented as a circular buffer
+subdivided into one or more sub-buffers. Messages are written into
+the current sub-buffer of the channel's current per-cpu buffer via the
+write functions described below. Whenever a message can't fit into
+the current sub-buffer, because there's no room left for it, the
+client is notified via the subbuf_start() callback that a switch to a
+new sub-buffer is about to occur. The client uses this callback to 1)
+initialize the next sub-buffer if appropriate, 2) finalize the previous
+sub-buffer if appropriate, and 3) return a boolean value indicating
+whether or not to actually move on to the next sub-buffer.
+
+To implement 'no-overwrite' mode, the kernel client would provide
+an implementation of the subbuf_start() callback something like the
+following:
+
+static int subbuf_start(struct rchan_buf *buf,
+                        void *subbuf,
+                        void *prev_subbuf,
+                        unsigned int prev_padding)
+{
+	if (prev_subbuf)
+		*((unsigned *)prev_subbuf) = prev_padding;
+
+	if (relay_buf_full(buf))
+		return 0;
+
+	subbuf_start_reserve(buf, sizeof(unsigned int));
+
+	return 1;
+}
+
+If the current buffer is full, i.e. all sub-buffers remain unconsumed,
+the callback returns 0 to indicate that the buffer switch should not
+occur yet, i.e. until the consumer has had a chance to read the
+current set of ready sub-buffers. For the relay_buf_full() function
+to make sense, the consumer is responsible for notifying the relay
+interface when sub-buffers have been consumed via
+relay_subbufs_consumed(). Any subsequent attempts to write into the
+buffer will again invoke the subbuf_start() callback with the same
+parameters; only when the consumer has consumed one or more of the
+ready sub-buffers will relay_buf_full() return 0, in which case the
+buffer switch can continue.
+
+The implementation of the subbuf_start() callback for 'overwrite' mode
+would be very similar:
+
+static int subbuf_start(struct rchan_buf *buf,
+                        void *subbuf,
+                        void *prev_subbuf,
+                        unsigned int prev_padding)
+{
+	if (prev_subbuf)
+		*((unsigned *)prev_subbuf) = prev_padding;
+
+	subbuf_start_reserve(buf, sizeof(unsigned int));
+
+	return 1;
+}
+
+In this case, the relay_buf_full() check is meaningless and the
+callback always returns 1, causing the buffer switch to occur
+unconditionally. It's also meaningless for the client to use the
+relay_subbufs_consumed() function in this mode, as it's never
+consulted.
+
+The default subbuf_start() implementation, used if the client doesn't
+define any callbacks, or doesn't define the subbuf_start() callback,
+implements the simplest possible 'no-overwrite' mode, i.e. it does
+nothing but return 0.
+
+Header information can be reserved at the beginning of each sub-buffer
+by calling the subbuf_start_reserve() helper function from within the
+subbuf_start() callback. This reserved area can be used to store
+whatever information the client wants. In the example above, room is
+reserved in each sub-buffer to store the padding count for that
+sub-buffer.
This is filled in for the previous sub-buffer in the +subbuf_start() implementation; the padding value for the previous +sub-buffer is passed into the subbuf_start() callback along with a +pointer to the previous sub-buffer, since the padding value isn't +known until a sub-buffer is filled. The subbuf_start() callback is +also called for the first sub-buffer when the channel is opened, to +give the client a chance to reserve space in it. In this case the +previous sub-buffer pointer passed into the callback will be NULL, so +the client should check the value of the prev_subbuf pointer before +writing into the previous sub-buffer. + +Writing to a channel +-------------------- + +Kernel clients write data into the current cpu's channel buffer using +relay_write() or __relay_write(). relay_write() is the main logging +function - it uses local_irqsave() to protect the buffer and should be +used if you might be logging from interrupt context. If you know +you'll never be logging from interrupt context, you can use +__relay_write(), which only disables preemption. These functions +don't return a value, so you can't determine whether or not they +failed - the assumption is that you wouldn't want to check a return +value in the fast logging path anyway, and that they'll always succeed +unless the buffer is full and no-overwrite mode is being used, in +which case you can detect a failed write in the subbuf_start() +callback by calling the relay_buf_full() helper function. + +relay_reserve() is used to reserve a slot in a channel buffer which +can be written to later. This would typically be used in applications +that need to write directly into a channel buffer without having to +stage data in a temporary buffer beforehand. Because the actual write +may not happen immediately after the slot is reserved, applications +using relay_reserve() can keep a count of the number of bytes actually +written, either in space reserved in the sub-buffers themselves or as +a separate array. See the 'reserve' example in the relay-apps tarball +at http://relayfs.sourceforge.net for an example of how this can be +done. Because the write is under control of the client and is +separated from the reserve, relay_reserve() doesn't protect the buffer +at all - it's up to the client to provide the appropriate +synchronization when using relay_reserve(). + +Closing a channel +----------------- + +The client calls relay_close() when it's finished using the channel. +The channel and its associated buffers are destroyed when there are no +longer any references to any of the channel buffers. relay_flush() +forces a sub-buffer switch on all the channel buffers, and can be used +to finalize and process the last sub-buffers before the channel is +closed. + +Misc +---- + +Some applications may want to keep a channel around and re-use it +rather than open and close a new channel for each use. relay_reset() +can be used for this purpose - it resets a channel to its initial +state without reallocating channel buffer memory or destroying +existing mappings. It should however only be called when it's safe to +do so, i.e. when the channel isn't currently being written to. + +Finally, there are a couple of utility callbacks that can be used for +different purposes. buf_mapped() is called whenever a channel buffer +is mmapped from user space and buf_unmapped() is called when it's +unmapped. The client can use this notification to trigger actions +within the kernel application, such as enabling/disabling logging to +the channel. 
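A minimal kernel-side sketch, assuming the relay_callbacks structure defined earlier in this document and using invented names, sizes and a made-up record layout, might combine relay_open(), relay_write() and relay_close() like this:

/*
 * Illustrative sketch only - 'relay_callbacks' is the rchan_callbacks
 * structure shown earlier; the event layout, buffer sizes and the
 * "example-relay" directory name are assumptions, not part of the API.
 */
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/relay.h>
#include <linux/types.h>

#define EXAMPLE_SUBBUF_SIZE	(256 * 1024)	/* example value */
#define EXAMPLE_N_SUBBUFS	4		/* example value */

struct example_event {				/* hypothetical record */
	u64 seq;
	u64 timestamp;
};

static struct rchan *example_chan;
static struct dentry *example_dir;
static u64 example_seq;

static int example_channel_create(void)
{
	example_dir = debugfs_create_dir("example-relay", NULL);
	example_chan = relay_open("cpu", example_dir, EXAMPLE_SUBBUF_SIZE,
				  EXAMPLE_N_SUBBUFS, &relay_callbacks);
	if (!example_chan)
		return -ENOMEM;
	return 0;
}

static void example_log_event(void)
{
	struct example_event ev = {
		.seq = example_seq++,		/* not atomic - sketch only */
		.timestamp = get_jiffies_64(),
	};

	/* relay_write() protects the buffer with local irq disabling, so
	 * it may be called from interrupt context as well. */
	relay_write(example_chan, &ev, sizeof(ev));
}

static void example_channel_destroy(void)
{
	relay_flush(example_chan);	/* push out the last, partial sub-buffers */
	relay_close(example_chan);
	debugfs_remove(example_dir);	/* clean up the directory we created */
}

In a real module these functions would be wired up via module_init()/module_exit(), with the logging call placed wherever events are generated; the per-cpu files then appear under the example directory and can be drained with read() as shown earlier.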
+ + +Resources +========= + +For news, example code, mailing list, etc. see the relay interface homepage: + + http://relayfs.sourceforge.net + + +Credits +======= + +The ideas and specs for the relay interface came about as a result of +discussions on tracing involving the following: + +Michel Dagenais <michel.dagenais@polymtl.ca> +Richard Moore <richardj_moore@uk.ibm.com> +Bob Wisniewski <bob@watson.ibm.com> +Karim Yaghmour <karim@opersys.com> +Tom Zanussi <zanussi@us.ibm.com> + +Also thanks to Hubertus Franke for a lot of useful suggestions and bug +reports. diff --git a/Documentation/filesystems/relayfs.txt b/Documentation/filesystems/relayfs.txt deleted file mode 100644 index 5832377b734..00000000000 --- a/Documentation/filesystems/relayfs.txt +++ /dev/null @@ -1,442 +0,0 @@ - -relayfs - a high-speed data relay filesystem -============================================ - -relayfs is a filesystem designed to provide an efficient mechanism for -tools and facilities to relay large and potentially sustained streams -of data from kernel space to user space. - -The main abstraction of relayfs is the 'channel'. A channel consists -of a set of per-cpu kernel buffers each represented by a file in the -relayfs filesystem. Kernel clients write into a channel using -efficient write functions which automatically log to the current cpu's -channel buffer. User space applications mmap() the per-cpu files and -retrieve the data as it becomes available. - -The format of the data logged into the channel buffers is completely -up to the relayfs client; relayfs does however provide hooks which -allow clients to impose some structure on the buffer data. Nor does -relayfs implement any form of data filtering - this also is left to -the client. The purpose is to keep relayfs as simple as possible. - -This document provides an overview of the relayfs API. The details of -the function parameters are documented along with the functions in the -filesystem code - please see that for details. - -Semantics -========= - -Each relayfs channel has one buffer per CPU, each buffer has one or -more sub-buffers. Messages are written to the first sub-buffer until -it is too full to contain a new message, in which case it it is -written to the next (if available). Messages are never split across -sub-buffers. At this point, userspace can be notified so it empties -the first sub-buffer, while the kernel continues writing to the next. - -When notified that a sub-buffer is full, the kernel knows how many -bytes of it are padding i.e. unused. Userspace can use this knowledge -to copy only valid data. - -After copying it, userspace can notify the kernel that a sub-buffer -has been consumed. - -relayfs can operate in a mode where it will overwrite data not yet -collected by userspace, and not wait for it to consume it. - -relayfs itself does not provide for communication of such data between -userspace and kernel, allowing the kernel side to remain simple and -not impose a single interface on userspace. It does provide a set of -examples and a separate helper though, described below. - -klog and relay-apps example code -================================ - -relayfs itself is ready to use, but to make things easier, a couple -simple utility functions and a set of examples are provided. 
- -The relay-apps example tarball, available on the relayfs sourceforge -site, contains a set of self-contained examples, each consisting of a -pair of .c files containing boilerplate code for each of the user and -kernel sides of a relayfs application; combined these two sets of -boilerplate code provide glue to easily stream data to disk, without -having to bother with mundane housekeeping chores. - -The 'klog debugging functions' patch (klog.patch in the relay-apps -tarball) provides a couple of high-level logging functions to the -kernel which allow writing formatted text or raw data to a channel, -regardless of whether a channel to write into exists or not, or -whether relayfs is compiled into the kernel or is configured as a -module. These functions allow you to put unconditional 'trace' -statements anywhere in the kernel or kernel modules; only when there -is a 'klog handler' registered will data actually be logged (see the -klog and kleak examples for details). - -It is of course possible to use relayfs from scratch i.e. without -using any of the relay-apps example code or klog, but you'll have to -implement communication between userspace and kernel, allowing both to -convey the state of buffers (full, empty, amount of padding). - -klog and the relay-apps examples can be found in the relay-apps -tarball on http://relayfs.sourceforge.net - - -The relayfs user space API -========================== - -relayfs implements basic file operations for user space access to -relayfs channel buffer data. Here are the file operations that are -available and some comments regarding their behavior: - -open() enables user to open an _existing_ buffer. - -mmap() results in channel buffer being mapped into the caller's - memory space. Note that you can't do a partial mmap - you must - map the entire file, which is NRBUF * SUBBUFSIZE. - -read() read the contents of a channel buffer. The bytes read are - 'consumed' by the reader i.e. they won't be available again - to subsequent reads. If the channel is being used in - no-overwrite mode (the default), it can be read at any time - even if there's an active kernel writer. If the channel is - being used in overwrite mode and there are active channel - writers, results may be unpredictable - users should make - sure that all logging to the channel has ended before using - read() with overwrite mode. - -poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are - notified when sub-buffer boundaries are crossed. - -close() decrements the channel buffer's refcount. When the refcount - reaches 0 i.e. when no process or kernel client has the buffer - open, the channel buffer is freed. - - -In order for a user application to make use of relayfs files, the -relayfs filesystem must be mounted. For example, - - mount -t relayfs relayfs /mnt/relay - -NOTE: relayfs doesn't need to be mounted for kernel clients to create - or use channels - it only needs to be mounted when user space - applications need access to the buffer data. 
- - -The relayfs kernel API -====================== - -Here's a summary of the API relayfs provides to in-kernel clients: - - - channel management functions: - - relay_open(base_filename, parent, subbuf_size, n_subbufs, - callbacks) - relay_close(chan) - relay_flush(chan) - relay_reset(chan) - relayfs_create_dir(name, parent) - relayfs_remove_dir(dentry) - relayfs_create_file(name, parent, mode, fops, data) - relayfs_remove_file(dentry) - - channel management typically called on instigation of userspace: - - relay_subbufs_consumed(chan, cpu, subbufs_consumed) - - write functions: - - relay_write(chan, data, length) - __relay_write(chan, data, length) - relay_reserve(chan, length) - - callbacks: - - subbuf_start(buf, subbuf, prev_subbuf, prev_padding) - buf_mapped(buf, filp) - buf_unmapped(buf, filp) - create_buf_file(filename, parent, mode, buf, is_global) - remove_buf_file(dentry) - - helper functions: - - relay_buf_full(buf) - subbuf_start_reserve(buf, length) - - -Creating a channel ------------------- - -relay_open() is used to create a channel, along with its per-cpu -channel buffers. Each channel buffer will have an associated file -created for it in the relayfs filesystem, which can be opened and -mmapped from user space if desired. The files are named -basename0...basenameN-1 where N is the number of online cpus, and by -default will be created in the root of the filesystem. If you want a -directory structure to contain your relayfs files, you can create it -with relayfs_create_dir() and pass the parent directory to -relay_open(). Clients are responsible for cleaning up any directory -structure they create when the channel is closed - use -relayfs_remove_dir() for that. - -The total size of each per-cpu buffer is calculated by multiplying the -number of sub-buffers by the sub-buffer size passed into relay_open(). -The idea behind sub-buffers is that they're basically an extension of -double-buffering to N buffers, and they also allow applications to -easily implement random-access-on-buffer-boundary schemes, which can -be important for some high-volume applications. The number and size -of sub-buffers is completely dependent on the application and even for -the same application, different conditions will warrant different -values for these parameters at different times. Typically, the right -values to use are best decided after some experimentation; in general, -though, it's safe to assume that having only 1 sub-buffer is a bad -idea - you're guaranteed to either overwrite data or lose events -depending on the channel mode being used. - -Channel 'modes' ---------------- - -relayfs channels can be used in either of two modes - 'overwrite' or -'no-overwrite'. The mode is entirely determined by the implementation -of the subbuf_start() callback, as described below. In 'overwrite' -mode, also known as 'flight recorder' mode, writes continuously cycle -around the buffer and will never fail, but will unconditionally -overwrite old data regardless of whether it's actually been consumed. -In no-overwrite mode, writes will fail i.e. data will be lost, if the -number of unconsumed sub-buffers equals the total number of -sub-buffers in the channel. It should be clear that if there is no -consumer or if the consumer can't consume sub-buffers fast enought, -data will be lost in either case; the only difference is whether data -is lost from the beginning or the end of a buffer. 
- -As explained above, a relayfs channel is made of up one or more -per-cpu channel buffers, each implemented as a circular buffer -subdivided into one or more sub-buffers. Messages are written into -the current sub-buffer of the channel's current per-cpu buffer via the -write functions described below. Whenever a message can't fit into -the current sub-buffer, because there's no room left for it, the -client is notified via the subbuf_start() callback that a switch to a -new sub-buffer is about to occur. The client uses this callback to 1) -initialize the next sub-buffer if appropriate 2) finalize the previous -sub-buffer if appropriate and 3) return a boolean value indicating -whether or not to actually go ahead with the sub-buffer switch. - -To implement 'no-overwrite' mode, the userspace client would provide -an implementation of the subbuf_start() callback something like the -following: - -static int subbuf_start(struct rchan_buf *buf, - void *subbuf, - void *prev_subbuf, - unsigned int prev_padding) -{ - if (prev_subbuf) - *((unsigned *)prev_subbuf) = prev_padding; - - if (relay_buf_full(buf)) - return 0; - - subbuf_start_reserve(buf, sizeof(unsigned int)); - - return 1; -} - -If the current buffer is full i.e. all sub-buffers remain unconsumed, -the callback returns 0 to indicate that the buffer switch should not -occur yet i.e. until the consumer has had a chance to read the current -set of ready sub-buffers. For the relay_buf_full() function to make -sense, the consumer is reponsible for notifying relayfs when -sub-buffers have been consumed via relay_subbufs_consumed(). Any -subsequent attempts to write into the buffer will again invoke the -subbuf_start() callback with the same parameters; only when the -consumer has consumed one or more of the ready sub-buffers will -relay_buf_full() return 0, in which case the buffer switch can -continue. - -The implementation of the subbuf_start() callback for 'overwrite' mode -would be very similar: - -static int subbuf_start(struct rchan_buf *buf, - void *subbuf, - void *prev_subbuf, - unsigned int prev_padding) -{ - if (prev_subbuf) - *((unsigned *)prev_subbuf) = prev_padding; - - subbuf_start_reserve(buf, sizeof(unsigned int)); - - return 1; -} - -In this case, the relay_buf_full() check is meaningless and the -callback always returns 1, causing the buffer switch to occur -unconditionally. It's also meaningless for the client to use the -relay_subbufs_consumed() function in this mode, as it's never -consulted. - -The default subbuf_start() implementation, used if the client doesn't -define any callbacks, or doesn't define the subbuf_start() callback, -implements the simplest possible 'no-overwrite' mode i.e. it does -nothing but return 0. - -Header information can be reserved at the beginning of each sub-buffer -by calling the subbuf_start_reserve() helper function from within the -subbuf_start() callback. This reserved area can be used to store -whatever information the client wants. In the example above, room is -reserved in each sub-buffer to store the padding count for that -sub-buffer. This is filled in for the previous sub-buffer in the -subbuf_start() implementation; the padding value for the previous -sub-buffer is passed into the subbuf_start() callback along with a -pointer to the previous sub-buffer, since the padding value isn't -known until a sub-buffer is filled. The subbuf_start() callback is -also called for the first sub-buffer when the channel is opened, to -give the client a chance to reserve space in it. 
In this case the -previous sub-buffer pointer passed into the callback will be NULL, so -the client should check the value of the prev_subbuf pointer before -writing into the previous sub-buffer. - -Writing to a channel --------------------- - -kernel clients write data into the current cpu's channel buffer using -relay_write() or __relay_write(). relay_write() is the main logging -function - it uses local_irqsave() to protect the buffer and should be -used if you might be logging from interrupt context. If you know -you'll never be logging from interrupt context, you can use -__relay_write(), which only disables preemption. These functions -don't return a value, so you can't determine whether or not they -failed - the assumption is that you wouldn't want to check a return -value in the fast logging path anyway, and that they'll always succeed -unless the buffer is full and no-overwrite mode is being used, in -which case you can detect a failed write in the subbuf_start() -callback by calling the relay_buf_full() helper function. - -relay_reserve() is used to reserve a slot in a channel buffer which -can be written to later. This would typically be used in applications -that need to write directly into a channel buffer without having to -stage data in a temporary buffer beforehand. Because the actual write -may not happen immediately after the slot is reserved, applications -using relay_reserve() can keep a count of the number of bytes actually -written, either in space reserved in the sub-buffers themselves or as -a separate array. See the 'reserve' example in the relay-apps tarball -at http://relayfs.sourceforge.net for an example of how this can be -done. Because the write is under control of the client and is -separated from the reserve, relay_reserve() doesn't protect the buffer -at all - it's up to the client to provide the appropriate -synchronization when using relay_reserve(). - -Closing a channel ------------------ - -The client calls relay_close() when it's finished using the channel. -The channel and its associated buffers are destroyed when there are no -longer any references to any of the channel buffers. relay_flush() -forces a sub-buffer switch on all the channel buffers, and can be used -to finalize and process the last sub-buffers before the channel is -closed. - -Creating non-relay files ------------------------- - -relay_open() automatically creates files in the relayfs filesystem to -represent the per-cpu kernel buffers; it's often useful for -applications to be able to create their own files alongside the relay -files in the relayfs filesystem as well e.g. 'control' files much like -those created in /proc or debugfs for similar purposes, used to -communicate control information between the kernel and user sides of a -relayfs application. For this purpose the relayfs_create_file() and -relayfs_remove_file() API functions exist. For relayfs_create_file(), -the caller passes in a set of user-defined file operations to be used -for the file and an optional void * to a user-specified data item, -which will be accessible via inode->u.generic_ip (see the relay-apps -tarball for examples). The file_operations are a required parameter -to relayfs_create_file() and thus the semantics of these files are -completely defined by the caller. - -See the relay-apps tarball at http://relayfs.sourceforge.net for -examples of how these non-relay files are meant to be used. 
- -Creating relay files in other filesystems ------------------------------------------ - -By default of course, relay_open() creates relay files in the relayfs -filesystem. Because relay_file_operations is exported, however, it's -also possible to create and use relay files in other pseudo-filesytems -such as debugfs. - -For this purpose, two callback functions are provided, -create_buf_file() and remove_buf_file(). create_buf_file() is called -once for each per-cpu buffer from relay_open() to allow the client to -create a file to be used to represent the corresponding buffer; if -this callback is not defined, the default implementation will create -and return a file in the relayfs filesystem to represent the buffer. -The callback should return the dentry of the file created to represent -the relay buffer. Note that the parent directory passed to -relay_open() (and passed along to the callback), if specified, must -exist in the same filesystem the new relay file is created in. If -create_buf_file() is defined, remove_buf_file() must also be defined; -it's responsible for deleting the file(s) created in create_buf_file() -and is called during relay_close(). - -The create_buf_file() implementation can also be defined in such a way -as to allow the creation of a single 'global' buffer instead of the -default per-cpu set. This can be useful for applications interested -mainly in seeing the relative ordering of system-wide events without -the need to bother with saving explicit timestamps for the purpose of -merging/sorting per-cpu files in a postprocessing step. - -To have relay_open() create a global buffer, the create_buf_file() -implementation should set the value of the is_global outparam to a -non-zero value in addition to creating the file that will be used to -represent the single buffer. In the case of a global buffer, -create_buf_file() and remove_buf_file() will be called only once. The -normal channel-writing functions e.g. relay_write() can still be used -- writes from any cpu will transparently end up in the global buffer - -but since it is a global buffer, callers should make sure they use the -proper locking for such a buffer, either by wrapping writes in a -spinlock, or by copying a write function from relayfs_fs.h and -creating a local version that internally does the proper locking. - -See the 'exported-relayfile' examples in the relay-apps tarball for -examples of creating and using relay files in debugfs. - -Misc ----- - -Some applications may want to keep a channel around and re-use it -rather than open and close a new channel for each use. relay_reset() -can be used for this purpose - it resets a channel to its initial -state without reallocating channel buffer memory or destroying -existing mappings. It should however only be called when it's safe to -do so i.e. when the channel isn't currently being written to. - -Finally, there are a couple of utility callbacks that can be used for -different purposes. buf_mapped() is called whenever a channel buffer -is mmapped from user space and buf_unmapped() is called when it's -unmapped. The client can use this notification to trigger actions -within the kernel application, such as enabling/disabling logging to -the channel. - - -Resources -========= - -For news, example code, mailing list, etc. 
see the relayfs homepage: - - http://relayfs.sourceforge.net - - -Credits -======= - -The ideas and specs for relayfs came about as a result of discussions -on tracing involving the following: - -Michel Dagenais <michel.dagenais@polymtl.ca> -Richard Moore <richardj_moore@uk.ibm.com> -Bob Wisniewski <bob@watson.ibm.com> -Karim Yaghmour <karim@opersys.com> -Tom Zanussi <zanussi@us.ibm.com> - -Also thanks to Hubertus Franke for a lot of useful suggestions and bug -reports. diff --git a/Documentation/input/joystick.txt b/Documentation/input/joystick.txt index d53b857a371..841c353297e 100644 --- a/Documentation/input/joystick.txt +++ b/Documentation/input/joystick.txt @@ -39,7 +39,6 @@ them. Bug reports and success stories are also welcome. The input project website is at: - http://www.suse.cz/development/input/ http://atrey.karlin.mff.cuni.cz/~vojtech/input/ There is also a mailing list for the driver at: diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index b50595a0550..7947cede871 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1183,6 +1183,8 @@ running once the system is up. Mechanism 2. nommconf [IA-32,X86_64] Disable use of MMCONFIG for PCI Configuration + mmconf [IA-32,X86_64] Force MMCONFIG. This is useful + to override the builtin blacklist. nomsi [MSI] If the PCI_MSI kernel config parameter is enabled, this kernel boot option can be used to disable the use of MSI interrupts system-wide. diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt index 8d9bffbd192..949f7b5a205 100644 --- a/Documentation/kobject.txt +++ b/Documentation/kobject.txt @@ -247,7 +247,7 @@ the object-specific fields, which include: - default_attrs: Default attributes to be exported via sysfs when the object is registered.Note that the last attribute has to be initialized to NULL ! You can find a complete implementation - in drivers/block/genhd.c + in block/genhd.c Instances of struct kobj_type are not registered; only referenced by diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index d46338af600..3e0c017e787 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -294,15 +294,15 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max Default: 87380*2 bytes. tcp_mem - vector of 3 INTEGERs: min, pressure, max - low: below this number of pages TCP is not bothered about its + min: below this number of pages TCP is not bothered about its memory appetite. pressure: when amount of memory allocated by TCP exceeds this number of pages, TCP moderates its memory consumption and enters memory pressure mode, which is exited when memory consumption falls - under "low". + under "min". - high: number of pages allowed for queueing by all TCP sockets. + max: number of pages allowed for queueing by all TCP sockets. Defaults are calculated at boot time from amount of available memory. diff --git a/Documentation/scsi/ChangeLog.megaraid b/Documentation/scsi/ChangeLog.megaraid index c173806c91f..a056bbe67c7 100644 --- a/Documentation/scsi/ChangeLog.megaraid +++ b/Documentation/scsi/ChangeLog.megaraid @@ -1,3 +1,126 @@ +Release Date : Fri May 19 09:31:45 EST 2006 - Seokmann Ju <sju@lsil.com> +Current Version : 2.20.4.9 (scsi module), 2.20.2.6 (cmm module) +Older Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module) + +1. Fixed a bug in megaraid_init_mbox(). + Customer reported "garbage in file on x86_64 platform". 
+ Root Cause: the driver registered controllers as 64-bit DMA capable + for those which are not support it. + Fix: Made change in the function inserting identification machanism + identifying 64-bit DMA capable controllers. + + > -----Original Message----- + > From: Vasily Averin [mailto:vvs@sw.ru] + > Sent: Thursday, May 04, 2006 2:49 PM + > To: linux-scsi@vger.kernel.org; Kolli, Neela; Mukker, Atul; + > Ju, Seokmann; Bagalkote, Sreenivas; + > James.Bottomley@SteelEye.com; devel@openvz.org + > Subject: megaraid_mbox: garbage in file + > + > Hello all, + > + > I've investigated customers claim on the unstable work of + > their node and found a + > strange effect: reading from some files leads to the + > "attempt to access beyond end of device" messages. + > + > I've checked filesystem, memory on the node, motherboard BIOS + > version, but it + > does not help and issue still has been reproduced by simple + > file reading. + > + > Reproducer is simple: + > + > echo 0xffffffff >/proc/sys/dev/scsi/logging_level ; + > cat /vz/private/101/root/etc/ld.so.cache >/tmp/ttt ; + > echo 0 >/proc/sys/dev/scsi/logging + > + > It leads to the following messages in dmesg + > + > sd_init_command: disk=sda, block=871769260, count=26 + > sda : block=871769260 + > sda : reading 26/26 512 byte blocks. + > scsi_add_timer: scmd: f79ed980, time: 7500, (c02b1420) + > sd 0:1:0:0: send 0xf79ed980 sd 0:1:0:0: + > command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00 + > buffer = 0xf7cfb540, bufflen = 13312, done = 0xc0366b40, + > queuecommand 0xc0344010 + > leaving scsi_dispatch_cmnd() + > scsi_delete_timer: scmd: f79ed980, rtn: 1 + > sd 0:1:0:0: done 0xf79ed980 SUCCESS 0 sd 0:1:0:0: + > command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00 + > scsi host busy 1 failed 0 + > sd 0:1:0:0: Notifying upper driver of completion (result 0) + > sd_rw_intr: sda: res=0x0 + > 26 sectors total, 13312 bytes done. + > use_sg is 4 + > attempt to access beyond end of device + > sda6: rw=0, want=1044134458, limit=951401367 + > Buffer I/O error on device sda6, logical block 522067228 + > attempt to access beyond end of device + +2. When INQUIRY with EVPD bit set issued to the MegaRAID controller, + system memory gets corrupted. + Root Cause: MegaRAID F/W handle the INQUIRY with EVPD bit set + incorrectly. + Fix: MegaRAID F/W has fixed the problem and being process of release, + soon. Meanwhile, driver will filter out the request. + +3. One of member in the data structure of the driver leads unaligne + issue on 64-bit platform. + Customer reporeted "kernel unaligned access addrss" issue when + application communicates with MegaRAID HBA driver. + Root Cause: in uioc_t structure, one of member had misaligned and it + led system to display the error message. + Fix: A patch submitted to community from following folk. + + > -----Original Message----- + > From: linux-scsi-owner@vger.kernel.org + > [mailto:linux-scsi-owner@vger.kernel.org] On Behalf Of Sakurai Hiroomi + > Sent: Wednesday, July 12, 2006 4:20 AM + > To: linux-scsi@vger.kernel.org; linux-kernel@vger.kernel.org + > Subject: Re: Help: strange messages from kernel on IA64 platform + > + > Hi, + > + > I saw same message. + > + > When GAM(Global Array Manager) is started, The following + > message output. + > kernel: kernel unaligned access to 0xe0000001fe1080d4, + > ip=0xa000000200053371 + > + > The uioc structure used by ioctl is defined by packed, + > the allignment of each member are disturbed. + > In a 64 bit structure, the allignment of member doesn't fit 64 bit + > boundary. 
this causes this messages. + > In a 32 bit structure, we don't see the message because the allinment + > of member fit 32 bit boundary even if packed is specified. + > + > patch + > I Add 32 bit dummy member to fit 64 bit boundary. I tested. + > We confirmed this patch fix the problem by IA64 server. + > + > ************************************************************** + > **************** + > --- linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h.orig + > 2006-04-03 17:13:03.000000000 +0900 + > +++ linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h + > 2006-04-03 17:14:09.000000000 +0900 + > @@ -132,6 +132,10 @@ + > /* Driver Data: */ + > void __user * user_data; + > uint32_t user_data_len; + > + + > + /* 64bit alignment */ + > + uint32_t pad_0xBC; + > + + > mraid_passthru_t __user *user_pthru; + > + > mraid_passthru_t *pthru32; + > ************************************************************** + > **************** + Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju <sju@lsil.com> Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module) Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module) diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt index 0b62c62142c..5c3a5190596 100644 --- a/Documentation/sysctl/fs.txt +++ b/Documentation/sysctl/fs.txt @@ -25,6 +25,7 @@ Currently, these files are in /proc/sys/fs: - inode-state - overflowuid - overflowgid +- suid_dumpable - super-max - super-nr @@ -131,6 +132,25 @@ The default is 65534. ============================================================== +suid_dumpable: + +This value can be used to query and set the core dump mode for setuid +or otherwise protected/tainted binaries. The modes are + +0 - (default) - traditional behaviour. Any process which has changed + privilege levels or is execute only will not be dumped +1 - (debug) - all processes dump core when possible. The core dump is + owned by the current user and no security is applied. This is + intended for system debugging situations only. Ptrace is unchecked. +2 - (suidsafe) - any binary which normally would not be dumped is dumped + readable by root only. This allows the end user to remove + such a dump but not access it directly. For security reasons + core dumps in this mode will not overwrite one another or + other files. This mode is appropriate when adminstrators are + attempting to debug problems in a normal environment. + +============================================================== + super-max & super-nr: These numbers control the maximum number of superblocks, and diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 7345c338080..89bf8c20a58 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -50,7 +50,6 @@ show up in /proc/sys/kernel: - shmmax [ sysv ipc ] - shmmni - stop-a [ SPARC only ] -- suid_dumpable - sysrq ==> Documentation/sysrq.txt - tainted - threads-max @@ -310,25 +309,6 @@ kernel. This value defaults to SHMMAX. ============================================================== -suid_dumpable: - -This value can be used to query and set the core dump mode for setuid -or otherwise protected/tainted binaries. The modes are - -0 - (default) - traditional behaviour. Any process which has changed - privilege levels or is execute only will not be dumped -1 - (debug) - all processes dump core when possible. The core dump is - owned by the current user and no security is applied. This is - intended for system debugging situations only. Ptrace is unchecked. 
-2 - (suidsafe) - any binary which normally would not be dumped is dumped - readable by root only. This allows the end user to remove - such a dump but not access it directly. For security reasons - core dumps in this mode will not overwrite one another or - other files. This mode is appropriate when adminstrators are - attempting to debug problems in a normal environment. - -============================================================== - tainted: Non-zero if the kernel has been tainted. Numeric values, which diff --git a/MAINTAINERS b/MAINTAINERS index e3e1515ba5a..25cd7073a20 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -889,6 +889,12 @@ M: rdunlap@xenotime.net T: git http://tali.admingilde.org/git/linux-docbook.git S: Maintained +DOCKING STATION DRIVER +P: Kristen Carlson Accardi +M: kristen.c.accardi@intel.com +L: linux-acpi@vger.kernel.org +S: Maintained + DOUBLETALK DRIVER P: James R. Van Zandt M: jrv@vanzandt.mv.com @@ -2656,6 +2662,14 @@ M: chrisw@sous-sol.org L: stable@kernel.org S: Maintained +STABLE BRANCH: +P: Greg Kroah-Hartman +M: greg@kroah.com +P: Chris Wright +M: chrisw@sous-sol.org +L: stable@kernel.org +S: Maintained + TPM DEVICE DRIVER P: Kylene Hall M: kjhall@us.ibm.com @@ -3282,10 +3296,11 @@ S: Maintained XFS FILESYSTEM P: Silicon Graphics Inc +P: Tim Shimmin, David Chatterton M: xfs-masters@oss.sgi.com -M: nathans@sgi.com L: xfs@oss.sgi.com W: http://oss.sgi.com/projects/xfs +T: git git://oss.sgi.com:8090/xfs/xfs-2.6 S: Supported X86 3-LEVEL PAGING (PAE) SUPPORT @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 18 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc5 NAME=Crazed Snow-Weasel # *DOCUMENTATION* @@ -309,9 +309,6 @@ CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE) CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common -# Force gcc to behave correct even for buggy distributions -CFLAGS += $(call cc-option, -fno-stack-protector) - AFLAGS := -D__ASSEMBLY__ # Read KERNELRELEASE from include/config/kernel.release (if it exists) @@ -436,12 +433,13 @@ core-y := usr/ endif # KBUILD_EXTMOD ifeq ($(dot-config),1) -# In this section, we need .config +# Read in config +-include include/config/auto.conf +ifeq ($(KBUILD_EXTMOD),) # Read in dependencies to all Kconfig* files, make sure to run # oldconfig if changes are detected. -include include/config/auto.conf.cmd --include include/config/auto.conf # To avoid any implicit rule to kick in, define an empty command $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ; @@ -451,16 +449,27 @@ $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ; # if auto.conf.cmd is missing then we are probably in a cleaned tree so # we execute the config step to be sure to catch updated Kconfig files include/config/auto.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd -ifeq ($(KBUILD_EXTMOD),) $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig else - $(error kernel configuration not valid - run 'make prepare' in $(srctree) to update it) -endif +# external modules needs include/linux/autoconf.h and include/config/auto.conf +# but do not care if they are up-to-date. 
Use auto.conf to trigger the test +PHONY += include/config/auto.conf + +include/config/auto.conf: + $(Q)test -e include/linux/autoconf.h -a -e $@ || ( \ + echo; \ + echo " ERROR: Kernel configuration is invalid."; \ + echo " include/linux/autoconf.h or $@ are missing."; \ + echo " Run 'make oldconfig && make prepare' on kernel src to fix it."; \ + echo; \ + /bin/false) + +endif # KBUILD_EXTMOD else # Dummy target needed, because used as prerequisite include/config/auto.conf: ; -endif +endif # $(dot-config) # The all: target is the default when no target is given on the # command line. @@ -474,6 +483,8 @@ else CFLAGS += -O2 endif +include $(srctree)/arch/$(ARCH)/Makefile + ifdef CONFIG_FRAME_POINTER CFLAGS += -fno-omit-frame-pointer $(call cc-option,-fno-optimize-sibling-calls,) else @@ -488,7 +499,8 @@ ifdef CONFIG_DEBUG_INFO CFLAGS += -g endif -include $(srctree)/arch/$(ARCH)/Makefile +# Force gcc to behave correct even for buggy distributions +CFLAGS += $(call cc-option, -fno-stack-protector) # arch Makefile may override CC so keep this after arch Makefile is included NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 3345c6d0fd1..92873cdee31 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -47,7 +47,8 @@ comma = , # testing for a specific architecture or later rather impossible. arch-$(CONFIG_CPU_32v6) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) arch-$(CONFIG_CPU_32v6K) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k) -arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4) +arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t) +arch-$(CONFIG_CPU_32v4T) :=-D__LINUX_ARM_ARCH__=4 -march=armv4t arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4 arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3 diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 5b7c26395b4..028bdc9228f 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -179,17 +179,19 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr, static inline struct safe_buffer * find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr) { - struct safe_buffer *b = NULL; + struct safe_buffer *b, *rb = NULL; unsigned long flags; read_lock_irqsave(&device_info->lock, flags); list_for_each_entry(b, &device_info->safe_buffers, node) - if (b->safe_dma_addr == safe_dma_addr) + if (b->safe_dma_addr == safe_dma_addr) { + rb = b; break; + } read_unlock_irqrestore(&device_info->lock, flags); - return b; + return rb; } static inline void diff --git a/arch/arm/common/rtctime.c b/arch/arm/common/rtctime.c index 35c9a64ac14..4e5445cfb0e 100644 --- a/arch/arm/common/rtctime.c +++ b/arch/arm/common/rtctime.c @@ -68,6 +68,7 @@ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc rtc_time_to_tm(next_time, next); } } +EXPORT_SYMBOL(rtc_next_alarm_time); static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm) { diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index a331c12cead..29818bd3248 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -618,7 +618,7 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq) { struct sa1111 *sachip; unsigned long id; - unsigned int has_devs, val; + 
unsigned int has_devs; int i, ret = -ENODEV; sachip = kzalloc(sizeof(struct sa1111), GFP_KERNEL); @@ -669,6 +669,9 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq) sa1111_wake(sachip); #ifdef CONFIG_ARCH_SA1100 + { + unsigned int val; + /* * The SDRAM configuration of the SA1110 and the SA1111 must * match. This is very important to ensure that SA1111 accesses @@ -692,6 +695,7 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq) * Enable the SA1110 memory bus request and grant signals. */ sa1110_mb_enable(); + } #endif /* diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index f0c0cdb1c18..1320a0efca7 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -13,12 +13,11 @@ obj-y := compat.o entry-armv.o entry-common.o irq.o \ obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_ISA_DMA_API) += dma.o obj-$(CONFIG_ARCH_ACORN) += ecard.o -obj-$(CONFIG_FOOTBRIDGE) += isa.o obj-$(CONFIG_FIQ) += fiq.o obj-$(CONFIG_MODULES) += armksyms.o module.o obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ISA_DMA) += dma-isa.o -obj-$(CONFIG_PCI) += bios32.o +obj-$(CONFIG_PCI) += bios32.o isa.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 7ea5f01dfc7..de4e3313790 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -634,6 +634,14 @@ ENTRY(__switch_to) * purpose. */ + .macro usr_ret, reg +#ifdef CONFIG_ARM_THUMB + bx \reg +#else + mov pc, \reg +#endif + .endm + .align 5 .globl __kuser_helper_start __kuser_helper_start: @@ -675,7 +683,7 @@ __kuser_memory_barrier: @ 0xffff0fa0 #if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP) mcr p15, 0, r0, c7, c10, 5 @ dmb #endif - mov pc, lr + usr_ret lr .align 5 @@ -778,7 +786,7 @@ __kuser_cmpxchg: @ 0xffff0fc0 mov r0, #-1 adds r0, r0, #0 #endif - mov pc, lr + usr_ret lr #else @@ -792,7 +800,7 @@ __kuser_cmpxchg: @ 0xffff0fc0 #ifdef CONFIG_SMP mcr p15, 0, r0, c7, c10, 5 @ dmb #endif - mov pc, lr + usr_ret lr #endif @@ -834,16 +842,11 @@ __kuser_cmpxchg: @ 0xffff0fc0 __kuser_get_tls: @ 0xffff0fe0 #if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL) - ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0 - mov pc, lr - #else - mrc p15, 0, r0, c13, c0, 3 @ read TLS register - mov pc, lr - #endif + usr_ret lr .rep 5 .word 0 @ pad up to __kuser_helper_version diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 4fe386eea4b..5365d4e5949 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -118,7 +118,7 @@ ENTRY(secondary_startup) sub r4, r4, r5 @ mmu has been enabled ldr r4, [r7, r4] @ get secondary_data.pgdir adr lr, __enable_mmu @ return address - add pc, r10, #12 @ initialise processor + add pc, r10, #PROCINFO_INITFUNC @ initialise processor @ (return control reg) /* diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c index 685c3e591a7..54bbd9fe255 100644 --- a/arch/arm/kernel/isa.c +++ b/arch/arm/kernel/isa.c @@ -3,21 +3,14 @@ * * Copyright (C) 1999 Phil Blundell * - * ISA shared memory and I/O port support - */ - -/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. + * + * ISA shared memory and I/O port support, and is required to support + * iopl, inb, outb and friends in userspace via glibc emulation. 
*/ - -/* - * Nothing about this is actually ARM specific. One day we could move - * it into kernel/resource.c or some place like that. - */ - #include <linux/stddef.h> #include <linux/types.h> #include <linux/fs.h> @@ -27,21 +20,49 @@ static unsigned int isa_membase, isa_portbase, isa_portshift; static ctl_table ctl_isa_vars[4] = { - {BUS_ISA_MEM_BASE, "membase", &isa_membase, - sizeof(isa_membase), 0444, NULL, &proc_dointvec}, - {BUS_ISA_PORT_BASE, "portbase", &isa_portbase, - sizeof(isa_portbase), 0444, NULL, &proc_dointvec}, - {BUS_ISA_PORT_SHIFT, "portshift", &isa_portshift, - sizeof(isa_portshift), 0444, NULL, &proc_dointvec}, - {0} + { + .ctl_name = BUS_ISA_MEM_BASE, + .procname = "membase", + .data = &isa_membase, + .maxlen = sizeof(isa_membase), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, { + .ctl_name = BUS_ISA_PORT_BASE, + .procname = "portbase", + .data = &isa_portbase, + .maxlen = sizeof(isa_portbase), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, { + .ctl_name = BUS_ISA_PORT_SHIFT, + .procname = "portshift", + .data = &isa_portshift, + .maxlen = sizeof(isa_portshift), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, {0} }; static struct ctl_table_header *isa_sysctl_header; -static ctl_table ctl_isa[2] = {{CTL_BUS_ISA, "isa", NULL, 0, 0555, ctl_isa_vars}, - {0}}; -static ctl_table ctl_bus[2] = {{CTL_BUS, "bus", NULL, 0, 0555, ctl_isa}, - {0}}; +static ctl_table ctl_isa[2] = { + { + .ctl_name = CTL_BUS_ISA, + .procname = "isa", + .mode = 0555, + .child = ctl_isa_vars, + }, {0} +}; + +static ctl_table ctl_bus[2] = { + { + .ctl_name = CTL_BUS, + .procname = "bus", + .mode = 0555, + .child = ctl_isa, + }, {0} +}; void __init register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int portshift) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 4e29dd03e58..aeeed806f99 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -233,7 +233,7 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) spin_unlock_irq(&die_lock); if (panic_on_oops) - panic("Fatal exception: panic_on_oops"); + panic("Fatal exception"); do_exit(SIGSEGV); } diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c index 607ed1f5b3f..823e25d4547 100644 --- a/arch/arm/mach-footbridge/dc21285.c +++ b/arch/arm/mach-footbridge/dc21285.c @@ -35,7 +35,6 @@ extern int setup_arm_irq(int, struct irqaction *); extern void pcibios_report_status(u_int status_mask, int warn); -extern void register_isa_ports(unsigned int, unsigned int, unsigned int); static unsigned long dc21285_base_address(struct pci_bus *bus, unsigned int devfn) diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index f9043592e29..4418f6d7572 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c @@ -600,4 +600,6 @@ void __init pci_v3_postinit(void) printk(KERN_ERR "PCI: unable to grab local bus timeout " "interrupt: %d\n", ret); #endif + + register_isa_ports(PHYS_PCI_MEM_BASE, PHYS_PCI_IO_BASE, 0); } diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 2d40fe1145f..9562177b5fe 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -532,8 +532,6 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) return -EIO; } -EXPORT_SYMBOL(pci_set_dma_mask); -EXPORT_SYMBOL(pci_set_consistent_dma_mask); EXPORT_SYMBOL(ixp4xx_pci_read); EXPORT_SYMBOL(ixp4xx_pci_write); diff --git 
a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c index 654e2eed81f..30f1300e0e2 100644 --- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c +++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c @@ -107,9 +107,9 @@ static struct flash_platform_data gtwx5715_flash_data = { .width = 2, }; -static struct gtw5715_flash_resource = { +static struct resource gtwx5715_flash_resource = { .flags = IORESOURCE_MEM, -} +}; static struct platform_device gtwx5715_flash = { .name = "IXP4XX-Flash", @@ -130,9 +130,6 @@ static void __init gtwx5715_init(void) { ixp4xx_sys_init(); - if (!flash_resource) - printk(KERN_ERR "Could not allocate flash resource\n"); - gtwx5715_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); gtwx5715_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_8M - 1; diff --git a/arch/arm/mach-pxa/corgi_ssp.c b/arch/arm/mach-pxa/corgi_ssp.c index f9421318cb7..ff6b4ee037f 100644 --- a/arch/arm/mach-pxa/corgi_ssp.c +++ b/arch/arm/mach-pxa/corgi_ssp.c @@ -47,14 +47,15 @@ static struct corgissp_machinfo *ssp_machinfo; */ unsigned long corgi_ssp_ads7846_putget(ulong data) { - unsigned long ret,flag; + unsigned long flag; + u32 ret = 0; spin_lock_irqsave(&corgi_ssp_lock, flag); if (ssp_machinfo->cs_ads7846 >= 0) GPCR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846); ssp_write_word(&corgi_ssp_dev,data); - ret = ssp_read_word(&corgi_ssp_dev); + ssp_read_word(&corgi_ssp_dev, &ret); if (ssp_machinfo->cs_ads7846 >= 0) GPSR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846); @@ -88,7 +89,9 @@ void corgi_ssp_ads7846_put(ulong data) unsigned long corgi_ssp_ads7846_get(void) { - return ssp_read_word(&corgi_ssp_dev); + u32 ret = 0; + ssp_read_word(&corgi_ssp_dev, &ret); + return ret; } EXPORT_SYMBOL(corgi_ssp_ads7846_putget); @@ -104,6 +107,7 @@ EXPORT_SYMBOL(corgi_ssp_ads7846_get); unsigned long corgi_ssp_dac_put(ulong data) { unsigned long flag, sscr1 = SSCR1_SPH; + u32 tmp; spin_lock_irqsave(&corgi_ssp_lock, flag); @@ -118,7 +122,7 @@ unsigned long corgi_ssp_dac_put(ulong data) GPCR(ssp_machinfo->cs_lcdcon) = GPIO_bit(ssp_machinfo->cs_lcdcon); ssp_write_word(&corgi_ssp_dev,data); /* Read null data back from device to prevent SSP overflow */ - ssp_read_word(&corgi_ssp_dev); + ssp_read_word(&corgi_ssp_dev, &tmp); if (ssp_machinfo->cs_lcdcon >= 0) GPSR(ssp_machinfo->cs_lcdcon) = GPIO_bit(ssp_machinfo->cs_lcdcon); @@ -150,7 +154,7 @@ EXPORT_SYMBOL(corgi_ssp_blduty_set); int corgi_ssp_max1111_get(ulong data) { unsigned long flag; - int voltage,voltage1,voltage2; + long voltage = 0, voltage1 = 0, voltage2 = 0; spin_lock_irqsave(&corgi_ssp_lock, flag); if (ssp_machinfo->cs_max1111 >= 0) @@ -163,15 +167,15 @@ int corgi_ssp_max1111_get(ulong data) /* TB1/RB1 */ ssp_write_word(&corgi_ssp_dev,data); - ssp_read_word(&corgi_ssp_dev); /* null read */ + ssp_read_word(&corgi_ssp_dev, (u32*)&voltage1); /* null read */ /* TB12/RB2 */ ssp_write_word(&corgi_ssp_dev,0); - voltage1=ssp_read_word(&corgi_ssp_dev); + ssp_read_word(&corgi_ssp_dev, (u32*)&voltage1); /* TB13/RB3*/ ssp_write_word(&corgi_ssp_dev,0); - voltage2=ssp_read_word(&corgi_ssp_dev); + ssp_read_word(&corgi_ssp_dev, (u32*)&voltage2); ssp_disable(&corgi_ssp_dev); ssp_config(&corgi_ssp_dev, (SSCR0_National | (SSCR0_DSS & 0x0b )), 0, 0, SSCR0_SerClkDiv(ssp_machinfo->clk_ads7846)); diff --git a/arch/arm/mach-pxa/ssp.c b/arch/arm/mach-pxa/ssp.c index 93096befd01..1fddfeaa630 100644 --- a/arch/arm/mach-pxa/ssp.c +++ b/arch/arm/mach-pxa/ssp.c @@ -40,6 +40,8 @@ #define PXA_SSP_PORTS 3 +#define TIMEOUT 100000 + struct ssp_info_ { int 
irq; u32 clock; @@ -92,13 +94,18 @@ static irqreturn_t ssp_interrupt(int irq, void *dev_id, struct pt_regs *regs) * The caller is expected to perform the necessary locking. * * Returns: - * %-ETIMEDOUT timeout occurred (for future) + * %-ETIMEDOUT timeout occurred * 0 success */ int ssp_write_word(struct ssp_dev *dev, u32 data) { - while (!(SSSR_P(dev->port) & SSSR_TNF)) + int timeout = TIMEOUT; + + while (!(SSSR_P(dev->port) & SSSR_TNF)) { + if (!--timeout) + return -ETIMEDOUT; cpu_relax(); + } SSDR_P(dev->port) = data; @@ -117,15 +124,21 @@ int ssp_write_word(struct ssp_dev *dev, u32 data) * The caller is expected to perform the necessary locking. * * Returns: - * %-ETIMEDOUT timeout occurred (for future) + * %-ETIMEDOUT timeout occurred * 32-bit data success */ -int ssp_read_word(struct ssp_dev *dev) +int ssp_read_word(struct ssp_dev *dev, u32 *data) { - while (!(SSSR_P(dev->port) & SSSR_RNE)) + int timeout = TIMEOUT; + + while (!(SSSR_P(dev->port) & SSSR_RNE)) { + if (!--timeout) + return -ETIMEDOUT; cpu_relax(); + } - return SSDR_P(dev->port); + *data = SSDR_P(dev->port); + return 0; } /** @@ -136,13 +149,21 @@ int ssp_read_word(struct ssp_dev *dev) * * The caller is expected to perform the necessary locking. */ -void ssp_flush(struct ssp_dev *dev) +int ssp_flush(struct ssp_dev *dev) { + int timeout = TIMEOUT * 2; + do { while (SSSR_P(dev->port) & SSSR_RNE) { + if (!--timeout) + return -ETIMEDOUT; (void) SSDR_P(dev->port); } + if (!--timeout) + return -ETIMEDOUT; } while (SSSR_P(dev->port) & SSSR_BSY); + + return 0; } /** diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile index 0c7938645df..273e05f2b8d 100644 --- a/arch/arm/mach-s3c2410/Makefile +++ b/arch/arm/mach-s3c2410/Makefile @@ -10,45 +10,47 @@ obj-m := obj-n := obj- := +# DMA +obj-$(CONFIG_S3C2410_DMA) += dma.o + # S3C2400 support files -obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o +obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o # S3C2410 support files -obj-$(CONFIG_CPU_S3C2410) += s3c2410.o -obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o -obj-$(CONFIG_S3C2410_DMA) += dma.o +obj-$(CONFIG_CPU_S3C2410) += s3c2410.o +obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o # Power Management support -obj-$(CONFIG_PM) += pm.o sleep.o -obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o +obj-$(CONFIG_PM) += pm.o sleep.o +obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o # S3C2412 support -obj-$(CONFIG_CPU_S3C2412) += s3c2412.o -obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o +obj-$(CONFIG_CPU_S3C2412) += s3c2412.o +obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o # # S3C244X support -obj-$(CONFIG_CPU_S3C244X) += s3c244x.o -obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o +obj-$(CONFIG_CPU_S3C244X) += s3c244x.o +obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o # Clock control -obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o +obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o # S3C2440 support -obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o -obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o -obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o -obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o +obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o +obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o +obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o +obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o # S3C2442 support -obj-$(CONFIG_CPU_S3C2442) += s3c2442.o -obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o +obj-$(CONFIG_CPU_S3C2442) += s3c2442.o +obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o # bast extras diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c index 094cc52745c..25855452fe8 100644 --- 
a/arch/arm/mach-s3c2410/dma.c +++ b/arch/arm/mach-s3c2410/dma.c @@ -112,7 +112,7 @@ dmadbg_capture(s3c2410_dma_chan_t *chan, struct s3c2410_dma_regstate *regs) } static void -dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan, +dmadbg_dumpregs(const char *fname, int line, s3c2410_dma_chan_t *chan, struct s3c2410_dma_regstate *regs) { printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n", @@ -132,7 +132,16 @@ dmadbg_showchan(const char *fname, int line, s3c2410_dma_chan_t *chan) chan->number, fname, line, chan->load_state, chan->curr, chan->next, chan->end); - dmadbg_showregs(fname, line, chan, &state); + dmadbg_dumpregs(fname, line, chan, &state); +} + +static void +dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan) +{ + struct s3c2410_dma_regstate state; + + dmadbg_capture(chan, &state); + dmadbg_dumpregs(fname, line, chan, &state); } #define dbg_showregs(chan) dmadbg_showregs(__FUNCTION__, __LINE__, (chan)) @@ -253,10 +262,14 @@ s3c2410_dma_loadbuffer(s3c2410_dma_chan_t *chan, buf->next); reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0; } else { - pr_debug("load_state is %d => autoreload\n", chan->load_state); + //pr_debug("load_state is %d => autoreload\n", chan->load_state); reload = S3C2410_DCON_AUTORELOAD; } + if ((buf->data & 0xf0000000) != 0x30000000) { + dmawarn("dmaload: buffer is %p\n", (void *)buf->data); + } + writel(buf->data, chan->addr_reg); dma_wrreg(chan, S3C2410_DMA_DCON, @@ -370,7 +383,7 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan) tmp |= S3C2410_DMASKTRIG_ON; dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); - pr_debug("wrote %08lx to DMASKTRIG\n", tmp); + pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp); #if 0 /* the dma buffer loads should take care of clearing the AUTO @@ -384,7 +397,30 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan) dbg_showchan(chan); + /* if we've only loaded one buffer onto the channel, then chec + * to see if we have another, and if so, try and load it so when + * the first buffer is finished, the new one will be loaded onto + * the channel */ + + if (chan->next != NULL) { + if (chan->load_state == S3C2410_DMALOAD_1LOADED) { + + if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { + pr_debug("%s: buff not yet loaded, no more todo\n", + __FUNCTION__); + } else { + chan->load_state = S3C2410_DMALOAD_1RUNNING; + s3c2410_dma_loadbuffer(chan, chan->next); + } + + } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) { + s3c2410_dma_loadbuffer(chan, chan->next); + } + } + + local_irq_restore(flags); + return 0; } @@ -436,12 +472,11 @@ int s3c2410_dma_enqueue(unsigned int channel, void *id, buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC); if (buf == NULL) { pr_debug("%s: out of memory (%ld alloc)\n", - __FUNCTION__, sizeof(*buf)); + __FUNCTION__, (long)sizeof(*buf)); return -ENOMEM; } - pr_debug("%s: new buffer %p\n", __FUNCTION__, buf); - + //pr_debug("%s: new buffer %p\n", __FUNCTION__, buf); //dbg_showchan(chan); buf->next = NULL; @@ -537,14 +572,20 @@ s3c2410_dma_lastxfer(s3c2410_dma_chan_t *chan) case S3C2410_DMALOAD_1LOADED: if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { /* flag error? 
*/ - printk(KERN_ERR "dma%d: timeout waiting for load\n", - chan->number); + printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n", + chan->number, __FUNCTION__); return; } break; + case S3C2410_DMALOAD_1LOADED_1RUNNING: + /* I belive in this case we do not have anything to do + * until the next buffer comes along, and we turn off the + * reload */ + return; + default: - pr_debug("dma%d: lastxfer: unhandled load_state %d with no next", + pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n", chan->number, chan->load_state); return; @@ -629,7 +670,14 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs) } else { } - if (chan->next != NULL) { + /* only reload if the channel is still running... our buffer done + * routine may have altered the state by requesting the dma channel + * to stop or shutdown... */ + + /* todo: check that when the channel is shut-down from inside this + * function, we cope with unsetting reload, etc */ + + if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) { unsigned long flags; switch (chan->load_state) { @@ -644,8 +692,8 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs) case S3C2410_DMALOAD_1LOADED: if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { /* flag error? */ - printk(KERN_ERR "dma%d: timeout waiting for load\n", - chan->number); + printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n", + chan->number, __FUNCTION__); return IRQ_HANDLED; } @@ -678,8 +726,6 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs) return IRQ_HANDLED; } - - /* s3c2410_request_dma * * get control of an dma channel @@ -718,11 +764,17 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client, pr_debug("dma%d: %s : requesting irq %d\n", channel, __FUNCTION__, chan->irq); + chan->irq_claimed = 1; + local_irq_restore(flags); + err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED, client->name, (void *)chan); + local_irq_save(flags); + if (err) { chan->in_use = 0; + chan->irq_claimed = 0; local_irq_restore(flags); printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n", @@ -730,7 +782,6 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client, return err; } - chan->irq_claimed = 1; chan->irq_enabled = 1; } @@ -810,6 +861,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan) tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); tmp |= S3C2410_DMASKTRIG_STOP; + //tmp &= ~S3C2410_DMASKTRIG_ON; dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); #if 0 @@ -819,6 +871,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan) dma_wrreg(chan, S3C2410_DMA_DCON, tmp); #endif + /* should stop do this, or should we wait for flush? 
*/ chan->state = S3C2410_DMA_IDLE; chan->load_state = S3C2410_DMALOAD_NONE; @@ -827,6 +880,22 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan) return 0; } +void s3c2410_dma_waitforstop(s3c2410_dma_chan_t *chan) +{ + unsigned long tmp; + unsigned int timeout = 0x10000; + + while (timeout-- > 0) { + tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); + + if (!(tmp & S3C2410_DMASKTRIG_ON)) + return; + } + + pr_debug("dma%d: failed to stop?\n", chan->number); +} + + /* s3c2410_dma_flush * * stop the channel, and remove all current and pending transfers @@ -837,7 +906,9 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan) s3c2410_dma_buf_t *buf, *next; unsigned long flags; - pr_debug("%s:\n", __FUNCTION__); + pr_debug("%s: chan %p (%d)\n", __FUNCTION__, chan, chan->number); + + dbg_showchan(chan); local_irq_save(flags); @@ -864,11 +935,64 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan) } } + dbg_showregs(chan); + + s3c2410_dma_waitforstop(chan); + +#if 0 + /* should also clear interrupts, according to WinCE BSP */ + { + unsigned long tmp; + + tmp = dma_rdreg(chan, S3C2410_DMA_DCON); + tmp |= S3C2410_DCON_NORELOAD; + dma_wrreg(chan, S3C2410_DMA_DCON, tmp); + } +#endif + + dbg_showregs(chan); + local_irq_restore(flags); return 0; } +int +s3c2410_dma_started(s3c2410_dma_chan_t *chan) +{ + unsigned long flags; + + local_irq_save(flags); + + dbg_showchan(chan); + + /* if we've only loaded one buffer onto the channel, then chec + * to see if we have another, and if so, try and load it so when + * the first buffer is finished, the new one will be loaded onto + * the channel */ + + if (chan->next != NULL) { + if (chan->load_state == S3C2410_DMALOAD_1LOADED) { + + if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { + pr_debug("%s: buff not yet loaded, no more todo\n", + __FUNCTION__); + } else { + chan->load_state = S3C2410_DMALOAD_1RUNNING; + s3c2410_dma_loadbuffer(chan, chan->next); + } + + } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) { + s3c2410_dma_loadbuffer(chan, chan->next); + } + } + + + local_irq_restore(flags); + + return 0; + +} int s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op) @@ -885,14 +1009,15 @@ s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op) return s3c2410_dma_dostop(chan); case S3C2410_DMAOP_PAUSE: - return -ENOENT; - case S3C2410_DMAOP_RESUME: return -ENOENT; case S3C2410_DMAOP_FLUSH: return s3c2410_dma_flush(chan); + case S3C2410_DMAOP_STARTED: + return s3c2410_dma_started(chan); + case S3C2410_DMAOP_TIMEOUT: return 0; diff --git a/arch/arm/mach-sa1100/ssp.c b/arch/arm/mach-sa1100/ssp.c index 1604dadf27f..5eba5fbbb56 100644 --- a/arch/arm/mach-sa1100/ssp.c +++ b/arch/arm/mach-sa1100/ssp.c @@ -23,6 +23,8 @@ #include <asm/hardware.h> #include <asm/hardware/ssp.h> +#define TIMEOUT 100000 + static irqreturn_t ssp_interrupt(int irq, void *dev_id, struct pt_regs *regs) { unsigned int status = Ser4SSSR; @@ -47,18 +49,27 @@ static irqreturn_t ssp_interrupt(int irq, void *dev_id, struct pt_regs *regs) * The caller is expected to perform the necessary locking. 
* * Returns: - * %-ETIMEDOUT timeout occurred (for future) + * %-ETIMEDOUT timeout occurred * 0 success */ int ssp_write_word(u16 data) { - while (!(Ser4SSSR & SSSR_TNF)) + int timeout = TIMEOUT; + + while (!(Ser4SSSR & SSSR_TNF)) { + if (!--timeout) + return -ETIMEDOUT; cpu_relax(); + } Ser4SSDR = data; - while (!(Ser4SSSR & SSSR_BSY)) + timeout = TIMEOUT; + while (!(Ser4SSSR & SSSR_BSY)) { + if (!--timeout) + return -ETIMEDOUT; cpu_relax(); + } return 0; } @@ -75,15 +86,22 @@ int ssp_write_word(u16 data) * The caller is expected to perform the necessary locking. * * Returns: - * %-ETIMEDOUT timeout occurred (for future) + * %-ETIMEDOUT timeout occurred * 16-bit data success */ -int ssp_read_word(void) +int ssp_read_word(u16 *data) { - while (!(Ser4SSSR & SSSR_RNE)) + int timeout = TIMEOUT; + + while (!(Ser4SSSR & SSSR_RNE)) { + if (!--timeout) + return -ETIMEDOUT; cpu_relax(); + } + + *data = (u16)Ser4SSDR; - return Ser4SSDR; + return 0; } /** @@ -93,14 +111,26 @@ int ssp_read_word(void) * is empty. * * The caller is expected to perform the necessary locking. + * + * Returns: + * %-ETIMEDOUT timeout occurred + * 0 success */ -void ssp_flush(void) +int ssp_flush(void) { + int timeout = TIMEOUT * 2; + do { while (Ser4SSSR & SSSR_RNE) { + if (!--timeout) + return -ETIMEDOUT; (void) Ser4SSDR; } + if (!--timeout) + return -ETIMEDOUT; } while (Ser4SSSR & SSSR_BSY); + + return 0; } /** diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index c4e3f8c6847..f2bbef07b1e 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -285,7 +285,7 @@ static struct flash_platform_data versatile_flash_data = { static struct resource versatile_flash_resource = { .start = VERSATILE_FLASH_BASE, - .end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE, + .end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE - 1, .flags = IORESOURCE_MEM, }; diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 5f80f184cd3..b4f220dd5eb 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -46,7 +46,7 @@ config CPU_ARM710 config CPU_ARM720T bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712 || ARCH_H720X - select CPU_32v4 + select CPU_32v4T select CPU_ABRT_LV4T select CPU_CACHE_V4 select CPU_CACHE_VIVT @@ -64,7 +64,7 @@ config CPU_ARM920T bool "Support ARM920T processor" depends on ARCH_EP93XX || ARCH_INTEGRATOR || CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_IMX || ARCH_AAEC2000 || ARCH_AT91RM9200 default y if CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_AT91RM9200 - select CPU_32v4 + select CPU_32v4T select CPU_ABRT_EV4T select CPU_CACHE_V4WT select CPU_CACHE_VIVT @@ -85,7 +85,7 @@ config CPU_ARM922T bool "Support ARM922T processor" if ARCH_INTEGRATOR depends on ARCH_LH7A40X || ARCH_INTEGRATOR default y if ARCH_LH7A40X - select CPU_32v4 + select CPU_32v4T select CPU_ABRT_EV4T select CPU_CACHE_V4WT select CPU_CACHE_VIVT @@ -104,7 +104,7 @@ config CPU_ARM925T bool "Support ARM925T processor" if ARCH_OMAP1 depends on ARCH_OMAP15XX default y if ARCH_OMAP15XX - select CPU_32v4 + select CPU_32v4T select CPU_ABRT_EV4T select CPU_CACHE_V4WT select CPU_CACHE_VIVT @@ -285,6 +285,11 @@ config CPU_32v4 select TLS_REG_EMUL if SMP || !MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP +config CPU_32v4T + bool + select TLS_REG_EMUL if SMP || !MMU + select NEEDS_SYSCALL_FOR_CMPXCHG if SMP + config CPU_32v5 bool select TLS_REG_EMUL if SMP || !MMU diff --git 
a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h index 4b97950984e..5fbdf81a8aa 100644 --- a/arch/arm/vfp/vfp.h +++ b/arch/arm/vfp/vfp.h @@ -353,3 +353,11 @@ u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand); * A special flag to tell the normalisation code not to normalise. */ #define VFP_NAN_FLAG 0x100 + +/* + * A bit pattern used to indicate the initial (unset) value of the + * exception mask, in case nothing handles an instruction. This + * doesn't include the NAN flag, which get masked out before + * we check for an error. + */ +#define VFP_EXCEPTION_ERROR ((u32)-1 & ~VFP_NAN_FLAG) diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c index 009038c8113..04bd3425b29 100644 --- a/arch/arm/vfp/vfpdouble.c +++ b/arch/arm/vfp/vfpdouble.c @@ -465,7 +465,7 @@ static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr) */ if (tm & (VFP_INFINITY|VFP_NAN)) { vsd.exponent = 255; - if (tm & VFP_NAN) + if (tm == VFP_QNAN) vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN; goto pack_nan; } else if (tm & VFP_ZERO) @@ -1127,7 +1127,7 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr) { u32 op = inst & FOP_MASK; u32 exceptions = 0; - unsigned int dd = vfp_get_dd(inst); + unsigned int dest; unsigned int dn = vfp_get_dn(inst); unsigned int dm = vfp_get_dm(inst); unsigned int vecitr, veclen, vecstride; @@ -1137,10 +1137,20 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr) vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK)) * 2; /* + * fcvtds takes an sN register number as destination, not dN. + * It also always operates on scalars. + */ + if ((inst & FEXT_MASK) == FEXT_FCVT) { + veclen = 0; + dest = vfp_get_sd(inst); + } else + dest = vfp_get_dd(inst); + + /* * If destination bank is zero, vector length is always '1'. * ARM DDI0100F C5.1.3, C5.3.2. */ - if (FREG_BANK(dd) == 0) + if (FREG_BANK(dest) == 0) veclen = 0; pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride, @@ -1153,16 +1163,20 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr) for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) { u32 except; - if (op == FOP_EXT) + if (op == FOP_EXT && (inst & FEXT_MASK) == FEXT_FCVT) + pr_debug("VFP: itr%d (s%u) = op[%u] (d%u)\n", + vecitr >> FPSCR_LENGTH_BIT, + dest, dn, dm); + else if (op == FOP_EXT) pr_debug("VFP: itr%d (d%u) = op[%u] (d%u)\n", vecitr >> FPSCR_LENGTH_BIT, - dd, dn, dm); + dest, dn, dm); else pr_debug("VFP: itr%d (d%u) = (d%u) op[%u] (d%u)\n", vecitr >> FPSCR_LENGTH_BIT, - dd, dn, FOP_TO_IDX(op), dm); + dest, dn, FOP_TO_IDX(op), dm); - except = fop(dd, dn, dm, fpscr); + except = fop(dest, dn, dm, fpscr); pr_debug("VFP: itr%d: exceptions=%08x\n", vecitr >> FPSCR_LENGTH_BIT, except); @@ -1180,7 +1194,7 @@ u32 vfp_double_cpdo(u32 inst, u32 fpscr) * we encounter an exception. We continue. 
*/ - dd = FREG_BANK(dd) + ((FREG_IDX(dd) + vecstride) & 6); + dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 6); dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 6); if (FREG_BANK(dm) != 0) dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 6); diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 9d265d5e748..4178f6cc3d3 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -131,7 +131,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ pr_debug("VFP: raising exceptions %08x\n", exceptions); - if (exceptions == (u32)-1) { + if (exceptions == VFP_EXCEPTION_ERROR) { vfp_panic("unhandled bounce"); vfp_raise_sigfpe(0, regs); return; @@ -170,7 +170,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ */ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) { - u32 exceptions = (u32)-1; + u32 exceptions = VFP_EXCEPTION_ERROR; pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr); diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c index dae2c2f4605..78d7cac5f36 100644 --- a/arch/arm/vfp/vfpsingle.c +++ b/arch/arm/vfp/vfpsingle.c @@ -506,7 +506,7 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr) */ if (tm & (VFP_INFINITY|VFP_NAN)) { vdd.exponent = 2047; - if (tm & VFP_NAN) + if (tm == VFP_QNAN) vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN; goto pack_nan; } else if (tm & VFP_ZERO) @@ -514,10 +514,6 @@ static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr) else vdd.exponent = vsm.exponent + (1023 - 127); - /* - * Technically, if bit 0 of dd is set, this is an invalid - * instruction. However, we ignore this for efficiency. - */ return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fcvtd"); pack_nan: @@ -1174,7 +1170,7 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr) { u32 op = inst & FOP_MASK; u32 exceptions = 0; - unsigned int sd = vfp_get_sd(inst); + unsigned int dest; unsigned int sn = vfp_get_sn(inst); unsigned int sm = vfp_get_sm(inst); unsigned int vecitr, veclen, vecstride; @@ -1184,10 +1180,22 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr) vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK); /* + * fcvtsd takes a dN register number as destination, not sN. + * Technically, if bit 0 of dd is set, this is an invalid + * instruction. However, we ignore this for efficiency. + * It also only operates on scalars. + */ + if ((inst & FEXT_MASK) == FEXT_FCVT) { + veclen = 0; + dest = vfp_get_dd(inst); + } else + dest = vfp_get_sd(inst); + + /* * If destination bank is zero, vector length is always '1'. * ARM DDI0100F C5.1.3, C5.3.2. 
*/ - if (FREG_BANK(sd) == 0) + if (FREG_BANK(dest) == 0) veclen = 0; pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride, @@ -1201,15 +1209,18 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr) s32 m = vfp_get_float(sm); u32 except; - if (op == FOP_EXT) + if (op == FOP_EXT && (inst & FEXT_MASK) == FEXT_FCVT) + pr_debug("VFP: itr%d (d%u) = op[%u] (s%u=%08x)\n", + vecitr >> FPSCR_LENGTH_BIT, dest, sn, sm, m); + else if (op == FOP_EXT) pr_debug("VFP: itr%d (s%u) = op[%u] (s%u=%08x)\n", - vecitr >> FPSCR_LENGTH_BIT, sd, sn, sm, m); + vecitr >> FPSCR_LENGTH_BIT, dest, sn, sm, m); else pr_debug("VFP: itr%d (s%u) = (s%u) op[%u] (s%u=%08x)\n", - vecitr >> FPSCR_LENGTH_BIT, sd, sn, + vecitr >> FPSCR_LENGTH_BIT, dest, sn, FOP_TO_IDX(op), sm, m); - except = fop(sd, sn, m, fpscr); + except = fop(dest, sn, m, fpscr); pr_debug("VFP: itr%d: exceptions=%08x\n", vecitr >> FPSCR_LENGTH_BIT, except); @@ -1227,7 +1238,7 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr) * we encounter an exception. We continue. */ - sd = FREG_BANK(sd) + ((FREG_IDX(sd) + vecstride) & 7); + dest = FREG_BANK(dest) + ((FREG_IDX(dest) + vecstride) & 7); sn = FREG_BANK(sn) + ((FREG_IDX(sn) + vecstride) & 7); if (FREG_BANK(sm) != 0) sm = FREG_BANK(sm) + ((FREG_IDX(sm) + vecstride) & 7); diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index f71fb4a029c..b2751eadbc5 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -142,6 +142,7 @@ config X86_SUMMIT In particular, it is needed for the x440. If you don't have one of these computers, you should say N here. + If you want to build a NUMA kernel, you must select ACPI. config X86_BIGSMP bool "Support for other sub-arch SMP systems with more than 8 CPUs" @@ -169,6 +170,7 @@ config X86_GENERICARCH help This option compiles in the Summit, bigsmp, ES7000, default subarchitectures. It is intended for a generic binary kernel. + If you want a NUMA kernel, select ACPI. We need SRAT for NUMA. config X86_ES7000 bool "Support for Unisys ES7000 IA32 series" @@ -542,7 +544,7 @@ config X86_PAE # Common NUMA Features config NUMA bool "Numa Memory Allocation and Scheduler Support" - depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI)) + depends on SMP && HIGHMEM64G && (X86_NUMAQ || (X86_SUMMIT || X86_GENERICARCH) && ACPI) default n if X86_PC default y if (X86_NUMAQ || X86_SUMMIT) diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index 0db6387025c..ee003bc0e8b 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c @@ -59,7 +59,7 @@ static inline int gsi_irq_sharing(int gsi) { return gsi; } #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ - ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) + ((acpi_table_entry_header *)entry)->length < sizeof(*entry)) #define PREFIX "ACPI: " diff --git a/arch/i386/kernel/acpi/wakeup.S b/arch/i386/kernel/acpi/wakeup.S index 9f408eee4e6..b781b38131c 100644 --- a/arch/i386/kernel/acpi/wakeup.S +++ b/arch/i386/kernel/acpi/wakeup.S @@ -292,7 +292,10 @@ ENTRY(do_suspend_lowlevel) pushl $3 call acpi_enter_sleep_state addl $4, %esp - ret + +# In case of S3 failure, we'll emerge here. 
Jump +# to ret_point to recover + jmp ret_point .p2align 4,,7 ret_point: call restore_registers diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c index efb41e81351..e6ea00edcb5 100644 --- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -567,16 +567,11 @@ static struct cpufreq_driver acpi_cpufreq_driver = { static int __init acpi_cpufreq_init (void) { - int result = 0; - dprintk("acpi_cpufreq_init\n"); - result = acpi_cpufreq_early_init_acpi(); + acpi_cpufreq_early_init_acpi(); - if (!result) - result = cpufreq_register_driver(&acpi_cpufreq_driver); - - return (result); + return cpufreq_register_driver(&acpi_cpufreq_driver); } diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S index eb79aa2fa8b..a6b8bd89aa2 100644 --- a/arch/i386/kernel/head.S +++ b/arch/i386/kernel/head.S @@ -317,20 +317,14 @@ is386: movl $2,%ecx # set MP movl %eax,%gs lldt %ax cld # gcc2 wants the direction flag cleared at all times + pushl %eax # fake return address #ifdef CONFIG_SMP movb ready, %cl movb $1, ready - cmpb $0,%cl - je 1f # the first CPU calls start_kernel - # all other CPUs call initialize_secondary - call initialize_secondary - jmp L6 -1: + cmpb $0,%cl # the first CPU calls start_kernel + jne initialize_secondary # all other CPUs call initialize_secondary #endif /* CONFIG_SMP */ - call start_kernel -L6: - jmp L6 # main should never return here, but - # just in case, we know what happens. + jmp start_kernel /* * We depend on ET to be correct. This checks for 287/387. diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c index 6cb529f60dc..5fe547cd8f9 100644 --- a/arch/i386/kernel/irq.c +++ b/arch/i386/kernel/irq.c @@ -82,10 +82,6 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs) } #endif - if (!irq_desc[irq].handle_irq) { - __do_IRQ(irq, regs); - goto out_exit; - } #ifdef CONFIG_4KSTACKS curctx = (union irq_ctx *) current_thread_info(); @@ -125,7 +121,6 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs) #endif __do_IRQ(irq, regs); -out_exit: irq_exit(); return 1; diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index f1682206d30..345ffb7d904 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -956,38 +956,6 @@ efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg) return 0; } - /* - * This function checks if the entire range <start,end> is mapped with type. 
- * - * Note: this function only works correct if the e820 table is sorted and - * not-overlapping, which is the case - */ -int __init -e820_all_mapped(unsigned long s, unsigned long e, unsigned type) -{ - u64 start = s; - u64 end = e; - int i; - for (i = 0; i < e820.nr_map; i++) { - struct e820entry *ei = &e820.map[i]; - if (type && ei->type != type) - continue; - /* is the region (part) in overlap with the current region ?*/ - if (ei->addr >= end || ei->addr + ei->size <= start) - continue; - /* if the region is at the beginning of <start,end> we move - * start to the end of the region since it's ok until there - */ - if (ei->addr <= start) - start = ei->addr + ei->size; - /* if start is now at or beyond end, we're done, full - * coverage */ - if (start >= end) - return 1; /* we're done */ - } - return 0; -} - /* * Find the highest page frame number we have available */ diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 0d4005dc06c..7e9edafffd8 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -92,7 +92,11 @@ asmlinkage void spurious_interrupt_bug(void); asmlinkage void machine_check(void); static int kstack_depth_to_print = 24; +#ifdef CONFIG_STACK_UNWIND static int call_trace = 1; +#else +#define call_trace (-1) +#endif ATOMIC_NOTIFIER_HEAD(i386die_chain); int register_die_notifier(struct notifier_block *nb) @@ -187,22 +191,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, if (unwind_init_blocked(&info, task) == 0) unw_ret = show_trace_unwind(&info, log_lvl); } - if (unw_ret > 0 && !arch_unw_user_mode(&info)) { -#ifdef CONFIG_STACK_UNWIND - print_symbol("DWARF2 unwinder stuck at %s\n", - UNW_PC(&info)); - if (call_trace == 1) { - printk("Leftover inexact backtrace:\n"); - if (UNW_SP(&info)) + if (unw_ret > 0) { + if (call_trace == 1 && !arch_unw_user_mode(&info)) { + print_symbol("DWARF2 unwinder stuck at %s\n", + UNW_PC(&info)); + if (UNW_SP(&info) >= PAGE_OFFSET) { + printk("Leftover inexact backtrace:\n"); stack = (void *)UNW_SP(&info); - } else if (call_trace > 1) + } else + printk("Full inexact backtrace again:\n"); + } else if (call_trace >= 1) return; else printk("Full inexact backtrace again:\n"); -#else + } else printk("Inexact backtrace:\n"); -#endif - } } if (task == current) { @@ -454,7 +457,7 @@ void die(const char * str, struct pt_regs * regs, long err) panic("Fatal exception in interrupt"); if (panic_on_oops) - panic("Fatal exception: panic_on_oops"); + panic("Fatal exception"); oops_exit(); do_exit(SIGSEGV); @@ -1241,6 +1244,7 @@ static int __init kstack_setup(char *s) } __setup("kstack=", kstack_setup); +#ifdef CONFIG_STACK_UNWIND static int __init call_trace_setup(char *s) { if (strcmp(s, "old") == 0) @@ -1254,3 +1258,4 @@ static int __init call_trace_setup(char *s) return 1; } __setup("call_trace=", call_trace_setup); +#endif diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c index 0a362e3aeac..1220dd828ce 100644 --- a/arch/i386/pci/common.c +++ b/arch/i386/pci/common.c @@ -237,6 +237,11 @@ char * __devinit pcibios_setup(char *str) pci_probe &= ~PCI_PROBE_MMCONF; return NULL; } + /* override DMI blacklist */ + else if (!strcmp(str, "mmconf")) { + pci_probe |= PCI_PROBE_MMCONF_FORCE; + return NULL; + } #endif else if (!strcmp(str, "noacpi")) { acpi_noirq_set(); diff --git a/arch/i386/pci/init.c b/arch/i386/pci/init.c index c7650a7e0b0..51087a9d917 100644 --- a/arch/i386/pci/init.c +++ b/arch/i386/pci/init.c @@ -14,8 +14,12 @@ static __init int pci_access_init(void) #ifdef 
CONFIG_PCI_BIOS pci_pcbios_init(); #endif - if (raw_pci_ops) - return 0; + /* + * don't check for raw_pci_ops here because we want pcbios as last + * fallback, yet it's needed to run first to set pcibios_last_bus + * in case legacy PCI probing is used. otherwise detecting peer busses + * fails. + */ #ifdef CONFIG_PCI_DIRECT pci_direct_init(); #endif diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c index e545b0992c4..ef5a2faa7d8 100644 --- a/arch/i386/pci/mmconfig.c +++ b/arch/i386/pci/mmconfig.c @@ -12,6 +12,7 @@ #include <linux/pci.h> #include <linux/init.h> #include <linux/acpi.h> +#include <linux/dmi.h> #include <asm/e820.h> #include "pci.h" @@ -178,7 +179,7 @@ static __init void unreachable_devices(void) pci_exp_set_dev_base(addr, k, PCI_DEVFN(i, 0)); if (addr == 0 || readl((u32 __iomem *)mmcfg_virt_addr) != val1) { - set_bit(i, fallback_slots); + set_bit(i + 32*k, fallback_slots); printk(KERN_NOTICE "PCI: No mmconfig possible on %x:%x\n", k, i); } @@ -187,9 +188,31 @@ static __init void unreachable_devices(void) } } +static int disable_mcfg(struct dmi_system_id *d) +{ + printk("PCI: %s detected. Disabling MCFG.\n", d->ident); + pci_probe &= ~PCI_PROBE_MMCONF; + return 0; +} + +static struct dmi_system_id __initdata dmi_bad_mcfg[] = { + /* Has broken MCFG table that makes the system hang when used */ + { + .callback = disable_mcfg, + .ident = "Intel D3C5105 SDV", + .matches = { + DMI_MATCH(DMI_BIOS_VENDOR, "Intel"), + DMI_MATCH(DMI_BOARD_NAME, "D26928"), + }, + }, + {} +}; + void __init pci_mmcfg_init(void) { - if ((pci_probe & PCI_PROBE_MMCONF) == 0) + dmi_check_system(dmi_bad_mcfg); + + if ((pci_probe & (PCI_PROBE_MMCONF_FORCE|PCI_PROBE_MMCONF)) == 0) return; acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); @@ -198,15 +221,6 @@ void __init pci_mmcfg_init(void) (pci_mmcfg_config[0].base_address == 0)) return; - if (!e820_all_mapped(pci_mmcfg_config[0].base_address, - pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, - E820_RESERVED)) { - printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", - pci_mmcfg_config[0].base_address); - printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); - return; - } - printk(KERN_INFO "PCI: Using MMCONFIG\n"); raw_pci_ops = &pci_mmcfg; pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; diff --git a/arch/i386/pci/pci.h b/arch/i386/pci/pci.h index bf4e7933538..49a849b3a24 100644 --- a/arch/i386/pci/pci.h +++ b/arch/i386/pci/pci.h @@ -16,7 +16,8 @@ #define PCI_PROBE_CONF1 0x0002 #define PCI_PROBE_CONF2 0x0004 #define PCI_PROBE_MMCONF 0x0008 -#define PCI_PROBE_MASK 0x000f +#define PCI_PROBE_MMCONF_FORCE 0x0010 +#define PCI_PROBE_MASK 0x00ff #define PCI_NO_SORT 0x0100 #define PCI_BIOS_SORT 0x0200 diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 47de9ee6bcd..674de894347 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -258,7 +258,7 @@ config NR_CPUS int "Maximum number of CPUs (2-1024)" range 2 1024 depends on SMP - default "64" + default "1024" help You should set this to the number of CPUs in your system, but keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but @@ -354,7 +354,7 @@ config NUMA config NODES_SHIFT int "Max num nodes shift(3-10)" range 3 10 - default "8" + default "10" depends on NEED_MULTIPLE_NODES help This option specifies the maximum number of nodes in your SSI system. 
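The NODES_SHIFT option in the hunk just above is a shift count rather than a node count, which is easy to misread. A quick worked example, plain user-space arithmetic assuming the usual MAX_NUMNODES = 1 << NODES_SHIFT derivation, shows why raising the default from 8 to 10 tracks the NR_CPUS default moving to 1024 in the same file.

#include <stdio.h>

/* Worked example only: how a "nodes shift" Kconfig value bounds the node
 * count, assuming the kernel derives its limit as 1 << NODES_SHIFT. */
int main(void)
{
	int old_shift = 8;	/* previous default in the hunk above */
	int new_shift = 10;	/* new default */

	printf("NODES_SHIFT=%d -> up to %d nodes\n", old_shift, 1 << old_shift);
	printf("NODES_SHIFT=%d -> up to %d nodes\n", new_shift, 1 << new_shift);
	return 0;
}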
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c index 8a4f0d0d17a..8f0a16a79a6 100644 --- a/arch/ia64/hp/sim/simscsi.c +++ b/arch/ia64/hp/sim/simscsi.c @@ -244,7 +244,8 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len) if (scatterlen == 0) memcpy(sc->request_buffer, buf, len); - else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) { + else for (slp = (struct scatterlist *)sc->request_buffer; + scatterlen-- > 0 && len > 0; slp++) { unsigned thislen = min(len, slp->length); memcpy(page_address(slp->page) + slp->offset, buf, thislen); diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 99761b81db4..0176556aeec 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -55,7 +55,7 @@ #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ - ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) + ((acpi_table_entry_header *)entry)->length < sizeof(*entry)) #define PREFIX "ACPI: " diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index d24fa393b18..f648c610b10 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -67,10 +67,8 @@ static int __init topology_init(void) #endif sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL); - if (!sysfs_cpus) { - err = -ENOMEM; - goto out; - } + if (!sysfs_cpus) + panic("kzalloc in topology_init failed - NR_CPUS too big?"); for_each_present_cpu(i) { if((err = arch_register_cpu(i))) diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 5a0420464c6..fffa9e0826b 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -118,7 +118,7 @@ die (const char *str, struct pt_regs *regs, long err) spin_unlock_irq(&die.lock); if (panic_on_oops) - panic("Fatal exception: panic_on_oops"); + panic("Fatal exception"); do_exit(SIGSEGV); } diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c index c2f69f7942a..1f3540826e6 100644 --- a/arch/ia64/sn/kernel/xpc_channel.c +++ b/arch/ia64/sn/kernel/xpc_channel.c @@ -279,8 +279,8 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, return part->reason; } - bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst), - (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL); + bte_ret = xp_bte_copy((u64) src, (u64) dst, (u64) cnt, + (BTE_NORMAL | BTE_WACQUIRE), NULL); if (bte_ret == BTE_SUCCESS) { return xpcSuccess; } diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c index 5e8e59efb34..4d026f9dd98 100644 --- a/arch/ia64/sn/kernel/xpc_main.c +++ b/arch/ia64/sn/kernel/xpc_main.c @@ -1052,6 +1052,8 @@ xpc_do_exit(enum xpc_retval reason) if (xpc_sysctl) { unregister_sysctl_table(xpc_sysctl); } + + kfree(xpc_remote_copy_buffer_base); } @@ -1212,24 +1214,20 @@ xpc_init(void) partid_t partid; struct xpc_partition *part; pid_t pid; + size_t buf_size; if (!ia64_platform_is("sn2")) { return -ENODEV; } - /* - * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng - * various portions of a partition's reserved page. Its size is based - * on the size of the reserved page header and part_nasids mask. So we - * need to ensure that the other items will fit as well. 
- */ - if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) { - dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n"); - return -EPERM; - } - DBUG_ON((u64) xpc_remote_copy_buffer != - L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer)); + + buf_size = max(XPC_RP_VARS_SIZE, + XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); + xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, + GFP_KERNEL, &xpc_remote_copy_buffer_base); + if (xpc_remote_copy_buffer == NULL) + return -ENOMEM; snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); @@ -1293,6 +1291,8 @@ xpc_init(void) if (xpc_sysctl) { unregister_sysctl_table(xpc_sysctl); } + + kfree(xpc_remote_copy_buffer_base); return -EBUSY; } @@ -1311,6 +1311,8 @@ xpc_init(void) if (xpc_sysctl) { unregister_sysctl_table(xpc_sysctl); } + + kfree(xpc_remote_copy_buffer_base); return -EBUSY; } @@ -1362,6 +1364,8 @@ xpc_init(void) if (xpc_sysctl) { unregister_sysctl_table(xpc_sysctl); } + + kfree(xpc_remote_copy_buffer_base); return -EBUSY; } diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c index 2a89cfce495..57c723f5cba 100644 --- a/arch/ia64/sn/kernel/xpc_partition.c +++ b/arch/ia64/sn/kernel/xpc_partition.c @@ -71,19 +71,15 @@ struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; * Generic buffer used to store a local copy of portions of a remote * partition's reserved page (either its header and part_nasids mask, * or its vars). - * - * xpc_discovery runs only once and is a seperate thread that is - * very likely going to be processing in parallel with receiving - * interrupts. */ -char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE + - XP_NASID_MASK_BYTES]; +char *xpc_remote_copy_buffer; +void *xpc_remote_copy_buffer_base; /* * Guarantee that the kmalloc'd memory is cacheline aligned. 
*/ -static void * +void * xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) { /* see if kmalloc will give us cachline aligned memory by default */ @@ -148,7 +144,7 @@ xpc_get_rsvd_page_pa(int nasid) } } - bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len, + bte_res = xp_bte_copy(rp_pa, buf, buf_len, (BTE_NOTIFY | BTE_WACQUIRE), NULL); if (bte_res != BTE_SUCCESS) { dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); @@ -447,7 +443,7 @@ xpc_check_remote_hb(void) /* pull the remote_hb cache line */ bres = xp_bte_copy(part->remote_vars_pa, - ia64_tpa((u64) remote_vars), + (u64) remote_vars, XPC_RP_VARS_SIZE, (BTE_NOTIFY | BTE_WACQUIRE), NULL); if (bres != BTE_SUCCESS) { @@ -498,8 +494,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, /* pull over the reserved page header and part_nasids mask */ - - bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp), + bres = xp_bte_copy(*remote_rp_pa, (u64) remote_rp, XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL); if (bres != BTE_SUCCESS) { @@ -554,11 +549,8 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) return xpcVarsNotSet; } - /* pull over the cross partition variables */ - - bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars), - XPC_RP_VARS_SIZE, + bres = xp_bte_copy(remote_vars_pa, (u64) remote_vars, XPC_RP_VARS_SIZE, (BTE_NOTIFY | BTE_WACQUIRE), NULL); if (bres != BTE_SUCCESS) { return xpc_map_bte_errors(bres); @@ -1239,7 +1231,7 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa); - bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask), + bte_res = xp_bte_copy(part_nasid_pa, (u64) nasid_mask, xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL); return xpc_map_bte_errors(bte_res); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 4d0b4e74d57..9b352bd0a46 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -148,7 +148,7 @@ int die(const char *str, struct pt_regs *regs, long err) panic("Fatal exception in interrupt"); if (panic_on_oops) - panic("Fatal exception: panic_on_oops"); + panic("Fatal exception"); do_exit(err); diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c index d90cd24d018..94badafe4ef 100644 --- a/arch/ppc/platforms/85xx/mpc8560_ads.c +++ b/arch/ppc/platforms/85xx/mpc8560_ads.c @@ -29,6 +29,7 @@ #include <linux/initrd.h> #include <linux/module.h> #include <linux/fsl_devices.h> +#include <linux/fs_enet_pd.h> #include <asm/system.h> #include <asm/pgtable.h> @@ -58,6 +59,71 @@ * Setup the architecture * */ +static void init_fcc_ioports(void) +{ + struct immap *immap; + struct io_port *io; + u32 tempval; + + immap = cpm2_immr; + + io = &immap->im_ioport; + /* FCC2/3 are on the ports B/C. 
*/ + tempval = in_be32(&io->iop_pdirb); + tempval &= ~PB2_DIRB0; + tempval |= PB2_DIRB1; + out_be32(&io->iop_pdirb, tempval); + + tempval = in_be32(&io->iop_psorb); + tempval &= ~PB2_PSORB0; + tempval |= PB2_PSORB1; + out_be32(&io->iop_psorb, tempval); + + tempval = in_be32(&io->iop_pparb); + tempval |= (PB2_DIRB0 | PB2_DIRB1); + out_be32(&io->iop_pparb, tempval); + + tempval = in_be32(&io->iop_pdirb); + tempval &= ~PB3_DIRB0; + tempval |= PB3_DIRB1; + out_be32(&io->iop_pdirb, tempval); + + tempval = in_be32(&io->iop_psorb); + tempval &= ~PB3_PSORB0; + tempval |= PB3_PSORB1; + out_be32(&io->iop_psorb, tempval); + + tempval = in_be32(&io->iop_pparb); + tempval |= (PB3_DIRB0 | PB3_DIRB1); + out_be32(&io->iop_pparb, tempval); + + tempval = in_be32(&io->iop_pdirc); + tempval |= PC3_DIRC1; + out_be32(&io->iop_pdirc, tempval); + + tempval = in_be32(&io->iop_pparc); + tempval |= PC3_DIRC1; + out_be32(&io->iop_pparc, tempval); + + /* Port C has clocks...... */ + tempval = in_be32(&io->iop_psorc); + tempval &= ~(CLK_TRX); + out_be32(&io->iop_psorc, tempval); + + tempval = in_be32(&io->iop_pdirc); + tempval &= ~(CLK_TRX); + out_be32(&io->iop_pdirc, tempval); + tempval = in_be32(&io->iop_pparc); + tempval |= (CLK_TRX); + out_be32(&io->iop_pparc, tempval); + + /* Configure Serial Interface clock routing. + * First, clear all FCC bits to zero, + * then set the ones we want. + */ + immap->im_cpmux.cmx_fcr &= ~(CPMUX_CLK_MASK); + immap->im_cpmux.cmx_fcr |= CPMUX_CLK_ROUTE; +} static void __init mpc8560ads_setup_arch(void) @@ -66,6 +132,7 @@ mpc8560ads_setup_arch(void) unsigned int freq; struct gianfar_platform_data *pdata; struct gianfar_mdio_data *mdata; + struct fs_platform_info *fpi; cpm2_reset(); @@ -110,6 +177,28 @@ mpc8560ads_setup_arch(void) memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); } + init_fcc_ioports(); + ppc_sys_device_remove(MPC85xx_CPM_FCC1); + + fpi = (struct fs_platform_info *) ppc_sys_get_pdata(MPC85xx_CPM_FCC2); + if (fpi) { + memcpy(fpi->macaddr, binfo->bi_enet2addr, 6); + fpi->bus_id = "0:02"; + fpi->phy_addr = 2; + fpi->dpram_offset = (u32)cpm2_immr->im_dprambase; + fpi->fcc_regs_c = (u32)&cpm2_immr->im_fcc_c[1]; + } + + fpi = (struct fs_platform_info *) ppc_sys_get_pdata(MPC85xx_CPM_FCC3); + if (fpi) { + memcpy(fpi->macaddr, binfo->bi_enet2addr, 6); + fpi->macaddr[5] += 1; + fpi->bus_id = "0:03"; + fpi->phy_addr = 3; + fpi->dpram_offset = (u32)cpm2_immr->im_dprambase; + fpi->fcc_regs_c = (u32)&cpm2_immr->im_fcc_c[2]; + } + #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) ROOT_DEV = Root_RAM0; diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h index abf32281655..c8c322fe368 100644 --- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h +++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h @@ -45,4 +45,23 @@ extern void mpc85xx_ads_map_io(void) __init; #define MPC85XX_PCI1_IO_SIZE 0x01000000 +/* FCC1 Clock Source Configuration. These can be + * redefined in the board specific file. + * Can only choose from CLK9-12 */ +#define F1_RXCLK 12 +#define F1_TXCLK 11 + +/* FCC2 Clock Source Configuration. These can be + * redefined in the board specific file. + * Can only choose from CLK13-16 */ +#define F2_RXCLK 13 +#define F2_TXCLK 14 + +/* FCC3 Clock Source Configuration. These can be + * redefined in the board specific file. 
+ * Can only choose from CLK13-16 */ +#define F3_RXCLK 15 +#define F3_TXCLK 16 + + #endif /* __MACH_MPC85XX_ADS_H__ */ diff --git a/arch/ppc/platforms/mpc8272ads_setup.c b/arch/ppc/platforms/mpc8272ads_setup.c index abb7154de2c..2a35fe2b9b9 100644 --- a/arch/ppc/platforms/mpc8272ads_setup.c +++ b/arch/ppc/platforms/mpc8272ads_setup.c @@ -56,64 +56,51 @@ static struct fs_uart_platform_info mpc8272_uart_pdata[] = { }, }; -static struct fs_mii_bus_info mii_bus_info = { - .method = fsmii_bitbang, - .id = 0, - .i.bitbang = { - .mdio_port = fsiop_portc, - .mdio_bit = 18, - .mdc_port = fsiop_portc, - .mdc_bit = 19, - .delay = 1, - }, -}; - -static struct fs_platform_info mpc82xx_fcc1_pdata = { - .fs_no = fsid_fcc1, - .cp_page = CPM_CR_FCC1_PAGE, - .cp_block = CPM_CR_FCC1_SBLOCK, - .clk_trx = (PC_F1RXCLK | PC_F1TXCLK), - .clk_route = CMX1_CLK_ROUTE, - .clk_mask = CMX1_CLK_MASK, - .init_ioports = init_fcc1_ioports, - - .phy_addr = 0, -#ifdef PHY_INTERRUPT - .phy_irq = PHY_INTERRUPT, -#else - .phy_irq = -1; -#endif - .mem_offset = FCC1_MEM_OFFSET, - .bus_info = &mii_bus_info, - .rx_ring = 32, - .tx_ring = 32, - .rx_copybreak = 240, - .use_napi = 0, - .napi_weight = 17, +static struct fs_mii_bb_platform_info m82xx_mii_bb_pdata = { + .mdio_dat.bit = 18, + .mdio_dir.bit = 18, + .mdc_dat.bit = 19, + .delay = 1, }; -static struct fs_platform_info mpc82xx_fcc2_pdata = { - .fs_no = fsid_fcc2, - .cp_page = CPM_CR_FCC2_PAGE, - .cp_block = CPM_CR_FCC2_SBLOCK, - .clk_trx = (PC_F2RXCLK | PC_F2TXCLK), - .clk_route = CMX2_CLK_ROUTE, - .clk_mask = CMX2_CLK_MASK, - .init_ioports = init_fcc2_ioports, - - .phy_addr = 3, -#ifdef PHY_INTERRUPT - .phy_irq = PHY_INTERRUPT, -#else - .phy_irq = -1; -#endif - .mem_offset = FCC2_MEM_OFFSET, - .bus_info = &mii_bus_info, - .rx_ring = 32, - .tx_ring = 32, - .rx_copybreak = 240, - .use_napi = 0, - .napi_weight = 17, +static struct fs_platform_info mpc82xx_enet_pdata[] = { + [fsid_fcc1] = { + .fs_no = fsid_fcc1, + .cp_page = CPM_CR_FCC1_PAGE, + .cp_block = CPM_CR_FCC1_SBLOCK, + + .clk_trx = (PC_F1RXCLK | PC_F1TXCLK), + .clk_route = CMX1_CLK_ROUTE, + .clk_mask = CMX1_CLK_MASK, + .init_ioports = init_fcc1_ioports, + + .mem_offset = FCC1_MEM_OFFSET, + + .rx_ring = 32, + .tx_ring = 32, + .rx_copybreak = 240, + .use_napi = 0, + .napi_weight = 17, + .bus_id = "0:00", + }, + [fsid_fcc2] = { + .fs_no = fsid_fcc2, + .cp_page = CPM_CR_FCC2_PAGE, + .cp_block = CPM_CR_FCC2_SBLOCK, + .clk_trx = (PC_F2RXCLK | PC_F2TXCLK), + .clk_route = CMX2_CLK_ROUTE, + .clk_mask = CMX2_CLK_MASK, + .init_ioports = init_fcc2_ioports, + + .mem_offset = FCC2_MEM_OFFSET, + + .rx_ring = 32, + .tx_ring = 32, + .rx_copybreak = 240, + .use_napi = 0, + .napi_weight = 17, + .bus_id = "0:03", + }, }; static void init_fcc1_ioports(void) @@ -209,20 +196,21 @@ static void __init mpc8272ads_fixup_enet_pdata(struct platform_device *pdev, bd_t* bi = (void*)__res; int fs_no = fsid_fcc1+pdev->id-1; - mpc82xx_fcc1_pdata.dpram_offset = mpc82xx_fcc2_pdata.dpram_offset = (u32)cpm2_immr->im_dprambase; - mpc82xx_fcc1_pdata.fcc_regs_c = mpc82xx_fcc2_pdata.fcc_regs_c = (u32)cpm2_immr->im_fcc_c; - - switch(fs_no) { - case fsid_fcc1: - memcpy(&mpc82xx_fcc1_pdata.macaddr,bi->bi_enetaddr,6); - pdev->dev.platform_data = &mpc82xx_fcc1_pdata; - break; - case fsid_fcc2: - memcpy(&mpc82xx_fcc2_pdata.macaddr,bi->bi_enetaddr,6); - mpc82xx_fcc2_pdata.macaddr[5] ^= 1; - pdev->dev.platform_data = &mpc82xx_fcc2_pdata; - break; + if(fs_no > ARRAY_SIZE(mpc82xx_enet_pdata)) { + return; } + + mpc82xx_enet_pdata[fs_no].dpram_offset= + 
(u32)cpm2_immr->im_dprambase; + mpc82xx_enet_pdata[fs_no].fcc_regs_c = + (u32)cpm2_immr->im_fcc_c; + memcpy(&mpc82xx_enet_pdata[fs_no].macaddr,bi->bi_enetaddr,6); + + /* prevent dup mac */ + if(fs_no == fsid_fcc2) + mpc82xx_enet_pdata[fs_no].macaddr[5] ^= 1; + + pdev->dev.platform_data = &mpc82xx_enet_pdata[fs_no]; } static void mpc8272ads_fixup_uart_pdata(struct platform_device *pdev, @@ -274,6 +262,29 @@ static void init_scc4_uart_ioports(void) iounmap(immap); } +static void __init mpc8272ads_fixup_mdio_pdata(struct platform_device *pdev, + int idx) +{ + m82xx_mii_bb_pdata.irq[0] = PHY_INTERRUPT; + m82xx_mii_bb_pdata.irq[1] = -1; + m82xx_mii_bb_pdata.irq[2] = -1; + m82xx_mii_bb_pdata.irq[3] = PHY_INTERRUPT; + m82xx_mii_bb_pdata.irq[31] = -1; + + + m82xx_mii_bb_pdata.mdio_dat.offset = + (u32)&cpm2_immr->im_ioport.iop_pdatc; + + m82xx_mii_bb_pdata.mdio_dir.offset = + (u32)&cpm2_immr->im_ioport.iop_pdirc; + + m82xx_mii_bb_pdata.mdc_dat.offset = + (u32)&cpm2_immr->im_ioport.iop_pdatc; + + + pdev->dev.platform_data = &m82xx_mii_bb_pdata; +} + static int mpc8272ads_platform_notify(struct device *dev) { static const struct platform_notify_dev_map dev_map[] = { @@ -286,6 +297,10 @@ static int mpc8272ads_platform_notify(struct device *dev) .rtn = mpc8272ads_fixup_uart_pdata, }, { + .bus_id = "fsl-bb-mdio", + .rtn = mpc8272ads_fixup_mdio_pdata, + }, + { .bus_id = NULL } }; @@ -319,6 +334,7 @@ int __init mpc8272ads_init(void) ppc_sys_device_enable(MPC82xx_CPM_SCC4); #endif + ppc_sys_device_enable(MPC82xx_MDIO_BB); return 0; } diff --git a/arch/ppc/platforms/mpc866ads_setup.c b/arch/ppc/platforms/mpc866ads_setup.c index f19b6167c77..e12cece4c9f 100644 --- a/arch/ppc/platforms/mpc866ads_setup.c +++ b/arch/ppc/platforms/mpc866ads_setup.c @@ -1,10 +1,10 @@ -/*arch/ppc/platforms/mpc885ads-setup.c +/*arch/ppc/platforms/mpc866ads-setup.c * - * Platform setup for the Freescale mpc885ads board + * Platform setup for the Freescale mpc866ads board * * Vitaly Bordug <vbordug@ru.mvista.com> * - * Copyright 2005 MontaVista Software Inc. + * Copyright 2005-2006 MontaVista Software Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. 
This program is licensed "as is" without any warranty of any @@ -42,49 +42,36 @@ static void setup_scc1_ioports(void); static void setup_smc1_ioports(void); static void setup_smc2_ioports(void); -static struct fs_mii_bus_info fec_mii_bus_info = { - .method = fsmii_fec, - .id = 0, -}; - -static struct fs_mii_bus_info scc_mii_bus_info = { - .method = fsmii_fixed, - .id = 0, - .i.fixed.speed = 10, - .i.fixed.duplex = 0, -}; +static struct fs_mii_fec_platform_info mpc8xx_mdio_fec_pdata; -static struct fs_platform_info mpc8xx_fec_pdata[] = { - { - .rx_ring = 128, - .tx_ring = 16, - .rx_copybreak = 240, +static struct fs_mii_fec_platform_info mpc8xx_mdio_fec_pdata; - .use_napi = 1, - .napi_weight = 17, +static struct fs_platform_info mpc8xx_enet_pdata[] = { + [fsid_fec1] = { + .rx_ring = 128, + .tx_ring = 16, + .rx_copybreak = 240, - .phy_addr = 15, - .phy_irq = -1, + .use_napi = 1, + .napi_weight = 17, - .use_rmii = 0, + .init_ioports = setup_fec1_ioports, - .bus_info = &fec_mii_bus_info, - } -}; + .bus_id = "0:0f", + .has_phy = 1, + }, + [fsid_scc1] = { + .rx_ring = 64, + .tx_ring = 8, + .rx_copybreak = 240, + .use_napi = 1, + .napi_weight = 17, -static struct fs_platform_info mpc8xx_scc_pdata = { - .rx_ring = 64, - .tx_ring = 8, - .rx_copybreak = 240, - .use_napi = 1, - .napi_weight = 17, - - .phy_addr = -1, - .phy_irq = -1, - - .bus_info = &scc_mii_bus_info, + .init_ioports = setup_scc1_ioports, + .bus_id = "fixed@100:1", + }, }; static struct fs_uart_platform_info mpc866_uart_pdata[] = { @@ -207,63 +194,6 @@ static void setup_scc1_ioports(void) } -static void mpc866ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no) -{ - struct fs_platform_info *fpi = pdev->dev.platform_data; - - volatile cpm8xx_t *cp; - bd_t *bd = (bd_t *) __res; - char *e; - int i; - - /* Get pointer to Communication Processor */ - cp = cpmp; - switch (fs_no) { - case fsid_fec1: - fpi = &mpc8xx_fec_pdata[0]; - fpi->init_ioports = &setup_fec1_ioports; - - break; - case fsid_scc1: - fpi = &mpc8xx_scc_pdata; - fpi->init_ioports = &setup_scc1_ioports; - - break; - default: - printk(KERN_WARNING"Device %s is not supported!\n", pdev->name); - return; - } - - pdev->dev.platform_data = fpi; - fpi->fs_no = fs_no; - - e = (unsigned char *)&bd->bi_enetaddr; - for (i = 0; i < 6; i++) - fpi->macaddr[i] = *e++; - - fpi->macaddr[5 - pdev->id]++; - -} - -static void mpc866ads_fixup_fec_enet_pdata(struct platform_device *pdev, - int idx) -{ - /* This is for FEC devices only */ - if (!pdev || !pdev->name || (!strstr(pdev->name, "fsl-cpm-fec"))) - return; - mpc866ads_fixup_enet_pdata(pdev, fsid_fec1 + pdev->id - 1); -} - -static void mpc866ads_fixup_scc_enet_pdata(struct platform_device *pdev, - int idx) -{ - /* This is for SCC devices only */ - if (!pdev || !pdev->name || (!strstr(pdev->name, "fsl-cpm-scc"))) - return; - - mpc866ads_fixup_enet_pdata(pdev, fsid_scc1 + pdev->id - 1); -} - static void setup_smc1_ioports(void) { immap_t *immap = (immap_t *) IMAP_ADDR; @@ -315,6 +245,56 @@ static void setup_smc2_ioports(void) } +static int ma_count = 0; + +static void mpc866ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no) +{ + struct fs_platform_info *fpi; + + volatile cpm8xx_t *cp; + bd_t *bd = (bd_t *) __res; + char *e; + int i; + + /* Get pointer to Communication Processor */ + cp = cpmp; + + if(fs_no > ARRAY_SIZE(mpc8xx_enet_pdata)) { + printk(KERN_ERR"No network-suitable #%d device on bus", fs_no); + return; + } + + + fpi = &mpc8xx_enet_pdata[fs_no]; + fpi->fs_no = fs_no; + pdev->dev.platform_data = fpi; + + e = 
(unsigned char *)&bd->bi_enetaddr; + for (i = 0; i < 6; i++) + fpi->macaddr[i] = *e++; + + fpi->macaddr[5] += ma_count++; +} + +static void mpc866ads_fixup_fec_enet_pdata(struct platform_device *pdev, + int idx) +{ + /* This is for FEC devices only */ + if (!pdev || !pdev->name || (!strstr(pdev->name, "fsl-cpm-fec"))) + return; + mpc866ads_fixup_enet_pdata(pdev, fsid_fec1 + pdev->id - 1); +} + +static void mpc866ads_fixup_scc_enet_pdata(struct platform_device *pdev, + int idx) +{ + /* This is for SCC devices only */ + if (!pdev || !pdev->name || (!strstr(pdev->name, "fsl-cpm-scc"))) + return; + + mpc866ads_fixup_enet_pdata(pdev, fsid_scc1 + pdev->id - 1); +} + static void __init mpc866ads_fixup_uart_pdata(struct platform_device *pdev, int idx) { @@ -359,6 +339,9 @@ static int mpc866ads_platform_notify(struct device *dev) int __init mpc866ads_init(void) { + bd_t *bd = (bd_t *) __res; + struct fs_mii_fec_platform_info* fmpi; + printk(KERN_NOTICE "mpc866ads: Init\n"); platform_notify = mpc866ads_platform_notify; @@ -366,11 +349,20 @@ int __init mpc866ads_init(void) ppc_sys_device_initfunc(); ppc_sys_device_disable_all(); -#ifdef MPC8xx_SECOND_ETH_SCC1 +#ifdef CONFIG_MPC8xx_SECOND_ETH_SCC1 ppc_sys_device_enable(MPC8xx_CPM_SCC1); #endif ppc_sys_device_enable(MPC8xx_CPM_FEC1); + ppc_sys_device_enable(MPC8xx_MDIO_FEC); + + fmpi = ppc_sys_platform_devices[MPC8xx_MDIO_FEC].dev.platform_data = + &mpc8xx_mdio_fec_pdata; + + fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1; + /* No PHY interrupt line here */ + fmpi->irq[0xf] = -1; + /* Since either of the uarts could be used as console, they need to ready */ #ifdef CONFIG_SERIAL_CPM_SMC1 ppc_sys_device_enable(MPC8xx_CPM_SMC1); @@ -381,6 +373,14 @@ int __init mpc866ads_init(void) ppc_sys_device_enable(MPC8xx_CPM_SMC2); ppc_sys_device_setfunc(MPC8xx_CPM_SMC2, PPC_SYS_FUNC_UART); #endif + ppc_sys_device_enable(MPC8xx_MDIO_FEC); + + fmpi = ppc_sys_platform_devices[MPC8xx_MDIO_FEC].dev.platform_data = + &mpc8xx_mdio_fec_pdata; + + fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1; + /* No PHY interrupt line here */ + fmpi->irq[0xf] = -1; return 0; } diff --git a/arch/ppc/platforms/mpc885ads_setup.c b/arch/ppc/platforms/mpc885ads_setup.c index c1fc4a16fea..5dfa4e6c2af 100644 --- a/arch/ppc/platforms/mpc885ads_setup.c +++ b/arch/ppc/platforms/mpc885ads_setup.c @@ -38,7 +38,10 @@ extern unsigned char __res[]; static void setup_smc1_ioports(void); static void setup_smc2_ioports(void); -static void __init mpc885ads_scc_phy_init(char); +static struct fs_mii_fec_platform_info mpc8xx_mdio_fec_pdata; +static void setup_fec1_ioports(void); +static void setup_fec2_ioports(void); +static void setup_scc3_ioports(void); static struct fs_uart_platform_info mpc885_uart_pdata[] = { [fsid_smc1_uart] = { @@ -61,23 +64,8 @@ static struct fs_uart_platform_info mpc885_uart_pdata[] = { }, }; -static struct fs_mii_bus_info fec_mii_bus_info = { - .method = fsmii_fec, - .id = 0, -}; - -static struct fs_mii_bus_info scc_mii_bus_info = { -#ifdef CONFIG_SCC_ENET_8xx_FIXED - .method = fsmii_fixed, -#else - .method = fsmii_fec, -#endif - - .id = 0, -}; - -static struct fs_platform_info mpc8xx_fec_pdata[] = { - { +static struct fs_platform_info mpc8xx_enet_pdata[] = { + [fsid_fec1] = { .rx_ring = 128, .tx_ring = 16, .rx_copybreak = 240, @@ -85,11 +73,12 @@ static struct fs_platform_info mpc8xx_fec_pdata[] = { .use_napi = 1, .napi_weight = 17, - .phy_addr = 0, - .phy_irq = SIU_IRQ7, + .init_ioports = setup_fec1_ioports, - .bus_info = 
&fec_mii_bus_info, - }, { + .bus_id = "0:00", + .has_phy = 1, + }, + [fsid_fec2] = { .rx_ring = 128, .tx_ring = 16, .rx_copybreak = 240, @@ -97,35 +86,32 @@ static struct fs_platform_info mpc8xx_fec_pdata[] = { .use_napi = 1, .napi_weight = 17, - .phy_addr = 1, - .phy_irq = SIU_IRQ7, - - .bus_info = &fec_mii_bus_info, - } -}; + .init_ioports = setup_fec2_ioports, -static struct fs_platform_info mpc8xx_scc_pdata = { - .rx_ring = 64, - .tx_ring = 8, - .rx_copybreak = 240, + .bus_id = "0:01", + .has_phy = 1, + }, + [fsid_scc3] = { + .rx_ring = 64, + .tx_ring = 8, + .rx_copybreak = 240, - .use_napi = 1, - .napi_weight = 17, + .use_napi = 1, + .napi_weight = 17, - .phy_addr = 2, -#ifdef CONFIG_MPC8xx_SCC_ENET_FIXED - .phy_irq = -1, + .init_ioports = setup_scc3_ioports, +#ifdef CONFIG_FIXED_MII_10_FDX + .bus_id = "fixed@100:1", #else - .phy_irq = SIU_IRQ7, -#endif - - .bus_info = &scc_mii_bus_info, + .bus_id = "0:02", + #endif + }, }; void __init board_init(void) { - volatile cpm8xx_t *cp = cpmp; - unsigned int *bcsr_io; + cpm8xx_t *cp = cpmp; + unsigned int *bcsr_io; #ifdef CONFIG_FS_ENET immap_t *immap = (immap_t *) IMAP_ADDR; @@ -164,6 +150,14 @@ void __init board_init(void) /* use MDC for MII (common) */ setbits16(&immap->im_ioport.iop_pdpar, 0x0080); clrbits16(&immap->im_ioport.iop_pddir, 0x0080); + bcsr_io = ioremap(BCSR5, sizeof(unsigned long)); + clrbits32(bcsr_io,BCSR5_MII1_EN); + clrbits32(bcsr_io,BCSR5_MII1_RST); +#ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2 + clrbits32(bcsr_io,BCSR5_MII2_EN); + clrbits32(bcsr_io,BCSR5_MII2_RST); +#endif + iounmap(bcsr_io); #endif } @@ -194,8 +188,8 @@ static void setup_fec2_ioports(void) /* configure FEC2 pins */ setbits32(&immap->im_cpm.cp_pepar, 0x0003fffc); setbits32(&immap->im_cpm.cp_pedir, 0x0003fffc); - setbits32(&immap->im_cpm.cp_peso, 0x00037800); clrbits32(&immap->im_cpm.cp_peso, 0x000087fc); + setbits32(&immap->im_cpm.cp_peso, 0x00037800); clrbits32(&immap->im_cpm.cp_cptr, 0x00000080); } @@ -213,6 +207,8 @@ static void setup_scc3_ioports(void) /* Enable the PHY. */ + clrbits32(bcsr_io+4, BCSR4_ETH10_RST); + udelay(1000); setbits32(bcsr_io+4, BCSR4_ETH10_RST); /* Configure port A pins for Txd and Rxd. 
*/ @@ -254,37 +250,38 @@ static void setup_scc3_ioports(void) clrbits32(&immap->im_cpm.cp_pedir, PE_ENET_TENA); setbits32(&immap->im_cpm.cp_peso, PE_ENET_TENA); - setbits32(bcsr_io+1, BCSR1_ETHEN); + setbits32(bcsr_io+4, BCSR1_ETHEN); iounmap(bcsr_io); } +static int mac_count = 0; + static void mpc885ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no) { - struct fs_platform_info *fpi = pdev->dev.platform_data; - - volatile cpm8xx_t *cp; + struct fs_platform_info *fpi; bd_t *bd = (bd_t *) __res; char *e; int i; - /* Get pointer to Communication Processor */ - cp = cpmp; + if(fs_no > ARRAY_SIZE(mpc8xx_enet_pdata)) { + printk(KERN_ERR"No network-suitable #%d device on bus", fs_no); + return; + } + + fpi = &mpc8xx_enet_pdata[fs_no]; + switch (fs_no) { case fsid_fec1: - fpi = &mpc8xx_fec_pdata[0]; fpi->init_ioports = &setup_fec1_ioports; break; case fsid_fec2: - fpi = &mpc8xx_fec_pdata[1]; fpi->init_ioports = &setup_fec2_ioports; break; case fsid_scc3: - fpi = &mpc8xx_scc_pdata; fpi->init_ioports = &setup_scc3_ioports; - mpc885ads_scc_phy_init(fpi->phy_addr); break; default: - printk(KERN_WARNING"Device %s is not supported!\n", pdev->name); + printk(KERN_WARNING "Device %s is not supported!\n", pdev->name); return; } @@ -295,7 +292,7 @@ static void mpc885ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no) for (i = 0; i < 6; i++) fpi->macaddr[i] = *e++; - fpi->macaddr[5 - pdev->id]++; + fpi->macaddr[5] += mac_count++; } @@ -318,58 +315,6 @@ static void __init mpc885ads_fixup_scc_enet_pdata(struct platform_device *pdev, mpc885ads_fixup_enet_pdata(pdev, fsid_scc1 + pdev->id - 1); } -/* SCC ethernet controller does not have MII management channel. FEC1 MII - * channel is used to communicate with the 10Mbit PHY. - */ - -#define MII_ECNTRL_PINMUX 0x4 -#define FEC_ECNTRL_PINMUX 0x00000004 -#define FEC_RCNTRL_MII_MODE 0x00000004 - -/* Make MII read/write commands. 
- */ -#define mk_mii_write(REG, VAL, PHY_ADDR) (0x50020000 | (((REG) & 0x1f) << 18) | \ - ((VAL) & 0xffff) | ((PHY_ADDR) << 23)) - -static void mpc885ads_scc_phy_init(char phy_addr) -{ - volatile immap_t *immap; - volatile fec_t *fecp; - bd_t *bd; - - bd = (bd_t *) __res; - immap = (immap_t *) IMAP_ADDR; /* pointer to internal registers */ - fecp = &(immap->im_cpm.cp_fec); - - /* Enable MII pins of the FEC1 - */ - setbits16(&immap->im_ioport.iop_pdpar, 0x0080); - clrbits16(&immap->im_ioport.iop_pddir, 0x0080); - /* Set MII speed to 2.5 MHz - */ - out_be32(&fecp->fec_mii_speed, - ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1); - - /* Enable FEC pin MUX - */ - setbits32(&fecp->fec_ecntrl, MII_ECNTRL_PINMUX); - setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); - - out_be32(&fecp->fec_mii_data, - mk_mii_write(MII_BMCR, BMCR_ISOLATE, phy_addr)); - udelay(100); - out_be32(&fecp->fec_mii_data, - mk_mii_write(MII_ADVERTISE, - ADVERTISE_10HALF | ADVERTISE_CSMA, phy_addr)); - udelay(100); - - /* Disable FEC MII settings - */ - clrbits32(&fecp->fec_ecntrl, MII_ECNTRL_PINMUX); - clrbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); - out_be32(&fecp->fec_mii_speed, 0); -} - static void setup_smc1_ioports(void) { immap_t *immap = (immap_t *) IMAP_ADDR; @@ -462,6 +407,9 @@ static int mpc885ads_platform_notify(struct device *dev) int __init mpc885ads_init(void) { + struct fs_mii_fec_platform_info* fmpi; + bd_t *bd = (bd_t *) __res; + printk(KERN_NOTICE "mpc885ads: Init\n"); platform_notify = mpc885ads_platform_notify; @@ -471,8 +419,17 @@ int __init mpc885ads_init(void) ppc_sys_device_enable(MPC8xx_CPM_FEC1); + ppc_sys_device_enable(MPC8xx_MDIO_FEC); + fmpi = ppc_sys_platform_devices[MPC8xx_MDIO_FEC].dev.platform_data = + &mpc8xx_mdio_fec_pdata; + + fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1; + + /* No PHY interrupt line here */ + fmpi->irq[0xf] = SIU_IRQ7; + #ifdef CONFIG_MPC8xx_SECOND_ETH_SCC3 - ppc_sys_device_enable(MPC8xx_CPM_SCC1); + ppc_sys_device_enable(MPC8xx_CPM_SCC3); #endif #ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2 diff --git a/arch/ppc/platforms/pq2ads_pd.h b/arch/ppc/platforms/pq2ads_pd.h index 8f14a43eafe..672483df807 100644 --- a/arch/ppc/platforms/pq2ads_pd.h +++ b/arch/ppc/platforms/pq2ads_pd.h @@ -29,86 +29,4 @@ #define F3_RXCLK 13 #define F3_TXCLK 14 -/* Automatically generates register configurations */ -#define PC_CLK(x) ((uint)(1<<(x-1))) /* FCC CLK I/O ports */ - -#define CMXFCR_RF1CS(x) ((uint)((x-5)<<27)) /* FCC1 Receive Clock Source */ -#define CMXFCR_TF1CS(x) ((uint)((x-5)<<24)) /* FCC1 Transmit Clock Source */ -#define CMXFCR_RF2CS(x) ((uint)((x-9)<<19)) /* FCC2 Receive Clock Source */ -#define CMXFCR_TF2CS(x) ((uint)((x-9)<<16)) /* FCC2 Transmit Clock Source */ -#define CMXFCR_RF3CS(x) ((uint)((x-9)<<11)) /* FCC3 Receive Clock Source */ -#define CMXFCR_TF3CS(x) ((uint)((x-9)<<8)) /* FCC3 Transmit Clock Source */ - -#define PC_F1RXCLK PC_CLK(F1_RXCLK) -#define PC_F1TXCLK PC_CLK(F1_TXCLK) -#define CMX1_CLK_ROUTE (CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK)) -#define CMX1_CLK_MASK ((uint)0xff000000) - -#define PC_F2RXCLK PC_CLK(F2_RXCLK) -#define PC_F2TXCLK PC_CLK(F2_TXCLK) -#define CMX2_CLK_ROUTE (CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK)) -#define CMX2_CLK_MASK ((uint)0x00ff0000) - -#define PC_F3RXCLK PC_CLK(F3_RXCLK) -#define PC_F3TXCLK PC_CLK(F3_TXCLK) -#define CMX3_CLK_ROUTE (CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK)) -#define CMX3_CLK_MASK ((uint)0x0000ff00) - -/* I/O Pin assignment for FCC1. 
I don't yet know the best way to do this, - * but there is little variation among the choices. - */ -#define PA1_COL 0x00000001U -#define PA1_CRS 0x00000002U -#define PA1_TXER 0x00000004U -#define PA1_TXEN 0x00000008U -#define PA1_RXDV 0x00000010U -#define PA1_RXER 0x00000020U -#define PA1_TXDAT 0x00003c00U -#define PA1_RXDAT 0x0003c000U -#define PA1_PSORA0 (PA1_RXDAT | PA1_TXDAT) -#define PA1_PSORA1 (PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \ - PA1_RXDV | PA1_RXER) -#define PA1_DIRA0 (PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV) -#define PA1_DIRA1 (PA1_TXDAT | PA1_TXEN | PA1_TXER) - - -/* I/O Pin assignment for FCC2. I don't yet know the best way to do this, - * but there is little variation among the choices. - */ -#define PB2_TXER 0x00000001U -#define PB2_RXDV 0x00000002U -#define PB2_TXEN 0x00000004U -#define PB2_RXER 0x00000008U -#define PB2_COL 0x00000010U -#define PB2_CRS 0x00000020U -#define PB2_TXDAT 0x000003c0U -#define PB2_RXDAT 0x00003c00U -#define PB2_PSORB0 (PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \ - PB2_RXER | PB2_RXDV | PB2_TXER) -#define PB2_PSORB1 (PB2_TXEN) -#define PB2_DIRB0 (PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV) -#define PB2_DIRB1 (PB2_TXDAT | PB2_TXEN | PB2_TXER) - - -/* I/O Pin assignment for FCC3. I don't yet know the best way to do this, - * but there is little variation among the choices. - */ -#define PB3_RXDV 0x00004000U -#define PB3_RXER 0x00008000U -#define PB3_TXER 0x00010000U -#define PB3_TXEN 0x00020000U -#define PB3_COL 0x00040000U -#define PB3_CRS 0x00080000U -#define PB3_TXDAT 0x0f000000U -#define PB3_RXDAT 0x00f00000U -#define PB3_PSORB0 (PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \ - PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN) -#define PB3_PSORB1 0 -#define PB3_DIRB0 (PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV) -#define PB3_DIRB1 (PB3_TXDAT | PB3_TXEN | PB3_TXER) - -#define FCC_MEM_OFFSET(x) (CPM_FCC_SPECIAL_BASE + (x*128)) -#define FCC1_MEM_OFFSET FCC_MEM_OFFSET(0) -#define FCC2_MEM_OFFSET FCC_MEM_OFFSET(1) - #endif diff --git a/arch/ppc/syslib/mpc85xx_devices.c b/arch/ppc/syslib/mpc85xx_devices.c index 7735336f5b8..325136e5aee 100644 --- a/arch/ppc/syslib/mpc85xx_devices.c +++ b/arch/ppc/syslib/mpc85xx_devices.c @@ -16,9 +16,11 @@ #include <linux/device.h> #include <linux/serial_8250.h> #include <linux/fsl_devices.h> +#include <linux/fs_enet_pd.h> #include <asm/mpc85xx.h> #include <asm/irq.h> #include <asm/ppc_sys.h> +#include <asm/cpm2.h> /* We use offsets for IORESOURCE_MEM since we do not know at compile time * what CCSRBAR is, will get fixed up by mach_mpc85xx_fixup @@ -82,6 +84,60 @@ static struct fsl_i2c_platform_data mpc85xx_fsl_i2c2_pdata = { .device_flags = FSL_I2C_DEV_SEPARATE_DFSRR, }; +static struct fs_platform_info mpc85xx_fcc1_pdata = { + .fs_no = fsid_fcc1, + .cp_page = CPM_CR_FCC1_PAGE, + .cp_block = CPM_CR_FCC1_SBLOCK, + + .rx_ring = 32, + .tx_ring = 32, + .rx_copybreak = 240, + .use_napi = 0, + .napi_weight = 17, + + .clk_mask = CMX1_CLK_MASK, + .clk_route = CMX1_CLK_ROUTE, + .clk_trx = (PC_F1RXCLK | PC_F1TXCLK), + + .mem_offset = FCC1_MEM_OFFSET, +}; + +static struct fs_platform_info mpc85xx_fcc2_pdata = { + .fs_no = fsid_fcc2, + .cp_page = CPM_CR_FCC2_PAGE, + .cp_block = CPM_CR_FCC2_SBLOCK, + + .rx_ring = 32, + .tx_ring = 32, + .rx_copybreak = 240, + .use_napi = 0, + .napi_weight = 17, + + .clk_mask = CMX2_CLK_MASK, + .clk_route = CMX2_CLK_ROUTE, + .clk_trx = (PC_F2RXCLK | PC_F2TXCLK), + + .mem_offset = FCC2_MEM_OFFSET, +}; + +static struct fs_platform_info mpc85xx_fcc3_pdata = { + .fs_no = 
fsid_fcc3, + .cp_page = CPM_CR_FCC3_PAGE, + .cp_block = CPM_CR_FCC3_SBLOCK, + + .rx_ring = 32, + .tx_ring = 32, + .rx_copybreak = 240, + .use_napi = 0, + .napi_weight = 17, + + .clk_mask = CMX3_CLK_MASK, + .clk_route = CMX3_CLK_ROUTE, + .clk_trx = (PC_F3RXCLK | PC_F3TXCLK), + + .mem_offset = FCC3_MEM_OFFSET, +}; + static struct plat_serial8250_port serial_platform_data[] = { [0] = { .mapbase = 0x4500, @@ -318,19 +374,28 @@ struct platform_device ppc_sys_platform_devices[] = { [MPC85xx_CPM_FCC1] = { .name = "fsl-cpm-fcc", .id = 1, - .num_resources = 3, + .num_resources = 4, + .dev.platform_data = &mpc85xx_fcc1_pdata, .resource = (struct resource[]) { { + .name = "fcc_regs", .start = 0x91300, .end = 0x9131F, .flags = IORESOURCE_MEM, }, { + .name = "fcc_regs_c", .start = 0x91380, .end = 0x9139F, .flags = IORESOURCE_MEM, }, { + .name = "fcc_pram", + .start = 0x88400, + .end = 0x884ff, + .flags = IORESOURCE_MEM, + }, + { .start = SIU_INT_FCC1, .end = SIU_INT_FCC1, .flags = IORESOURCE_IRQ, @@ -340,19 +405,28 @@ struct platform_device ppc_sys_platform_devices[] = { [MPC85xx_CPM_FCC2] = { .name = "fsl-cpm-fcc", .id = 2, - .num_resources = 3, + .num_resources = 4, + .dev.platform_data = &mpc85xx_fcc2_pdata, .resource = (struct resource[]) { { + .name = "fcc_regs", .start = 0x91320, .end = 0x9133F, .flags = IORESOURCE_MEM, }, { + .name = "fcc_regs_c", .start = 0x913A0, .end = 0x913CF, .flags = IORESOURCE_MEM, }, { + .name = "fcc_pram", + .start = 0x88500, + .end = 0x885ff, + .flags = IORESOURCE_MEM, + }, + { .start = SIU_INT_FCC2, .end = SIU_INT_FCC2, .flags = IORESOURCE_IRQ, @@ -362,19 +436,28 @@ struct platform_device ppc_sys_platform_devices[] = { [MPC85xx_CPM_FCC3] = { .name = "fsl-cpm-fcc", .id = 3, - .num_resources = 3, + .num_resources = 4, + .dev.platform_data = &mpc85xx_fcc3_pdata, .resource = (struct resource[]) { { + .name = "fcc_regs", .start = 0x91340, .end = 0x9135F, .flags = IORESOURCE_MEM, }, { + .name = "fcc_regs_c", .start = 0x913D0, .end = 0x913FF, .flags = IORESOURCE_MEM, }, { + .name = "fcc_pram", + .start = 0x88600, + .end = 0x886ff, + .flags = IORESOURCE_MEM, + }, + { .start = SIU_INT_FCC3, .end = SIU_INT_FCC3, .flags = IORESOURCE_IRQ, diff --git a/arch/ppc/syslib/mpc8xx_devices.c b/arch/ppc/syslib/mpc8xx_devices.c index 6f536383866..cf5ab47487a 100644 --- a/arch/ppc/syslib/mpc8xx_devices.c +++ b/arch/ppc/syslib/mpc8xx_devices.c @@ -218,6 +218,14 @@ struct platform_device ppc_sys_platform_devices[] = { }, }, }, + + [MPC8xx_MDIO_FEC] = { + .name = "fsl-cpm-fec-mdio", + .id = 0, + .num_resources = 0, + + }, + }; static int __init mach_mpc8xx_fixup(struct platform_device *pdev) diff --git a/arch/ppc/syslib/mpc8xx_sys.c b/arch/ppc/syslib/mpc8xx_sys.c index eee21328485..18ba1d7ff9f 100644 --- a/arch/ppc/syslib/mpc8xx_sys.c +++ b/arch/ppc/syslib/mpc8xx_sys.c @@ -22,7 +22,7 @@ struct ppc_sys_spec ppc_sys_specs[] = { .ppc_sys_name = "MPC86X", .mask = 0xFFFFFFFF, .value = 0x00000000, - .num_devices = 7, + .num_devices = 8, .device_list = (enum ppc_sys_devices[]) { MPC8xx_CPM_FEC1, @@ -32,13 +32,14 @@ struct ppc_sys_spec ppc_sys_specs[] = { MPC8xx_CPM_SCC4, MPC8xx_CPM_SMC1, MPC8xx_CPM_SMC2, + MPC8xx_MDIO_FEC, }, }, { .ppc_sys_name = "MPC885", .mask = 0xFFFFFFFF, .value = 0x00000000, - .num_devices = 8, + .num_devices = 9, .device_list = (enum ppc_sys_devices[]) { MPC8xx_CPM_FEC1, @@ -49,6 +50,7 @@ struct ppc_sys_spec ppc_sys_specs[] = { MPC8xx_CPM_SCC4, MPC8xx_CPM_SMC1, MPC8xx_CPM_SMC2, + MPC8xx_MDIO_FEC, }, }, { /* default match */ diff --git a/arch/ppc/syslib/pq2_devices.c 
b/arch/ppc/syslib/pq2_devices.c index 8692d00c08c..fefbc217a56 100644 --- a/arch/ppc/syslib/pq2_devices.c +++ b/arch/ppc/syslib/pq2_devices.c @@ -369,6 +369,11 @@ struct platform_device ppc_sys_platform_devices[] = { }, }, }, + [MPC82xx_MDIO_BB] = { + .name = "fsl-bb-mdio", + .id = 0, + .num_resources = 0, + }, }; static int __init mach_mpc82xx_fixup(struct platform_device *pdev) diff --git a/arch/ppc/syslib/pq2_sys.c b/arch/ppc/syslib/pq2_sys.c index fee8948162b..f52600c0db2 100644 --- a/arch/ppc/syslib/pq2_sys.c +++ b/arch/ppc/syslib/pq2_sys.c @@ -139,13 +139,14 @@ struct ppc_sys_spec ppc_sys_specs[] = { .ppc_sys_name = "8272", .mask = 0x0000ff00, .value = 0x00000c00, - .num_devices = 12, + .num_devices = 13, .device_list = (enum ppc_sys_devices[]) { MPC82xx_CPM_FCC1, MPC82xx_CPM_FCC2, MPC82xx_CPM_SCC1, MPC82xx_CPM_SCC2, MPC82xx_CPM_SCC3, MPC82xx_CPM_SCC4, MPC82xx_CPM_SMC1, MPC82xx_CPM_SMC2, MPC82xx_CPM_SPI, MPC82xx_CPM_I2C, MPC82xx_CPM_USB, MPC82xx_SEC1, + MPC82xx_MDIO_BB, }, }, /* below is a list of the 8280 family of processors */ diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 6a4b5f9715c..a0a94e0ef8d 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -618,7 +618,7 @@ appldata_offline_cpu(int cpu) } #ifdef CONFIG_HOTPLUG_CPU -static int +static int __cpuinit appldata_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index eb6ebfef134..6e6b6de7777 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -129,7 +129,7 @@ void __init paging_init(void) /* * pg_table is physical at this point */ - pg_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); + pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); pg_dir->pgd0 = (_PAGE_TABLE | __pa(pg_table)); pg_dir->pgd1 = (_PAGE_TABLE | (__pa(pg_table)+1024)); @@ -219,7 +219,7 @@ void __init paging_init(void) continue; } - pm_dir = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE*4); + pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4); pgd_populate(&init_mm, pg_dir, pm_dir); for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) { @@ -228,7 +228,7 @@ void __init paging_init(void) continue; } - pt_dir = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); + pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); pmd_populate_kernel(&init_mm, pm_dir, pt_dir); for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) { diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c index 35488d6c745..0251cab4708 100644 --- a/arch/sparc/kernel/setup.c +++ b/arch/sparc/kernel/setup.c @@ -348,9 +348,9 @@ void __init setup_arch(char **cmdline_p) init_mm.context = (unsigned long) NO_CONTEXT; init_task.thread.kregs = &fake_swapper_regs; - smp_setup_cpu_possible_map(); - paging_init(); + + smp_setup_cpu_possible_map(); } static int __init set_preferred_console(void) diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c index e311ade1b49..276f22881d0 100644 --- a/arch/sparc/kernel/smp.c +++ b/arch/sparc/kernel/smp.c @@ -34,7 +34,6 @@ #include <asm/tlbflush.h> #include <asm/cpudata.h> -volatile int smp_processors_ready = 0; int smp_num_cpus = 1; volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,}; unsigned char boot_cpu_id = 0; diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index ba843f6a283..3ff4edd3281 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -42,7 +42,7 @@ extern ctxd_t *srmmu_ctx_table_phys; extern void 
calibrate_delay(void); -extern volatile int smp_processors_ready; +static volatile int smp_processors_ready = 0; static int smp_highest_cpu; extern volatile unsigned long cpu_callin_map[NR_CPUS]; extern cpuinfo_sparc cpu_data[NR_CPUS]; diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 3b32096134a..7d4a649138f 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -39,7 +39,6 @@ extern ctxd_t *srmmu_ctx_table_phys; extern void calibrate_delay(void); -extern volatile int smp_processors_ready; extern volatile unsigned long cpu_callin_map[NR_CPUS]; extern unsigned char boot_cpu_id; @@ -217,7 +216,6 @@ void __init smp4m_smp_done(void) } /* Ok, they are spinning and ready to go. */ - smp_processors_ready = 1; } /* At each hardware IRQ, we get this called to forward IRQ reception diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c index 8cb06205d26..af9d81db0b3 100644 --- a/arch/sparc64/mm/generic.c +++ b/arch/sparc64/mm/generic.c @@ -69,6 +69,8 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, } else offset += PAGE_SIZE; + if (pte_write(entry)) + entry = pte_mkdirty(entry); do { BUG_ON(!pte_none(*pte)); set_pte_at(mm, address, pte, entry); diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig index 840d5d93d5c..5fb97071594 100644 --- a/arch/x86_64/defconfig +++ b/arch/x86_64/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.18-rc2 -# Tue Jul 18 17:13:20 2006 +# Linux kernel version: 2.6.18-rc4 +# Thu Aug 24 21:05:55 2006 # CONFIG_X86_64=y CONFIG_64BIT=y @@ -201,7 +201,7 @@ CONFIG_ACPI_THERMAL=y CONFIG_ACPI_NUMA=y # CONFIG_ACPI_ASUS is not set # CONFIG_ACPI_IBM is not set -CONFIG_ACPI_TOSHIBA=y +# CONFIG_ACPI_TOSHIBA is not set CONFIG_ACPI_BLACKLIST_YEAR=0 # CONFIG_ACPI_DEBUG is not set CONFIG_ACPI_EC=y @@ -216,7 +216,7 @@ CONFIG_ACPI_CONTAINER=y # CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_TABLE=y -# CONFIG_CPU_FREQ_DEBUG is not set +CONFIG_CPU_FREQ_DEBUG=y CONFIG_CPU_FREQ_STAT=y # CONFIG_CPU_FREQ_STAT_DETAILS is not set CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y @@ -495,8 +495,9 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y # CONFIG_CHR_DEV_ST is not set # CONFIG_CHR_DEV_OSST is not set -# CONFIG_BLK_DEV_SR is not set -# CONFIG_CHR_DEV_SG is not set +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=y # CONFIG_CHR_DEV_SCH is not set # @@ -512,7 +513,7 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y CONFIG_SCSI_FC_ATTRS=y # CONFIG_SCSI_ISCSI_ATTRS is not set -# CONFIG_SCSI_SAS_ATTRS is not set +CONFIG_SCSI_SAS_ATTRS=y # # SCSI low-level drivers @@ -538,7 +539,7 @@ CONFIG_MEGARAID_MAILBOX=y CONFIG_MEGARAID_SAS=y CONFIG_SCSI_SATA=y CONFIG_SCSI_SATA_AHCI=y -# CONFIG_SCSI_SATA_SVW is not set +CONFIG_SCSI_SATA_SVW=y CONFIG_SCSI_ATA_PIIX=y # CONFIG_SCSI_SATA_MV is not set CONFIG_SCSI_SATA_NV=y @@ -589,7 +590,7 @@ CONFIG_BLK_DEV_DM=y CONFIG_FUSION=y CONFIG_FUSION_SPI=y # CONFIG_FUSION_FC is not set -# CONFIG_FUSION_SAS is not set +CONFIG_FUSION_SAS=y CONFIG_FUSION_MAX_SGE=128 # CONFIG_FUSION_CTL is not set @@ -675,7 +676,7 @@ CONFIG_NET_PCI=y # CONFIG_PCNET32 is not set # CONFIG_AMD8111_ETH is not set # CONFIG_ADAPTEC_STARFIRE is not set -# CONFIG_B44 is not set +CONFIG_B44=y CONFIG_FORCEDETH=y # CONFIG_DGRS is not set # CONFIG_EEPRO100 is not set @@ -712,7 +713,7 @@ CONFIG_E1000=y # CONFIG_SK98LIN is not set # CONFIG_VIA_VELOCITY is not set CONFIG_TIGON3=y -# CONFIG_BNX2 is not set +CONFIG_BNX2=y # # Ethernet (10000 Mbit) @@ 
-842,44 +843,7 @@ CONFIG_LEGACY_PTY_COUNT=256 # # Watchdog Cards # -CONFIG_WATCHDOG=y -# CONFIG_WATCHDOG_NOWAYOUT is not set - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=y -# CONFIG_ACQUIRE_WDT is not set -# CONFIG_ADVANTECH_WDT is not set -# CONFIG_ALIM1535_WDT is not set -# CONFIG_ALIM7101_WDT is not set -# CONFIG_SC520_WDT is not set -# CONFIG_EUROTECH_WDT is not set -# CONFIG_IB700_WDT is not set -# CONFIG_IBMASR is not set -# CONFIG_WAFER_WDT is not set -# CONFIG_I6300ESB_WDT is not set -# CONFIG_I8XX_TCO is not set -# CONFIG_SC1200_WDT is not set -# CONFIG_60XX_WDT is not set -# CONFIG_SBC8360_WDT is not set -# CONFIG_CPU5_WDT is not set -# CONFIG_W83627HF_WDT is not set -# CONFIG_W83877F_WDT is not set -# CONFIG_W83977F_WDT is not set -# CONFIG_MACHZ_WDT is not set -# CONFIG_SBC_EPX_C3_WATCHDOG is not set - -# -# PCI-based Watchdog Cards -# -# CONFIG_PCIPCWATCHDOG is not set -# CONFIG_WDTPCI is not set - -# -# USB-based Watchdog Cards -# -# CONFIG_USBPCWATCHDOG is not set +# CONFIG_WATCHDOG is not set CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_INTEL=y CONFIG_HW_RANDOM_AMD=y @@ -1056,6 +1020,7 @@ CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=256 CONFIG_VIDEO_SELECT=y CONFIG_DUMMY_CONSOLE=y +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set # # Sound @@ -1301,7 +1266,7 @@ CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y # CONFIG_QUOTA is not set CONFIG_DNOTIFY=y -CONFIG_AUTOFS_FS=y +# CONFIG_AUTOFS_FS is not set CONFIG_AUTOFS4_FS=y # CONFIG_FUSE_FS is not set @@ -1494,4 +1459,5 @@ CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_CRC16 is not set CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y CONFIG_PLIST=y diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c index a9dc0f3b5b5..2fd5a67fd43 100644 --- a/arch/x86_64/ia32/ia32_binfmt.c +++ b/arch/x86_64/ia32/ia32_binfmt.c @@ -73,39 +73,44 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG]; * Dumping its extra ELF program headers includes all the other information * a debugger needs to easily find how the vsyscall DSO was being used. */ -#define ELF_CORE_EXTRA_PHDRS (VSYSCALL32_EHDR->e_phnum) +#define ELF_CORE_EXTRA_PHDRS (find_vma(current->mm, VSYSCALL32_BASE) ? 
\ + (VSYSCALL32_EHDR->e_phnum) : 0) #define ELF_CORE_WRITE_EXTRA_PHDRS \ do { \ - const struct elf32_phdr *const vsyscall_phdrs = \ - (const struct elf32_phdr *) (VSYSCALL32_BASE \ - + VSYSCALL32_EHDR->e_phoff); \ - int i; \ - Elf32_Off ofs = 0; \ - for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \ - struct elf32_phdr phdr = vsyscall_phdrs[i]; \ - if (phdr.p_type == PT_LOAD) { \ - BUG_ON(ofs != 0); \ - ofs = phdr.p_offset = offset; \ - phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \ - phdr.p_filesz = phdr.p_memsz; \ - offset += phdr.p_filesz; \ + if (find_vma(current->mm, VSYSCALL32_BASE)) { \ + const struct elf32_phdr *const vsyscall_phdrs = \ + (const struct elf32_phdr *) (VSYSCALL32_BASE \ + + VSYSCALL32_EHDR->e_phoff);\ + int i; \ + Elf32_Off ofs = 0; \ + for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \ + struct elf32_phdr phdr = vsyscall_phdrs[i]; \ + if (phdr.p_type == PT_LOAD) { \ + BUG_ON(ofs != 0); \ + ofs = phdr.p_offset = offset; \ + phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \ + phdr.p_filesz = phdr.p_memsz; \ + offset += phdr.p_filesz; \ + } \ + else \ + phdr.p_offset += ofs; \ + phdr.p_paddr = 0; /* match other core phdrs */ \ + DUMP_WRITE(&phdr, sizeof(phdr)); \ } \ - else \ - phdr.p_offset += ofs; \ - phdr.p_paddr = 0; /* match other core phdrs */ \ - DUMP_WRITE(&phdr, sizeof(phdr)); \ } \ } while (0) #define ELF_CORE_WRITE_EXTRA_DATA \ do { \ - const struct elf32_phdr *const vsyscall_phdrs = \ - (const struct elf32_phdr *) (VSYSCALL32_BASE \ - + VSYSCALL32_EHDR->e_phoff); \ - int i; \ - for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \ - if (vsyscall_phdrs[i].p_type == PT_LOAD) \ - DUMP_WRITE((void *) (u64) vsyscall_phdrs[i].p_vaddr, \ - PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \ + if (find_vma(current->mm, VSYSCALL32_BASE)) { \ + const struct elf32_phdr *const vsyscall_phdrs = \ + (const struct elf32_phdr *) (VSYSCALL32_BASE \ + + VSYSCALL32_EHDR->e_phoff); \ + int i; \ + for (i = 0; i < VSYSCALL32_EHDR->e_phnum; ++i) { \ + if (vsyscall_phdrs[i].p_type == PT_LOAD) \ + DUMP_WRITE((void *) (u64) vsyscall_phdrs[i].p_vaddr,\ + PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \ + } \ } \ } while (0) diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c index e56c2adf57a..764bf23c710 100644 --- a/arch/x86_64/kernel/e820.c +++ b/arch/x86_64/kernel/e820.c @@ -71,7 +71,11 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size) #endif /* kernel code + 640k memory hole (later should not be needed, but be paranoid for now) */ - if (last >= 640*1024 && addr < __pa_symbol(&_end)) { + if (last >= 640*1024 && addr < 1024*1024) { + *addrp = 1024*1024; + return 1; + } + if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) { *addrp = __pa_symbol(&_end); return 1; } @@ -104,35 +108,6 @@ e820_any_mapped(unsigned long start, unsigned long end, unsigned type) return 0; } -/* - * This function checks if the entire range <start,end> is mapped with type. 
- * - * Note: this function only works correct if the e820 table is sorted and - * not-overlapping, which is the case - */ -int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type) -{ - int i; - for (i = 0; i < e820.nr_map; i++) { - struct e820entry *ei = &e820.map[i]; - if (type && ei->type != type) - continue; - /* is the region (part) in overlap with the current region ?*/ - if (ei->addr >= end || ei->addr + ei->size <= start) - continue; - - /* if the region is at the beginning of <start,end> we move - * start to the end of the region since it's ok until there - */ - if (ei->addr <= start) - start = ei->addr + ei->size; - /* if start is now at or beyond end, we're done, full coverage */ - if (start >= end) - return 1; /* we're done */ - } - return 0; -} - /* * Find a free area in a specific range. */ diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S index 6f810424df4..aa8d8939abc 100644 --- a/arch/x86_64/kernel/entry.S +++ b/arch/x86_64/kernel/entry.S @@ -973,6 +973,8 @@ ENTRY(kernel_thread) ENDPROC(kernel_thread) child_rip: + pushq $0 # fake return address + CFI_STARTPROC /* * Here we are in the child and the registers are set as they were * at kernel_thread() invocation in the parent. @@ -983,6 +985,7 @@ child_rip: # exit xorl %edi, %edi call do_exit + CFI_ENDPROC ENDPROC(child_rip) /* diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S index 6df05e6034f..c9739ca81d0 100644 --- a/arch/x86_64/kernel/head.S +++ b/arch/x86_64/kernel/head.S @@ -191,6 +191,7 @@ startup_64: * jump */ movq initial_code(%rip),%rax + pushq $0 # fake return address jmp *%rax /* SMP bootup changes these two */ diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c index ce31d904d60..3dc5854ba21 100644 --- a/arch/x86_64/kernel/init_task.c +++ b/arch/x86_64/kernel/init_task.c @@ -46,4 +46,9 @@ EXPORT_SYMBOL(init_task); */ DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS; +/* Copies of the original ist values from the tss are only accessed during + * debugging, no special alignment required. 
+ */ +DEFINE_PER_CPU(struct orig_ist, orig_ist); + #define ALIGN_TO_4K __attribute__((section(".data.init_task"))) diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 8a099ff1f8b..34afad70482 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -521,8 +521,6 @@ static void discover_ebda(void) void __init setup_arch(char **cmdline_p) { - unsigned long kernel_end; - ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); screen_info = SCREEN_INFO; edid_info = EDID_INFO; @@ -596,8 +594,8 @@ void __init setup_arch(char **cmdline_p) (table_end - table_start) << PAGE_SHIFT); /* reserve kernel */ - kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE); - reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY); + reserve_bootmem_generic(__pa_symbol(&_text), + __pa_symbol(&_end) - __pa_symbol(&_text)); /* * reserve physical page 0 - it's a special BIOS page on many boxes, diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c index 6fe58a634b5..417de564456 100644 --- a/arch/x86_64/kernel/setup64.c +++ b/arch/x86_64/kernel/setup64.c @@ -189,6 +189,7 @@ void __cpuinit cpu_init (void) { int cpu = stack_smp_processor_id(); struct tss_struct *t = &per_cpu(init_tss, cpu); + struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu); unsigned long v; char *estacks = NULL; struct task_struct *me; @@ -256,7 +257,7 @@ void __cpuinit cpu_init (void) estacks += EXCEPTION_STKSZ; break; } - t->ist[v] = (unsigned long)estacks; + orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks; } t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap); diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index 4e9938dee06..b1249774d1e 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c @@ -107,7 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) } static int kstack_depth_to_print = 12; +#ifdef CONFIG_STACK_UNWIND static int call_trace = 1; +#else +#define call_trace (-1) +#endif #ifdef CONFIG_KALLSYMS # include <linux/kallsyms.h> @@ -174,7 +178,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, break; #endif default: - end = per_cpu(init_tss, cpu).ist[k]; + end = per_cpu(orig_ist, cpu).ist[k]; break; } /* @@ -274,21 +278,21 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s if (unwind_init_blocked(&info, tsk) == 0) unw_ret = show_trace_unwind(&info, NULL); } - if (unw_ret > 0 && !arch_unw_user_mode(&info)) { -#ifdef CONFIG_STACK_UNWIND - unsigned long rip = info.regs.rip; - print_symbol("DWARF2 unwinder stuck at %s\n", rip); - if (call_trace == 1) { - printk("Leftover inexact backtrace:\n"); - stack = (unsigned long *)info.regs.rsp; - } else if (call_trace > 1) + if (unw_ret > 0) { + if (call_trace == 1 && !arch_unw_user_mode(&info)) { + print_symbol("DWARF2 unwinder stuck at %s\n", + UNW_PC(&info)); + if ((long)UNW_SP(&info) < 0) { + printk("Leftover inexact backtrace:\n"); + stack = (unsigned long *)UNW_SP(&info); + } else + printk("Full inexact backtrace again:\n"); + } else if (call_trace >= 1) return; else printk("Full inexact backtrace again:\n"); -#else + } else printk("Inexact backtrace:\n"); -#endif - } } /* @@ -529,7 +533,7 @@ void __kprobes oops_end(unsigned long flags) /* Nest count reaches zero, release the lock. 
*/ spin_unlock_irqrestore(&die_lock, flags); if (panic_on_oops) - panic("Fatal exception: panic_on_oops"); + panic("Fatal exception"); } void __kprobes __die(const char * str, struct pt_regs * regs, long err) @@ -1120,6 +1124,7 @@ static int __init kstack_setup(char *s) } __setup("kstack=", kstack_setup); +#ifdef CONFIG_STACK_UNWIND static int __init call_trace_setup(char *s) { if (strcmp(s, "old") == 0) @@ -1133,3 +1138,4 @@ static int __init call_trace_setup(char *s) return 1; } __setup("call_trace=", call_trace_setup); +#endif diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c index 3c55c76c6fd..2d48a7941d4 100644 --- a/arch/x86_64/pci/mmconfig.c +++ b/arch/x86_64/pci/mmconfig.c @@ -9,6 +9,7 @@ #include <linux/init.h> #include <linux/acpi.h> #include <linux/bitmap.h> +#include <linux/dmi.h> #include <asm/e820.h> #include "pci.h" @@ -164,11 +165,33 @@ static __init void unreachable_devices(void) } } +static int disable_mcfg(struct dmi_system_id *d) +{ + printk("PCI: %s detected. Disabling MCFG.\n", d->ident); + pci_probe &= ~PCI_PROBE_MMCONF; + return 0; +} + +static struct dmi_system_id __initdata dmi_bad_mcfg[] = { + /* Has broken MCFG table that makes the system hang when used */ + { + .callback = disable_mcfg, + .ident = "Intel D3C5105 SDV", + .matches = { + DMI_MATCH(DMI_BIOS_VENDOR, "Intel"), + DMI_MATCH(DMI_BOARD_NAME, "D26928"), + }, + }, + {} +}; + void __init pci_mmcfg_init(void) { int i; - if ((pci_probe & PCI_PROBE_MMCONF) == 0) + dmi_check_system(dmi_bad_mcfg); + + if ((pci_probe & (PCI_PROBE_MMCONF|PCI_PROBE_MMCONF_FORCE)) == 0) return; acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); @@ -177,15 +200,6 @@ void __init pci_mmcfg_init(void) (pci_mmcfg_config[0].base_address == 0)) return; - if (!e820_all_mapped(pci_mmcfg_config[0].base_address, - pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, - E820_RESERVED)) { - printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", - pci_mmcfg_config[0].base_address); - printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); - return; - } - /* RED-PEN i386 doesn't do _nocache right now */ pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL); if (pci_mmcfg_virt == NULL) { diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 9734960a245..ce077d6bf3a 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -488,7 +488,7 @@ void die(const char * str, struct pt_regs * regs, long err) panic("Fatal exception in interrupt"); if (panic_on_oops) - panic("Fatal exception: panic_on_oops"); + panic("Fatal exception"); do_exit(err); } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index aae3123bf3e..3a3aee08ec5 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1561,7 +1561,7 @@ restart: /* ->key must be copied to avoid race with cfq_exit_queue() */ k = __cic->key; if (unlikely(!k)) { - cfq_drop_dead_cic(ioc, cic); + cfq_drop_dead_cic(ioc, __cic); goto restart; } diff --git a/block/elevator.c b/block/elevator.c index bc7baeec0d1..9b72dc7c8a5 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -765,7 +765,8 @@ void elv_unregister(struct elevator_type *e) read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); - e->ops.trim(p->io_context); + if (p->io_context) + e->ops.trim(p->io_context); task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 61d6b3c65b6..ddd9253f9d5 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -3628,6 +3628,8 @@ struct 
io_context *current_io_context(gfp_t gfp_flags) ret->nr_batch_requests = 0; /* because this is 0 */ ret->aic = NULL; ret->cic_root.rb_node = NULL; + /* make sure set_task_ioprio() sees the settings above */ + smp_wmb(); tsk->io_context = ret; } diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 96309b9660d..11abc7bf777 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -285,6 +285,8 @@ static int __init acpi_ac_init(void) { int result; + if (acpi_disabled) + return -ENODEV; acpi_ac_dir = acpi_lock_ac_dir(); if (!acpi_ac_dir) diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index b0d4b147b19..1dda370f402 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -484,10 +484,8 @@ acpi_memory_register_notify_handler(acpi_handle handle, status = is_memory_device(handle); - if (ACPI_FAILURE(status)){ - ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device")); + if (ACPI_FAILURE(status)) return AE_OK; /* continue */ - } status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, acpi_memory_device_notify, NULL); @@ -503,10 +501,8 @@ acpi_memory_deregister_notify_handler(acpi_handle handle, status = is_memory_device(handle); - if (ACPI_FAILURE(status)){ - ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device")); + if (ACPI_FAILURE(status)) return AE_OK; /* continue */ - } status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 6e5221707d9..9810e2a55d0 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -757,6 +757,9 @@ static int __init acpi_battery_init(void) { int result; + if (acpi_disabled) + return -ENODEV; + acpi_battery_dir = acpi_lock_battery_dir(); if (!acpi_battery_dir) return -ENODEV; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index b2977695e12..279c4bac92e 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/ioport.h> +#include <linux/kernel.h> #include <linux/list.h> #include <linux/sched.h> #include <linux/pm.h> @@ -68,7 +69,8 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device) status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device); if (ACPI_FAILURE(status) || !*device) { - ACPI_EXCEPTION((AE_INFO, status, "No context for object [%p]", handle)); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n", + handle)); return -ENODEV; } @@ -192,7 +194,7 @@ int acpi_bus_set_power(acpi_handle handle, int state) /* Make sure this is a valid target state */ if (!device->flags.power_manageable) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable", + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n", device->kobj.name)); return -ENODEV; } @@ -738,7 +740,10 @@ static int __init acpi_init(void) return -ENODEV; } - firmware_register(&acpi_subsys); + result = firmware_register(&acpi_subsys); + if (result < 0) + printk(KERN_WARNING "%s: firmware_register error: %d\n", + __FUNCTION__, result); result = acpi_bus_init(); diff --git a/drivers/acpi/hotkey.c b/drivers/acpi/hotkey.c index 32c9d88fd19..1ba2db67186 100644 --- a/drivers/acpi/hotkey.c +++ b/drivers/acpi/hotkey.c @@ -91,6 +91,14 @@ enum { HK_EVENT_ENTERRING_S5, }; +enum conf_entry_enum { + bus_handle = 0, + bus_method = 1, + action_handle = 2, + method = 3, + LAST_CONF_ENTRY +}; + /* procdir we use */ static struct proc_dir_entry *hotkey_proc_dir; static struct proc_dir_entry 
*hotkey_config; @@ -244,19 +252,15 @@ static int hotkey_info_open_fs(struct inode *inode, struct file *file) static char *format_result(union acpi_object *object) { - char *buf = NULL; - - buf = (char *)kmalloc(RESULT_STR_LEN, GFP_KERNEL); - if (buf) - memset(buf, 0, RESULT_STR_LEN); - else - goto do_fail; + char *buf; + buf = kzalloc(RESULT_STR_LEN, GFP_KERNEL); + if (!buf) + return NULL; /* Now, just support integer type */ if (object->type == ACPI_TYPE_INTEGER) sprintf(buf, "%d\n", (u32) object->integer.value); - do_fail: - return (buf); + return buf; } static int hotkey_polling_seq_show(struct seq_file *seq, void *offset) @@ -486,98 +490,102 @@ static void free_hotkey_device(union acpi_hotkey *key) static void free_hotkey_buffer(union acpi_hotkey *key) { + /* key would never be null, action method could be */ kfree(key->event_hotkey.action_method); } static void free_poll_hotkey_buffer(union acpi_hotkey *key) { + /* key would never be null, others could be*/ kfree(key->poll_hotkey.action_method); kfree(key->poll_hotkey.poll_method); kfree(key->poll_hotkey.poll_result); } static int -init_hotkey_device(union acpi_hotkey *key, char *bus_str, char *action_str, - char *method, int std_num, int external_num) +init_hotkey_device(union acpi_hotkey *key, char **config_entry, + int std_num, int external_num) { acpi_handle tmp_handle; acpi_status status = AE_OK; - if (std_num < 0 || IS_POLL(std_num) || !key) goto do_fail; - if (!bus_str || !action_str || !method) + if (!config_entry[bus_handle] || !config_entry[action_handle] + || !config_entry[method]) goto do_fail; key->link.hotkey_type = ACPI_HOTKEY_EVENT; key->link.hotkey_standard_num = std_num; key->event_hotkey.flag = 0; - key->event_hotkey.action_method = method; + key->event_hotkey.action_method = config_entry[method]; - status = - acpi_get_handle(NULL, bus_str, &(key->event_hotkey.bus_handle)); + status = acpi_get_handle(NULL, config_entry[bus_handle], + &(key->event_hotkey.bus_handle)); if (ACPI_FAILURE(status)) - goto do_fail; + goto do_fail_zero; key->event_hotkey.external_hotkey_num = external_num; - status = - acpi_get_handle(NULL, action_str, + status = acpi_get_handle(NULL, config_entry[action_handle], &(key->event_hotkey.action_handle)); if (ACPI_FAILURE(status)) - goto do_fail; + goto do_fail_zero; status = acpi_get_handle(key->event_hotkey.action_handle, - method, &tmp_handle); + config_entry[method], &tmp_handle); if (ACPI_FAILURE(status)) - goto do_fail; + goto do_fail_zero; return AE_OK; - do_fail: +do_fail_zero: + key->event_hotkey.action_method = NULL; +do_fail: return -ENODEV; } static int -init_poll_hotkey_device(union acpi_hotkey *key, - char *poll_str, - char *poll_method, - char *action_str, char *action_method, int std_num) +init_poll_hotkey_device(union acpi_hotkey *key, char **config_entry, + int std_num) { acpi_status status = AE_OK; acpi_handle tmp_handle; - if (std_num < 0 || IS_EVENT(std_num) || !key) goto do_fail; - - if (!poll_str || !poll_method || !action_str || !action_method) + if (!config_entry[bus_handle] ||!config_entry[bus_method] || + !config_entry[action_handle] || !config_entry[method]) goto do_fail; key->link.hotkey_type = ACPI_HOTKEY_POLLING; key->link.hotkey_standard_num = std_num; key->poll_hotkey.flag = 0; - key->poll_hotkey.poll_method = poll_method; - key->poll_hotkey.action_method = action_method; + key->poll_hotkey.poll_method = config_entry[bus_method]; + key->poll_hotkey.action_method = config_entry[method]; - status = - acpi_get_handle(NULL, poll_str, &(key->poll_hotkey.poll_handle)); 
+ status = acpi_get_handle(NULL, config_entry[bus_handle], + &(key->poll_hotkey.poll_handle)); if (ACPI_FAILURE(status)) - goto do_fail; + goto do_fail_zero; status = acpi_get_handle(key->poll_hotkey.poll_handle, - poll_method, &tmp_handle); + config_entry[bus_method], &tmp_handle); if (ACPI_FAILURE(status)) - goto do_fail; + goto do_fail_zero; status = - acpi_get_handle(NULL, action_str, + acpi_get_handle(NULL, config_entry[action_handle], &(key->poll_hotkey.action_handle)); if (ACPI_FAILURE(status)) - goto do_fail; + goto do_fail_zero; status = acpi_get_handle(key->poll_hotkey.action_handle, - action_method, &tmp_handle); + config_entry[method], &tmp_handle); if (ACPI_FAILURE(status)) - goto do_fail; + goto do_fail_zero; key->poll_hotkey.poll_result = (union acpi_object *)kmalloc(sizeof(union acpi_object), GFP_KERNEL); if (!key->poll_hotkey.poll_result) - goto do_fail; + goto do_fail_zero; return AE_OK; - do_fail: + +do_fail_zero: + key->poll_hotkey.poll_method = NULL; + key->poll_hotkey.action_method = NULL; +do_fail: return -ENODEV; } @@ -652,17 +660,18 @@ static int hotkey_poll_config_seq_show(struct seq_file *seq, void *offset) } static int -get_parms(char *config_record, - int *cmd, - char **bus_handle, - char **bus_method, - char **action_handle, - char **method, int *internal_event_num, int *external_event_num) +get_parms(char *config_record, int *cmd, char **config_entry, + int *internal_event_num, int *external_event_num) { +/* the format of *config_record = + * "1:\d+:*" : "cmd:internal_event_num" + * "\d+:\w+:\w+:\w+:\w+:\d+:\d+" : + * "cmd:bus_handle:bus_method:action_handle:method:internal_event_num:external_event_num" + */ char *tmp, *tmp1, count; + int i; sscanf(config_record, "%d", cmd); - if (*cmd == 1) { if (sscanf(config_record, "%d:%d", cmd, internal_event_num) != 2) @@ -674,59 +683,27 @@ get_parms(char *config_record, if (!tmp) goto do_fail; tmp++; - tmp1 = strchr(tmp, ':'); - if (!tmp1) - goto do_fail; - - count = tmp1 - tmp; - *bus_handle = (char *)kmalloc(count + 1, GFP_KERNEL); - if (!*bus_handle) - goto do_fail; - strncpy(*bus_handle, tmp, count); - *(*bus_handle + count) = 0; - - tmp = tmp1; - tmp++; - tmp1 = strchr(tmp, ':'); - if (!tmp1) - goto do_fail; - count = tmp1 - tmp; - *bus_method = (char *)kmalloc(count + 1, GFP_KERNEL); - if (!*bus_method) - goto do_fail; - strncpy(*bus_method, tmp, count); - *(*bus_method + count) = 0; - - tmp = tmp1; - tmp++; - tmp1 = strchr(tmp, ':'); - if (!tmp1) - goto do_fail; - count = tmp1 - tmp; - *action_handle = (char *)kmalloc(count + 1, GFP_KERNEL); - if (!*action_handle) - goto do_fail; - strncpy(*action_handle, tmp, count); - *(*action_handle + count) = 0; - - tmp = tmp1; - tmp++; - tmp1 = strchr(tmp, ':'); - if (!tmp1) - goto do_fail; - count = tmp1 - tmp; - *method = (char *)kmalloc(count + 1, GFP_KERNEL); - if (!*method) - goto do_fail; - strncpy(*method, tmp, count); - *(*method + count) = 0; - - if (sscanf(tmp1 + 1, "%d:%d", internal_event_num, external_event_num) <= - 0) - goto do_fail; - - return 6; - do_fail: + for (i = 0; i < LAST_CONF_ENTRY; i++) { + tmp1 = strchr(tmp, ':'); + if (!tmp1) { + goto do_fail; + } + count = tmp1 - tmp; + config_entry[i] = kzalloc(count + 1, GFP_KERNEL); + if (!config_entry[i]) + goto handle_failure; + strncpy(config_entry[i], tmp, count); + tmp = tmp1 + 1; + } + if (sscanf(tmp, "%d:%d", internal_event_num, external_event_num) <= 0) + goto handle_failure; + if (!IS_OTHERS(*internal_event_num)) { + return 6; + } +handle_failure: + while (i-- > 0) + kfree(config_entry[i]); +do_fail: 
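/*
 * Editorial illustration, not part of the patch: the reworked get_parms()
 * replaces four copy-pasted strchr()/kmalloc()/strncpy() blocks with a single
 * loop that fills a config_entry[] array, and on failure frees only the
 * entries that were already allocated ("while (i-- > 0) kfree(...)"). A
 * simplified sketch of that pattern; parse_record() and N_ENTRIES are
 * hypothetical stand-ins for the driver's own names.
 */
#include <linux/slab.h>
#include <linux/string.h>

#define N_ENTRIES 4			/* stands in for LAST_CONF_ENTRY */

static int parse_record(const char *record, char *entry[N_ENTRIES])
{
	const char *tmp = record, *tmp1;
	int i;

	for (i = 0; i < N_ENTRIES; i++) {
		tmp1 = strchr(tmp, ':');
		if (!tmp1)
			goto fail;
		entry[i] = kzalloc(tmp1 - tmp + 1, GFP_KERNEL);
		if (!entry[i])
			goto fail;
		memcpy(entry[i], tmp, tmp1 - tmp);	/* kzalloc provides the NUL */
		tmp = tmp1 + 1;
	}
	return 0;
fail:
	while (i-- > 0)			/* unwind only what was allocated */
		kfree(entry[i]);
	return -1;
}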
return -1; } @@ -736,50 +713,34 @@ static ssize_t hotkey_write_config(struct file *file, size_t count, loff_t * data) { char *config_record = NULL; - char *bus_handle = NULL; - char *bus_method = NULL; - char *action_handle = NULL; - char *method = NULL; + char *config_entry[LAST_CONF_ENTRY]; int cmd, internal_event_num, external_event_num; int ret = 0; - union acpi_hotkey *key = NULL; + union acpi_hotkey *key = kzalloc(sizeof(union acpi_hotkey), GFP_KERNEL); + if (!key) + return -ENOMEM; - config_record = (char *)kmalloc(count + 1, GFP_KERNEL); - if (!config_record) + config_record = kzalloc(count + 1, GFP_KERNEL); + if (!config_record) { + kfree(key); return -ENOMEM; + } if (copy_from_user(config_record, buffer, count)) { kfree(config_record); + kfree(key); printk(KERN_ERR PREFIX "Invalid data\n"); return -EINVAL; } - config_record[count] = 0; - - ret = get_parms(config_record, - &cmd, - &bus_handle, - &bus_method, - &action_handle, - &method, &internal_event_num, &external_event_num); - + ret = get_parms(config_record, &cmd, config_entry, + &internal_event_num, &external_event_num); kfree(config_record); - if (IS_OTHERS(internal_event_num)) - goto do_fail; if (ret != 6) { - do_fail: - kfree(bus_handle); - kfree(bus_method); - kfree(action_handle); - kfree(method); printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret); return -EINVAL; } - key = kmalloc(sizeof(union acpi_hotkey), GFP_KERNEL); - if (!key) - goto do_fail; - memset(key, 0, sizeof(union acpi_hotkey)); if (cmd == 1) { union acpi_hotkey *tmp = NULL; tmp = get_hotkey_by_event(&global_hotkey_list, @@ -791,34 +752,19 @@ static ssize_t hotkey_write_config(struct file *file, goto cont_cmd; } if (IS_EVENT(internal_event_num)) { - kfree(bus_method); - ret = init_hotkey_device(key, bus_handle, action_handle, method, - internal_event_num, - external_event_num); - } else - ret = init_poll_hotkey_device(key, bus_handle, bus_method, - action_handle, method, - internal_event_num); - if (ret) { - kfree(bus_handle); - kfree(action_handle); - if (IS_EVENT(internal_event_num)) - free_hotkey_buffer(key); - else - free_poll_hotkey_buffer(key); - kfree(key); - printk(KERN_ERR PREFIX "Invalid hotkey\n"); - return -EINVAL; + if (init_hotkey_device(key, config_entry, + internal_event_num, external_event_num)) + goto init_hotkey_fail; + } else { + if (init_poll_hotkey_device(key, config_entry, + internal_event_num)) + goto init_poll_hotkey_fail; } - - cont_cmd: - kfree(bus_handle); - kfree(action_handle); - +cont_cmd: switch (cmd) { case 0: - if (get_hotkey_by_event - (&global_hotkey_list, key->link.hotkey_standard_num)) + if (get_hotkey_by_event(&global_hotkey_list, + key->link.hotkey_standard_num)) goto fail_out; else hotkey_add(key); @@ -827,6 +773,7 @@ static ssize_t hotkey_write_config(struct file *file, hotkey_remove(key); break; case 2: + /* key is kfree()ed if matched*/ if (hotkey_update(key)) goto fail_out; break; @@ -835,11 +782,22 @@ static ssize_t hotkey_write_config(struct file *file, break; } return count; - fail_out: - if (IS_EVENT(internal_event_num)) - free_hotkey_buffer(key); - else - free_poll_hotkey_buffer(key); + +init_poll_hotkey_fail: /* failed init_poll_hotkey_device */ + kfree(config_entry[bus_method]); + config_entry[bus_method] = NULL; +init_hotkey_fail: /* failed init_hotkey_device */ + kfree(config_entry[method]); +fail_out: + kfree(config_entry[bus_handle]); + kfree(config_entry[action_handle]); + /* No double free since elements =NULL for error cases */ + if (IS_EVENT(internal_event_num)) { + if 
(config_entry[bus_method]) + kfree(config_entry[bus_method]); + free_hotkey_buffer(key); /* frees [method] */ + } else + free_poll_hotkey_buffer(key); /* frees [bus_method]+[method] */ kfree(key); printk(KERN_ERR PREFIX "invalid key\n"); return -EINVAL; @@ -923,10 +881,9 @@ static ssize_t hotkey_execute_aml_method(struct file *file, union acpi_hotkey *key; - arg = (char *)kmalloc(count + 1, GFP_KERNEL); + arg = kzalloc(count + 1, GFP_KERNEL); if (!arg) return -ENOMEM; - arg[count] = 0; if (copy_from_user(arg, buffer, count)) { kfree(arg); diff --git a/drivers/acpi/i2c_ec.c b/drivers/acpi/i2c_ec.c index 84239d51dc0..6809c283ec5 100644 --- a/drivers/acpi/i2c_ec.c +++ b/drivers/acpi/i2c_ec.c @@ -330,7 +330,7 @@ static int acpi_ec_hc_add(struct acpi_device *device) status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n")); - kfree(ec_hc->smbus); + kfree(ec_hc); kfree(smbus); return -EIO; } diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index b7d1514cd19..507f051d1ce 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -746,6 +746,16 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", handle, units, timeout)); + /* + * This can be called during resume with interrupts off. + * Like boot-time, we should be single threaded and will + * always get the lock if we try -- timeout or not. + * If this doesn't succeed, then we will oops courtesy of + * might_sleep() in down(). + */ + if (!down_trylock(sem)) + return AE_OK; + switch (timeout) { /* * No Wait: diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index db7b350a503..62bef0b3b61 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -1714,6 +1714,9 @@ static int __init acpi_sbs_init(void) { int result = 0; + if (acpi_disabled) + return -ENODEV; + init_MUTEX(&sbs_sem); if (capacity_mode != DEF_CAPACITY_UNIT diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 5fcb50c7b77..698a1540e30 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -4,6 +4,7 @@ #include <linux/module.h> #include <linux/init.h> +#include <linux/kernel.h> #include <linux/acpi.h> #include <acpi/acpi_drivers.h> @@ -113,6 +114,8 @@ static struct kset acpi_namespace_kset = { static void acpi_device_register(struct acpi_device *device, struct acpi_device *parent) { + int err; + /* * Linkage * ------- @@ -138,7 +141,10 @@ static void acpi_device_register(struct acpi_device *device, device->kobj.parent = &parent->kobj; device->kobj.ktype = &ktype_acpi_ns; device->kobj.kset = &acpi_namespace_kset; - kobject_register(&device->kobj); + err = kobject_register(&device->kobj); + if (err < 0) + printk(KERN_WARNING "%s: kobject_register error: %d\n", + __FUNCTION__, err); create_sysfs_device_files(device); } @@ -1450,7 +1456,9 @@ static int __init acpi_scan_init(void) if (acpi_disabled) return 0; - kset_register(&acpi_namespace_kset); + result = kset_register(&acpi_namespace_kset); + if (result < 0) + printk(KERN_ERR PREFIX "kset_register error: %d\n", result); result = bus_register(&acpi_bus_type); if (result) { diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index f48227f4c8c..d0d84c43a9d 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -262,7 +262,7 @@ acpi_evaluate_integer(acpi_handle handle, if (!data) return AE_BAD_PARAMETER; - element = kmalloc(sizeof(union acpi_object), GFP_KERNEL); + element = kmalloc(sizeof(union acpi_object), 
irqs_disabled() ? GFP_ATOMIC: GFP_KERNEL); if (!element) return AE_NO_MEMORY; diff --git a/drivers/base/node.c b/drivers/base/node.c index d7de1753e09..e9b0957f15d 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -64,7 +64,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf) "Node %d Mapped: %8lu kB\n" "Node %d AnonPages: %8lu kB\n" "Node %d PageTables: %8lu kB\n" - "Node %d NFS Unstable: %8lu kB\n" + "Node %d NFS_Unstable: %8lu kB\n" "Node %d Bounce: %8lu kB\n" "Node %d Slab: %8lu kB\n", nid, K(i.totalram), diff --git a/drivers/cdrom/gscd.c b/drivers/cdrom/gscd.c index b6ee50a2916..fa708248976 100644 --- a/drivers/cdrom/gscd.c +++ b/drivers/cdrom/gscd.c @@ -266,7 +266,7 @@ repeat: goto out; if (req->cmd != READ) { - printk("GSCD: bad cmd %lu\n", rq_data_dir(req)); + printk("GSCD: bad cmd %u\n", rq_data_dir(req)); end_request(req, 0); goto repeat; } diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c index 5bb2234a909..39a7f685e3f 100644 --- a/drivers/char/drm/radeon_state.c +++ b/drivers/char/drm/radeon_state.c @@ -175,6 +175,14 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * } break; + case R200_EMIT_VAP_CTL:{ + RING_LOCALS; + BEGIN_RING(2); + OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); + ADVANCE_RING(); + } + break; + case RADEON_EMIT_RB3D_COLORPITCH: case RADEON_EMIT_RE_LINE_PATTERN: case RADEON_EMIT_SE_LINE_WIDTH: @@ -202,7 +210,6 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * case R200_EMIT_TCL_LIGHT_MODEL_CTL_0: case R200_EMIT_TFACTOR_0: case R200_EMIT_VTX_FMT_0: - case R200_EMIT_VAP_CTL: case R200_EMIT_MATRIX_SELECT_0: case R200_EMIT_TEX_PROC_CTL_2: case R200_EMIT_TCL_UCP_VERT_BLEND_CTL: diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c index 4ea7bd5f4f5..a369dd6877d 100644 --- a/drivers/char/moxa.c +++ b/drivers/char/moxa.c @@ -142,6 +142,7 @@ typedef struct _moxa_board_conf { static moxa_board_conf moxa_boards[MAX_BOARDS]; static void __iomem *moxaBaseAddr[MAX_BOARDS]; +static int loadstat[MAX_BOARDS]; struct moxa_str { int type; @@ -1688,6 +1689,8 @@ int MoxaDriverPoll(void) if (moxaCard == 0) return (-1); for (card = 0; card < MAX_BOARDS; card++) { + if (loadstat[card] == 0) + continue; if ((ports = moxa_boards[card].numPorts) == 0) continue; if (readb(moxaIntPend[card]) == 0xff) { @@ -2903,6 +2906,7 @@ static int moxaloadcode(int cardno, unsigned char __user *tmp, int len) } break; } + loadstat[cardno] = 1; return (0); } @@ -2920,7 +2924,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len) len1 = len >> 1; ptr = (ushort *) moxaBuff; for (i = 0; i < len1; i++) - usum += *(ptr + i); + usum += le16_to_cpu(*(ptr + i)); retry = 0; do { len1 = len >> 1; @@ -2992,7 +2996,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor wlen = len >> 1; uptr = (ushort *) moxaBuff; for (i = 0; i < wlen; i++) - usum += uptr[i]; + usum += le16_to_cpu(uptr[i]); retry = 0; j = 0; do { diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index bfdb90242a9..bb0d9199e99 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -153,6 +153,15 @@ int tty_ioctl(struct inode * inode, struct file * file, static int tty_fasync(int fd, struct file * filp, int on); static void release_mem(struct tty_struct *tty, int idx); +/** + * alloc_tty_struct - allocate a tty object + * + * Return a new empty tty structure. 
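/*
 * Editorial illustration, not part of the patch: the moxa.c hunks above switch
 * the firmware checksum to le16_to_cpu() so that the 16-bit words are summed
 * the same way on big- and little-endian hosts. A condensed sketch of that
 * idiom; checksum_fw() is a hypothetical helper.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static u16 checksum_fw(const __le16 *buf, int nwords)
{
	u16 sum = 0;
	int i;

	for (i = 0; i < nwords; i++)
		sum += le16_to_cpu(buf[i]);	/* convert each word before adding */
	return sum;
}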
The data fields have not + * been initialized in any way but has been zeroed + * + * Locking: none + * FIXME: use kzalloc + */ static struct tty_struct *alloc_tty_struct(void) { @@ -166,6 +175,15 @@ static struct tty_struct *alloc_tty_struct(void) static void tty_buffer_free_all(struct tty_struct *); +/** + * free_tty_struct - free a disused tty + * @tty: tty struct to free + * + * Free the write buffers, tty queue and tty memory itself. + * + * Locking: none. Must be called after tty is definitely unused + */ + static inline void free_tty_struct(struct tty_struct *tty) { kfree(tty->write_buf); @@ -175,6 +193,17 @@ static inline void free_tty_struct(struct tty_struct *tty) #define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base) +/** + * tty_name - return tty naming + * @tty: tty structure + * @buf: buffer for output + * + * Convert a tty structure into a name. The name reflects the kernel + * naming policy and if udev is in use may not reflect user space + * + * Locking: none + */ + char *tty_name(struct tty_struct *tty, char *buf) { if (!tty) /* Hmm. NULL pointer. That's fun. */ @@ -235,6 +264,28 @@ static int check_tty_count(struct tty_struct *tty, const char *routine) * Tty buffer allocation management */ + +/** + * tty_buffer_free_all - free buffers used by a tty + * @tty: tty to free from + * + * Remove all the buffers pending on a tty whether queued with data + * or in the free ring. Must be called when the tty is no longer in use + * + * Locking: none + */ + + +/** + * tty_buffer_free_all - free buffers used by a tty + * @tty: tty to free from + * + * Remove all the buffers pending on a tty whether queued with data + * or in the free ring. Must be called when the tty is no longer in use + * + * Locking: none + */ + static void tty_buffer_free_all(struct tty_struct *tty) { struct tty_buffer *thead; @@ -247,19 +298,47 @@ static void tty_buffer_free_all(struct tty_struct *tty) kfree(thead); } tty->buf.tail = NULL; + tty->buf.memory_used = 0; } +/** + * tty_buffer_init - prepare a tty buffer structure + * @tty: tty to initialise + * + * Set up the initial state of the buffer management for a tty device. + * Must be called before the other tty buffer functions are used. + * + * Locking: none + */ + static void tty_buffer_init(struct tty_struct *tty) { spin_lock_init(&tty->buf.lock); tty->buf.head = NULL; tty->buf.tail = NULL; tty->buf.free = NULL; + tty->buf.memory_used = 0; } -static struct tty_buffer *tty_buffer_alloc(size_t size) +/** + * tty_buffer_alloc - allocate a tty buffer + * @tty: tty device + * @size: desired size (characters) + * + * Allocate a new tty buffer to hold the desired number of characters. + * Return NULL if out of memory or the allocation would exceed the + * per device queue + * + * Locking: Caller must hold tty->buf.lock + */ + +static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size) { - struct tty_buffer *p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); + struct tty_buffer *p; + + if (tty->buf.memory_used + size > 65536) + return NULL; + p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); if(p == NULL) return NULL; p->used = 0; @@ -269,17 +348,27 @@ static struct tty_buffer *tty_buffer_alloc(size_t size) p->read = 0; p->char_buf_ptr = (char *)(p->data); p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size; -/* printk("Flip create %p\n", p); */ + tty->buf.memory_used += size; return p; } -/* Must be called with the tty_read lock held. 
This needs to acquire strategy - code to decide if we should kfree or relink a given expired buffer */ +/** + * tty_buffer_free - free a tty buffer + * @tty: tty owning the buffer + * @b: the buffer to free + * + * Free a tty buffer, or add it to the free list according to our + * internal strategy + * + * Locking: Caller must hold tty->buf.lock + */ static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b) { /* Dumb strategy for now - should keep some stats */ -/* printk("Flip dispose %p\n", b); */ + tty->buf.memory_used -= b->size; + WARN_ON(tty->buf.memory_used < 0); + if(b->size >= 512) kfree(b); else { @@ -288,6 +377,18 @@ static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b) } } +/** + * tty_buffer_find - find a free tty buffer + * @tty: tty owning the buffer + * @size: characters wanted + * + * Locate an existing suitable tty buffer or if we are lacking one then + * allocate a new one. We round our buffers off in 256 character chunks + * to get better allocation behaviour. + * + * Locking: Caller must hold tty->buf.lock + */ + static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) { struct tty_buffer **tbh = &tty->buf.free; @@ -299,20 +400,28 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) t->used = 0; t->commit = 0; t->read = 0; - /* DEBUG ONLY */ -/* memset(t->data, '*', size); */ -/* printk("Flip recycle %p\n", t); */ + tty->buf.memory_used += t->size; return t; } tbh = &((*tbh)->next); } /* Round the buffer size out */ size = (size + 0xFF) & ~ 0xFF; - return tty_buffer_alloc(size); + return tty_buffer_alloc(tty, size); /* Should possibly check if this fails for the largest buffer we have queued and recycle that ? */ } +/** + * tty_buffer_request_room - grow tty buffer if needed + * @tty: tty structure + * @size: size desired + * + * Make at least size bytes of linear space available for the tty + * buffer. If we fail return the size we managed to find. + * + * Locking: Takes tty->buf.lock + */ int tty_buffer_request_room(struct tty_struct *tty, size_t size) { struct tty_buffer *b, *n; @@ -347,6 +456,18 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size) } EXPORT_SYMBOL_GPL(tty_buffer_request_room); +/** + * tty_insert_flip_string - Add characters to the tty buffer + * @tty: tty structure + * @chars: characters + * @size: size + * + * Queue a series of bytes to the tty buffering. All the characters + * passed are marked as without error. Returns the number added. + * + * Locking: Called functions may take tty->buf.lock + */ + int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, size_t size) { @@ -370,6 +491,20 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, } EXPORT_SYMBOL(tty_insert_flip_string); +/** + * tty_insert_flip_string_flags - Add characters to the tty buffer + * @tty: tty structure + * @chars: characters + * @flags: flag bytes + * @size: size + * + * Queue a series of bytes to the tty buffering. For each character + * the flags array indicates the status of the character. Returns the + * number added. 
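/*
 * Editorial illustration, not part of the patch: tty_insert_flip_string(),
 * documented above, is the driver-side way to queue received bytes; handing
 * them to the line discipline is then typically done with
 * tty_flip_buffer_push(). A sketched interrupt-side receive path;
 * example_rx() is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static void example_rx(struct tty_struct *tty,
		       const unsigned char *data, size_t len)
{
	/* copy what fits into the tty buffers; excess is simply dropped here */
	int copied = tty_insert_flip_string(tty, data, len);

	if (copied < (int)len)
		pr_debug("example_rx: dropped %zu bytes\n", len - copied);

	/* hand the queued data to the line discipline */
	tty_flip_buffer_push(tty);
}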
+ * + * Locking: Called functions may take tty->buf.lock + */ + int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size) { @@ -394,6 +529,17 @@ int tty_insert_flip_string_flags(struct tty_struct *tty, } EXPORT_SYMBOL(tty_insert_flip_string_flags); +/** + * tty_schedule_flip - push characters to ldisc + * @tty: tty to push from + * + * Takes any pending buffers and transfers their ownership to the + * ldisc side of the queue. It then schedules those characters for + * processing by the line discipline. + * + * Locking: Takes tty->buf.lock + */ + void tty_schedule_flip(struct tty_struct *tty) { unsigned long flags; @@ -405,12 +551,19 @@ void tty_schedule_flip(struct tty_struct *tty) } EXPORT_SYMBOL(tty_schedule_flip); -/* +/** + * tty_prepare_flip_string - make room for characters + * @tty: tty + * @chars: return pointer for character write area + * @size: desired size + * * Prepare a block of space in the buffer for data. Returns the length * available and buffer pointer to the space which is now allocated and * accounted for as ready for normal characters. This is used for drivers * that need their own block copy routines into the buffer. There is no * guarantee the buffer is a DMA target! + * + * Locking: May call functions taking tty->buf.lock */ int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size) @@ -427,12 +580,20 @@ int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_ EXPORT_SYMBOL_GPL(tty_prepare_flip_string); -/* +/** + * tty_prepare_flip_string_flags - make room for characters + * @tty: tty + * @chars: return pointer for character write area + * @flags: return pointer for status flag write area + * @size: desired size + * * Prepare a block of space in the buffer for data. Returns the length * available and buffer pointer to the space which is now allocated and * accounted for as ready for characters. This is used for drivers * that need their own block copy routines into the buffer. There is no * guarantee the buffer is a DMA target! + * + * Locking: May call functions taking tty->buf.lock */ int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size) @@ -451,10 +612,16 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags); -/* +/** + * tty_set_termios_ldisc - set ldisc field + * @tty: tty structure + * @num: line discipline number + * * This is probably overkill for real world processors but * they are not on hot paths so a little discipline won't do * any harm. + * + * Locking: takes termios_sem */ static void tty_set_termios_ldisc(struct tty_struct *tty, int num) @@ -474,6 +641,19 @@ static DEFINE_SPINLOCK(tty_ldisc_lock); static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */ +/** + * tty_register_ldisc - install a line discipline + * @disc: ldisc number + * @new_ldisc: pointer to the ldisc object + * + * Installs a new line discipline into the kernel. The discipline + * is set up as unreferenced and then made available to the kernel + * from this point onwards. 
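/*
 * Editorial illustration, not part of the patch: tty_prepare_flip_string(),
 * documented above, lets a driver obtain a linear chunk of the tty buffer and
 * fill it with its own copy routine before scheduling the flip. Sketch only;
 * example_rx_block() is hypothetical.
 */
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static void example_rx_block(struct tty_struct *tty,
			     const unsigned char *hw_buf, size_t len)
{
	unsigned char *dst;
	int space = tty_prepare_flip_string(tty, &dst, len);

	if (space <= 0)
		return;			/* no room at all right now */

	memcpy(dst, hw_buf, space);	/* the space is already accounted for */
	tty_schedule_flip(tty);		/* let the ldisc process it */
}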
+ * + * Locking: + * takes tty_ldisc_lock to guard against ldisc races + */ + int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc) { unsigned long flags; @@ -493,6 +673,18 @@ int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc) } EXPORT_SYMBOL(tty_register_ldisc); +/** + * tty_unregister_ldisc - unload a line discipline + * @disc: ldisc number + * @new_ldisc: pointer to the ldisc object + * + * Remove a line discipline from the kernel providing it is not + * currently in use. + * + * Locking: + * takes tty_ldisc_lock to guard against ldisc races + */ + int tty_unregister_ldisc(int disc) { unsigned long flags; @@ -512,6 +704,19 @@ int tty_unregister_ldisc(int disc) } EXPORT_SYMBOL(tty_unregister_ldisc); +/** + * tty_ldisc_get - take a reference to an ldisc + * @disc: ldisc number + * + * Takes a reference to a line discipline. Deals with refcounts and + * module locking counts. Returns NULL if the discipline is not available. + * Returns a pointer to the discipline and bumps the ref count if it is + * available + * + * Locking: + * takes tty_ldisc_lock to guard against ldisc races + */ + struct tty_ldisc *tty_ldisc_get(int disc) { unsigned long flags; @@ -540,6 +745,17 @@ struct tty_ldisc *tty_ldisc_get(int disc) EXPORT_SYMBOL_GPL(tty_ldisc_get); +/** + * tty_ldisc_put - drop ldisc reference + * @disc: ldisc number + * + * Drop a reference to a line discipline. Manage refcounts and + * module usage counts + * + * Locking: + * takes tty_ldisc_lock to guard against ldisc races + */ + void tty_ldisc_put(int disc) { struct tty_ldisc *ld; @@ -557,6 +773,19 @@ void tty_ldisc_put(int disc) EXPORT_SYMBOL_GPL(tty_ldisc_put); +/** + * tty_ldisc_assign - set ldisc on a tty + * @tty: tty to assign + * @ld: line discipline + * + * Install an instance of a line discipline into a tty structure. The + * ldisc must have a reference count above zero to ensure it remains/ + * The tty instance refcount starts at zero. + * + * Locking: + * Caller must hold references + */ + static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld) { tty->ldisc = *ld; @@ -571,6 +800,8 @@ static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld) * the tty ldisc. Return 0 on failure or 1 on success. This is * used to implement both the waiting and non waiting versions * of tty_ldisc_ref + * + * Locking: takes tty_ldisc_lock */ static int tty_ldisc_try(struct tty_struct *tty) @@ -602,6 +833,8 @@ static int tty_ldisc_try(struct tty_struct *tty) * must also be careful not to hold other locks that will deadlock * against a discipline change, such as an existing ldisc reference * (which we check for) + * + * Locking: call functions take tty_ldisc_lock */ struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) @@ -622,6 +855,8 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); * Dereference the line discipline for the terminal and take a * reference to it. If the line discipline is in flux then * return NULL. Can be called from IRQ and timer functions. + * + * Locking: called functions take tty_ldisc_lock */ struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) @@ -639,6 +874,8 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref); * * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May * be called in IRQ context. + * + * Locking: takes tty_ldisc_lock */ void tty_ldisc_deref(struct tty_ldisc *ld) @@ -683,6 +920,9 @@ static void tty_ldisc_enable(struct tty_struct *tty) * * Set the discipline of a tty line. Must be called from a process * context. + * + * Locking: takes tty_ldisc_lock. 
+ * called functions take termios_sem */ static int tty_set_ldisc(struct tty_struct *tty, int ldisc) @@ -846,9 +1086,17 @@ restart: return retval; } -/* - * This routine returns a tty driver structure, given a device number +/** + * get_tty_driver - find device of a tty + * @dev_t: device identifier + * @index: returns the index of the tty + * + * This routine returns a tty driver structure, given a device number + * and also passes back the index number. + * + * Locking: caller must hold tty_mutex */ + static struct tty_driver *get_tty_driver(dev_t device, int *index) { struct tty_driver *p; @@ -863,11 +1111,17 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index) return NULL; } -/* - * If we try to write to, or set the state of, a terminal and we're - * not in the foreground, send a SIGTTOU. If the signal is blocked or - * ignored, go ahead and perform the operation. (POSIX 7.2) +/** + * tty_check_change - check for POSIX terminal changes + * @tty: tty to check + * + * If we try to write to, or set the state of, a terminal and we're + * not in the foreground, send a SIGTTOU. If the signal is blocked or + * ignored, go ahead and perform the operation. (POSIX 7.2) + * + * Locking: none */ + int tty_check_change(struct tty_struct * tty) { if (current->signal->tty != tty) @@ -1005,10 +1259,27 @@ void tty_ldisc_flush(struct tty_struct *tty) EXPORT_SYMBOL_GPL(tty_ldisc_flush); -/* - * This can be called by the "eventd" kernel thread. That is process synchronous, - * but doesn't hold any locks, so we need to make sure we have the appropriate - * locks for what we're doing.. +/** + * do_tty_hangup - actual handler for hangup events + * @data: tty device + * + * This can be called by the "eventd" kernel thread. That is process + * synchronous but doesn't hold any locks, so we need to make sure we + * have the appropriate locks for what we're doing. + * + * The hangup event clears any pending redirections onto the hung up + * device. It ensures future writes will error and it does the needed + * line discipline hangup and signal delivery. The tty object itself + * remains intact. + * + * Locking: + * BKL + * redirect lock for undoing redirection + * file list lock for manipulating list of ttys + * tty_ldisc_lock from called functions + * termios_sem resetting termios data + * tasklist_lock to walk task list for hangup event + * */ static void do_tty_hangup(void *data) { @@ -1133,6 +1404,14 @@ static void do_tty_hangup(void *data) fput(f); } +/** + * tty_hangup - trigger a hangup event + * @tty: tty to hangup + * + * A carrier loss (virtual or otherwise) has occurred on this like + * schedule a hangup sequence to run after this event. + */ + void tty_hangup(struct tty_struct * tty) { #ifdef TTY_DEBUG_HANGUP @@ -1145,6 +1424,15 @@ void tty_hangup(struct tty_struct * tty) EXPORT_SYMBOL(tty_hangup); +/** + * tty_vhangup - process vhangup + * @tty: tty to hangup + * + * The user has asked via system call for the terminal to be hung up. + * We do this synchronously so that when the syscall returns the process + * is complete. That guarantee is neccessary for security reasons. 
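/*
 * Editorial illustration, not part of the patch: the tty_register_ldisc() /
 * tty_unregister_ldisc() interfaces documented in the hunks above are used by
 * line-discipline modules roughly as sketched here. The slot number, the
 * structure contents and example_ldisc are hypothetical; a real ldisc also
 * fills in its open/close/receive_buf methods.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_ldisc.h>

static struct tty_ldisc example_ldisc = {
	.magic = TTY_LDISC_MAGIC,
	.name  = "example_ldisc",
	.owner = THIS_MODULE,
};

static int __init example_init(void)
{
	/* N_MOUSE is borrowed here purely as an example slot number */
	return tty_register_ldisc(N_MOUSE, &example_ldisc);
}

static void __exit example_exit(void)
{
	tty_unregister_ldisc(N_MOUSE);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");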
+ */ + void tty_vhangup(struct tty_struct * tty) { #ifdef TTY_DEBUG_HANGUP @@ -1156,6 +1444,14 @@ void tty_vhangup(struct tty_struct * tty) } EXPORT_SYMBOL(tty_vhangup); +/** + * tty_hung_up_p - was tty hung up + * @filp: file pointer of tty + * + * Return true if the tty has been subject to a vhangup or a carrier + * loss + */ + int tty_hung_up_p(struct file * filp) { return (filp->f_op == &hung_up_tty_fops); @@ -1163,19 +1459,28 @@ int tty_hung_up_p(struct file * filp) EXPORT_SYMBOL(tty_hung_up_p); -/* - * This function is typically called only by the session leader, when - * it wants to disassociate itself from its controlling tty. +/** + * disassociate_ctty - disconnect controlling tty + * @on_exit: true if exiting so need to "hang up" the session + * + * This function is typically called only by the session leader, when + * it wants to disassociate itself from its controlling tty. * - * It performs the following functions: + * It performs the following functions: * (1) Sends a SIGHUP and SIGCONT to the foreground process group * (2) Clears the tty from being controlling the session * (3) Clears the controlling tty for all processes in the * session group. * - * The argument on_exit is set to 1 if called when a process is - * exiting; it is 0 if called by the ioctl TIOCNOTTY. + * The argument on_exit is set to 1 if called when a process is + * exiting; it is 0 if called by the ioctl TIOCNOTTY. + * + * Locking: tty_mutex is taken to protect current->signal->tty + * BKL is taken for hysterical raisins + * Tasklist lock is taken (under tty_mutex) to walk process + * lists for the session. */ + void disassociate_ctty(int on_exit) { struct tty_struct *tty; @@ -1222,6 +1527,25 @@ void disassociate_ctty(int on_exit) unlock_kernel(); } + +/** + * stop_tty - propogate flow control + * @tty: tty to stop + * + * Perform flow control to the driver. For PTY/TTY pairs we + * must also propogate the TIOCKPKT status. May be called + * on an already stopped device and will not re-call the driver + * method. + * + * This functionality is used by both the line disciplines for + * halting incoming flow and by the driver. It may therefore be + * called from any context, may be under the tty atomic_write_lock + * but not always. + * + * Locking: + * Broken. Relies on BKL which is unsafe here. + */ + void stop_tty(struct tty_struct *tty) { if (tty->stopped) @@ -1238,6 +1562,19 @@ void stop_tty(struct tty_struct *tty) EXPORT_SYMBOL(stop_tty); +/** + * start_tty - propogate flow control + * @tty: tty to start + * + * Start a tty that has been stopped if at all possible. Perform + * any neccessary wakeups and propogate the TIOCPKT status. If this + * is the tty was previous stopped and is being started then the + * driver start method is invoked and the line discipline woken. + * + * Locking: + * Broken. Relies on BKL which is unsafe here. + */ + void start_tty(struct tty_struct *tty) { if (!tty->stopped || tty->flow_stopped) @@ -1258,6 +1595,23 @@ void start_tty(struct tty_struct *tty) EXPORT_SYMBOL(start_tty); +/** + * tty_read - read method for tty device files + * @file: pointer to tty file + * @buf: user buffer + * @count: size of user buffer + * @ppos: unused + * + * Perform the read system call function on this terminal device. Checks + * for hung up devices before calling the line discipline method. + * + * Locking: + * Locks the line discipline internally while needed + * For historical reasons the line discipline read method is + * invoked under the BKL. 
This will go away in time so do not rely on it + * in new code. Multiple read calls may be outstanding in parallel. + */ + static ssize_t tty_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { @@ -1302,6 +1656,7 @@ static inline ssize_t do_tty_write( ssize_t ret = 0, written = 0; unsigned int chunk; + /* FIXME: O_NDELAY ... */ if (mutex_lock_interruptible(&tty->atomic_write_lock)) { return -ERESTARTSYS; } @@ -1318,6 +1673,9 @@ static inline ssize_t do_tty_write( * layer has problems with bigger chunks. It will * claim to be able to handle more characters than * it actually does. + * + * FIXME: This can probably go away now except that 64K chunks + * are too likely to fail unless switched to vmalloc... */ chunk = 2048; if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags)) @@ -1375,6 +1733,24 @@ static inline ssize_t do_tty_write( } +/** + * tty_write - write method for tty device file + * @file: tty file pointer + * @buf: user data to write + * @count: bytes to write + * @ppos: unused + * + * Write data to a tty device via the line discipline. + * + * Locking: + * Locks the line discipline as required + * Writes to the tty driver are serialized by the atomic_write_lock + * and are then processed in chunks to the device. The line discipline + * write method will not be involked in parallel for each device + * The line discipline write method is called under the big + * kernel lock for historical reasons. New code should not rely on this. + */ + static ssize_t tty_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { @@ -1422,7 +1798,18 @@ ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t static char ptychar[] = "pqrstuvwxyzabcde"; -static inline void pty_line_name(struct tty_driver *driver, int index, char *p) +/** + * pty_line_name - generate name for a pty + * @driver: the tty driver in use + * @index: the minor number + * @p: output buffer of at least 6 bytes + * + * Generate a name from a driver reference and write it to the output + * buffer. + * + * Locking: None + */ +static void pty_line_name(struct tty_driver *driver, int index, char *p) { int i = index + driver->name_base; /* ->name is initialized to "ttyp", but "tty" is expected */ @@ -1431,24 +1818,53 @@ static inline void pty_line_name(struct tty_driver *driver, int index, char *p) ptychar[i >> 4 & 0xf], i & 0xf); } -static inline void tty_line_name(struct tty_driver *driver, int index, char *p) +/** + * pty_line_name - generate name for a tty + * @driver: the tty driver in use + * @index: the minor number + * @p: output buffer of at least 7 bytes + * + * Generate a name from a driver reference and write it to the output + * buffer. + * + * Locking: None + */ +static void tty_line_name(struct tty_driver *driver, int index, char *p) { sprintf(p, "%s%d", driver->name, index + driver->name_base); } -/* +/** + * init_dev - initialise a tty device + * @driver: tty driver we are opening a device on + * @idx: device index + * @tty: returned tty structure + * + * Prepare a tty device. This may not be a "new" clean device but + * could also be an active device. The pty drivers require special + * handling because of this. + * + * Locking: + * The function is called under the tty_mutex, which + * protects us from the tty struct or driver itself going away. + * + * On exit the tty device has the line discipline attached and + * a reference count of 1. If a pair was created for pty/tty use + * and the other was a pty master then it too has a reference count of 1. 
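/*
 * Editorial illustration, not part of the patch: pty_line_name(), shown above,
 * encodes the minor number as one letter from "pqrstuvwxyzabcde" plus one hex
 * digit. A small sketch of the same arithmetic; fmt_pty_name() is hypothetical
 * and works as ordinary C.
 */
#include <stdio.h>

static void fmt_pty_name(int index, char *p /* at least 6 bytes */)
{
	static const char ptychar[] = "pqrstuvwxyzabcde";

	sprintf(p, "tty%c%x", ptychar[(index >> 4) & 0xf], index & 0xf);
}

/* e.g. fmt_pty_name(18, buf) yields "ttyq2": 18 >> 4 == 1 -> 'q', 18 & 0xf == 2 */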
+ * * WSH 06/09/97: Rewritten to remove races and properly clean up after a * failed open. The new code protects the open with a mutex, so it's * really quite straightforward. The mutex locking can probably be * relaxed for the (most common) case of reopening a tty. */ + static int init_dev(struct tty_driver *driver, int idx, struct tty_struct **ret_tty) { struct tty_struct *tty, *o_tty; struct termios *tp, **tp_loc, *o_tp, **o_tp_loc; struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc; - int retval=0; + int retval = 0; /* check whether we're reopening an existing tty */ if (driver->flags & TTY_DRIVER_DEVPTS_MEM) { @@ -1662,10 +2078,20 @@ release_mem_out: goto end_init; } -/* - * Releases memory associated with a tty structure, and clears out the - * driver table slots. +/** + * release_mem - release tty structure memory + * + * Releases memory associated with a tty structure, and clears out the + * driver table slots. This function is called when a device is no longer + * in use. It also gets called when setup of a device fails. + * + * Locking: + * tty_mutex - sometimes only + * takes the file list lock internally when working on the list + * of ttys that the driver keeps. + * FIXME: should we require tty_mutex is held here ?? */ + static void release_mem(struct tty_struct *tty, int idx) { struct tty_struct *o_tty; @@ -2006,18 +2432,27 @@ static void release_dev(struct file * filp) } -/* - * tty_open and tty_release keep up the tty count that contains the - * number of opens done on a tty. We cannot use the inode-count, as - * different inodes might point to the same tty. +/** + * tty_open - open a tty device + * @inode: inode of device file + * @filp: file pointer to tty + * + * tty_open and tty_release keep up the tty count that contains the + * number of opens done on a tty. We cannot use the inode-count, as + * different inodes might point to the same tty. * - * Open-counting is needed for pty masters, as well as for keeping - * track of serial lines: DTR is dropped when the last close happens. - * (This is not done solely through tty->count, now. - Ted 1/27/92) + * Open-counting is needed for pty masters, as well as for keeping + * track of serial lines: DTR is dropped when the last close happens. + * (This is not done solely through tty->count, now. - Ted 1/27/92) * - * The termios state of a pty is reset on first open so that - * settings don't persist across reuse. + * The termios state of a pty is reset on first open so that + * settings don't persist across reuse. + * + * Locking: tty_mutex protects current->signal->tty, get_tty_driver and + * init_dev work. tty->count should protect the rest. + * task_lock is held to update task details for sessions */ + static int tty_open(struct inode * inode, struct file * filp) { struct tty_struct *tty; @@ -2132,6 +2567,18 @@ got_driver: } #ifdef CONFIG_UNIX98_PTYS +/** + * ptmx_open - open a unix 98 pty master + * @inode: inode of device file + * @filp: file pointer to tty + * + * Allocate a unix98 pty master device from the ptmx driver. + * + * Locking: tty_mutex protects theinit_dev work. tty->count should + protect the rest. + * allocated_ptys_lock handles the list of free pty numbers + */ + static int ptmx_open(struct inode * inode, struct file * filp) { struct tty_struct *tty; @@ -2191,6 +2638,18 @@ out: } #endif +/** + * tty_release - vfs callback for close + * @inode: inode of tty + * @filp: file pointer for handle to tty + * + * Called the last time each file handle is closed that references + * this tty. 
There may however be several such references. + * + * Locking: + * Takes bkl. See release_dev + */ + static int tty_release(struct inode * inode, struct file * filp) { lock_kernel(); @@ -2199,7 +2658,18 @@ static int tty_release(struct inode * inode, struct file * filp) return 0; } -/* No kernel lock held - fine */ +/** + * tty_poll - check tty status + * @filp: file being polled + * @wait: poll wait structures to update + * + * Call the line discipline polling method to obtain the poll + * status of the device. + * + * Locking: locks called line discipline but ldisc poll method + * may be re-entered freely by other callers. + */ + static unsigned int tty_poll(struct file * filp, poll_table * wait) { struct tty_struct * tty; @@ -2243,6 +2713,21 @@ static int tty_fasync(int fd, struct file * filp, int on) return 0; } +/** + * tiocsti - fake input character + * @tty: tty to fake input into + * @p: pointer to character + * + * Fake input to a tty device. Does the neccessary locking and + * input management. + * + * FIXME: does not honour flow control ?? + * + * Locking: + * Called functions take tty_ldisc_lock + * current->signal->tty check is safe without locks + */ + static int tiocsti(struct tty_struct *tty, char __user *p) { char ch, mbz = 0; @@ -2258,6 +2743,18 @@ static int tiocsti(struct tty_struct *tty, char __user *p) return 0; } +/** + * tiocgwinsz - implement window query ioctl + * @tty; tty + * @arg: user buffer for result + * + * Copies the kernel idea of the window size into the user buffer. No + * locking is done. + * + * FIXME: Returning random values racing a window size set is wrong + * should lock here against that + */ + static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg) { if (copy_to_user(arg, &tty->winsize, sizeof(*arg))) @@ -2265,6 +2762,24 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg) return 0; } +/** + * tiocswinsz - implement window size set ioctl + * @tty; tty + * @arg: user buffer for result + * + * Copies the user idea of the window size to the kernel. Traditionally + * this is just advisory information but for the Linux console it + * actually has driver level meaning and triggers a VC resize. + * + * Locking: + * The console_sem is used to ensure we do not try and resize + * the console twice at once. + * FIXME: Two racing size sets may leave the console and kernel + * parameters disagreeing. Is this exploitable ? + * FIXME: Random values racing a window size get is wrong + * should lock here against that + */ + static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, struct winsize __user * arg) { @@ -2294,6 +2809,15 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, return 0; } +/** + * tioccons - allow admin to move logical console + * @file: the file to become console + * + * Allow the adminstrator to move the redirected console device + * + * Locking: uses redirect_lock to guard the redirect information + */ + static int tioccons(struct file *file) { if (!capable(CAP_SYS_ADMIN)) @@ -2319,6 +2843,17 @@ static int tioccons(struct file *file) return 0; } +/** + * fionbio - non blocking ioctl + * @file: file to set blocking value + * @p: user parameter + * + * Historical tty interfaces had a blocking control ioctl before + * the generic functionality existed. This piece of history is preserved + * in the expected tty API of posix OS's. + * + * Locking: none, the open fle handle ensures it won't go away. 
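/*
 * Editorial illustration, not part of the patch: tiocgwinsz(), documented
 * above, backs the TIOCGWINSZ ioctl that user space uses to read the terminal
 * window size. A minimal user-space sketch with error handling reduced to the
 * bare minimum.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;

	if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == -1) {
		perror("TIOCGWINSZ");
		return 1;
	}
	printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
	return 0;
}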
+ */ static int fionbio(struct file *file, int __user *p) { @@ -2334,6 +2869,23 @@ static int fionbio(struct file *file, int __user *p) return 0; } +/** + * tiocsctty - set controlling tty + * @tty: tty structure + * @arg: user argument + * + * This ioctl is used to manage job control. It permits a session + * leader to set this tty as the controlling tty for the session. + * + * Locking: + * Takes tasklist lock internally to walk sessions + * Takes task_lock() when updating signal->tty + * + * FIXME: tty_mutex is needed to protect signal->tty references. + * FIXME: why task_lock on the signal->tty reference ?? + * + */ + static int tiocsctty(struct tty_struct *tty, int arg) { struct task_struct *p; @@ -2374,6 +2926,18 @@ static int tiocsctty(struct tty_struct *tty, int arg) return 0; } +/** + * tiocgpgrp - get process group + * @tty: tty passed by user + * @real_tty: tty side of the tty pased by the user if a pty else the tty + * @p: returned pid + * + * Obtain the process group of the tty. If there is no process group + * return an error. + * + * Locking: none. Reference to ->signal->tty is safe. + */ + static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { /* @@ -2385,6 +2949,20 @@ static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t return put_user(real_tty->pgrp, p); } +/** + * tiocspgrp - attempt to set process group + * @tty: tty passed by user + * @real_tty: tty side device matching tty passed by user + * @p: pid pointer + * + * Set the process group of the tty to the session passed. Only + * permitted where the tty session is our session. + * + * Locking: None + * + * FIXME: current->signal->tty referencing is unsafe. + */ + static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { pid_t pgrp; @@ -2408,6 +2986,18 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t return 0; } +/** + * tiocgsid - get session id + * @tty: tty passed by user + * @real_tty: tty side of the tty pased by the user if a pty else the tty + * @p: pointer to returned session id + * + * Obtain the session id of the tty. If there is no session + * return an error. + * + * Locking: none. Reference to ->signal->tty is safe. + */ + static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { /* @@ -2421,6 +3011,16 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _ return put_user(real_tty->session, p); } +/** + * tiocsetd - set line discipline + * @tty: tty device + * @p: pointer to user data + * + * Set the line discipline according to user request. + * + * Locking: see tty_set_ldisc, this function is just a helper + */ + static int tiocsetd(struct tty_struct *tty, int __user *p) { int ldisc; @@ -2430,6 +3030,21 @@ static int tiocsetd(struct tty_struct *tty, int __user *p) return tty_set_ldisc(tty, ldisc); } +/** + * send_break - performed time break + * @tty: device to break on + * @duration: timeout in mS + * + * Perform a timed break on hardware that lacks its own driver level + * timed break functionality. 
+ * + * Locking: + * None + * + * FIXME: + * What if two overlap + */ + static int send_break(struct tty_struct *tty, unsigned int duration) { tty->driver->break_ctl(tty, -1); @@ -2442,8 +3057,19 @@ static int send_break(struct tty_struct *tty, unsigned int duration) return 0; } -static int -tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p) +/** + * tiocmget - get modem status + * @tty: tty device + * @file: user file pointer + * @p: pointer to result + * + * Obtain the modem status bits from the tty driver if the feature + * is supported. Return -EINVAL if it is not available. + * + * Locking: none (up to the driver) + */ + +static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p) { int retval = -EINVAL; @@ -2456,8 +3082,20 @@ tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p) return retval; } -static int -tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd, +/** + * tiocmset - set modem status + * @tty: tty device + * @file: user file pointer + * @cmd: command - clear bits, set bits or set all + * @p: pointer to desired bits + * + * Set the modem status bits from the tty driver if the feature + * is supported. Return -EINVAL if it is not available. + * + * Locking: none (up to the driver) + */ + +static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned __user *p) { int retval = -EINVAL; @@ -2573,6 +3211,7 @@ int tty_ioctl(struct inode * inode, struct file * file, clear_bit(TTY_EXCLUSIVE, &tty->flags); return 0; case TIOCNOTTY: + /* FIXME: taks lock or tty_mutex ? */ if (current->signal->tty != tty) return -ENOTTY; if (current->signal->leader) @@ -2753,9 +3392,16 @@ void do_SAK(struct tty_struct *tty) EXPORT_SYMBOL(do_SAK); -/* - * This routine is called out of the software interrupt to flush data - * from the buffer chain to the line discipline. +/** + * flush_to_ldisc + * @private_: tty structure passed from work queue. + * + * This routine is called out of the software interrupt to flush data + * from the buffer chain to the line discipline. + * + * Locking: holds tty->buf.lock to guard buffer list. Drops the lock + * while invoking the line discipline receive_buf method. The + * receive_buf method is single threaded for each tty instance. */ static void flush_to_ldisc(void *private_) @@ -2831,6 +3477,8 @@ static int n_baud_table = ARRAY_SIZE(baud_table); * Convert termios baud rate data into a speed. This should be called * with the termios lock held if this termios is a terminal termios * structure. May change the termios data. + * + * Locking: none */ int tty_termios_baud_rate(struct termios *termios) @@ -2859,6 +3507,8 @@ EXPORT_SYMBOL(tty_termios_baud_rate); * Returns the baud rate as an integer for this terminal. The * termios lock must be held by the caller and the terminal bit * flags may be updated. + * + * Locking: none */ int tty_get_baud_rate(struct tty_struct *tty) @@ -2888,6 +3538,8 @@ EXPORT_SYMBOL(tty_get_baud_rate); * * In the event of the queue being busy for flipping the work will be * held off and retried later. + * + * Locking: tty buffer lock. Driver locks in low latency mode. */ void tty_flip_buffer_push(struct tty_struct *tty) @@ -2907,9 +3559,16 @@ void tty_flip_buffer_push(struct tty_struct *tty) EXPORT_SYMBOL(tty_flip_buffer_push); -/* - * This subroutine initializes a tty structure. +/** + * initialize_tty_struct + * @tty: tty to initialize + * + * This subroutine initializes a tty structure that has been newly + * allocated. 
+ * + * Locking: none - tty in question must not be exposed at this point */ + static void initialize_tty_struct(struct tty_struct *tty) { memset(tty, 0, sizeof(struct tty_struct)); @@ -2935,6 +3594,7 @@ static void initialize_tty_struct(struct tty_struct *tty) /* * The default put_char routine if the driver did not define one. */ + static void tty_default_put_char(struct tty_struct *tty, unsigned char ch) { tty->driver->write(tty, &ch, 1); @@ -2943,19 +3603,23 @@ static void tty_default_put_char(struct tty_struct *tty, unsigned char ch) static struct class *tty_class; /** - * tty_register_device - register a tty device - * @driver: the tty driver that describes the tty device - * @index: the index in the tty driver for this tty device - * @device: a struct device that is associated with this tty device. - * This field is optional, if there is no known struct device for this - * tty device it can be set to NULL safely. + * tty_register_device - register a tty device + * @driver: the tty driver that describes the tty device + * @index: the index in the tty driver for this tty device + * @device: a struct device that is associated with this tty device. + * This field is optional, if there is no known struct device + * for this tty device it can be set to NULL safely. * - * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error). + * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error). * - * This call is required to be made to register an individual tty device if - * the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If that - * bit is not set, this function should not be called by a tty driver. + * This call is required to be made to register an individual tty device + * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If + * that bit is not set, this function should not be called by a tty + * driver. + * + * Locking: ?? */ + struct class_device *tty_register_device(struct tty_driver *driver, unsigned index, struct device *device) { @@ -2977,13 +3641,16 @@ struct class_device *tty_register_device(struct tty_driver *driver, } /** - * tty_unregister_device - unregister a tty device - * @driver: the tty driver that describes the tty device - * @index: the index in the tty driver for this tty device + * tty_unregister_device - unregister a tty device + * @driver: the tty driver that describes the tty device + * @index: the index in the tty driver for this tty device * - * If a tty device is registered with a call to tty_register_device() then - * this function must be made when the tty device is gone. + * If a tty device is registered with a call to tty_register_device() then + * this function must be called when the tty device is gone. + * + * Locking: ?? 
*/ + void tty_unregister_device(struct tty_driver *driver, unsigned index) { class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index); @@ -3094,7 +3761,6 @@ int tty_register_driver(struct tty_driver *driver) driver->cdev.owner = driver->owner; error = cdev_add(&driver->cdev, dev, driver->num); if (error) { - cdev_del(&driver->cdev); unregister_chrdev_region(dev, driver->num); driver->ttys = NULL; driver->termios = driver->termios_locked = NULL; diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c index f19cf9d7792..4ad47d321bd 100644 --- a/drivers/char/tty_ioctl.c +++ b/drivers/char/tty_ioctl.c @@ -36,6 +36,18 @@ #define TERMIOS_WAIT 2 #define TERMIOS_TERMIO 4 + +/** + * tty_wait_until_sent - wait for I/O to finish + * @tty: tty we are waiting for + * @timeout: how long we will wait + * + * Wait for characters pending in a tty driver to hit the wire, or + * for a timeout to occur (eg due to flow control) + * + * Locking: none + */ + void tty_wait_until_sent(struct tty_struct * tty, long timeout) { DECLARE_WAITQUEUE(wait, current); @@ -94,6 +106,18 @@ static void unset_locked_termios(struct termios *termios, old->c_cc[i] : termios->c_cc[i]; } +/** + * change_termios - update termios values + * @tty: tty to update + * @new_termios: desired new value + * + * Perform updates to the termios values set on this terminal. There + * is a bit of layering violation here with n_tty in terms of the + * internal knowledge of this function. + * + * Locking: termios_sem + */ + static void change_termios(struct tty_struct * tty, struct termios * new_termios) { int canon_change; @@ -155,6 +179,19 @@ static void change_termios(struct tty_struct * tty, struct termios * new_termios up(&tty->termios_sem); } +/** + * set_termios - set termios values for a tty + * @tty: terminal device + * @arg: user data + * @opt: option information + * + * Helper function to prepare termios data and run neccessary other + * functions before using change_termios to do the actual changes. + * + * Locking: + * Called functions take ldisc and termios_sem locks + */ + static int set_termios(struct tty_struct * tty, void __user *arg, int opt) { struct termios tmp_termios; @@ -284,6 +321,17 @@ static void set_sgflags(struct termios * termios, int flags) } } +/** + * set_sgttyb - set legacy terminal values + * @tty: tty structure + * @sgttyb: pointer to old style terminal structure + * + * Updates a terminal from the legacy BSD style terminal information + * structure. + * + * Locking: termios_sem + */ + static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) { int retval; @@ -369,9 +417,16 @@ static int set_ltchars(struct tty_struct * tty, struct ltchars __user * ltchars) } #endif -/* - * Send a high priority character to the tty. 
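/*
 * Editorial illustration, not part of the patch: the tty_register_driver()
 * hunk above drops the cdev_del() call from the cdev_add() failure path,
 * because a cdev that was never added must not be deleted. A sketch of the
 * corrected pattern; example_register(), example_cdev and example_fops are
 * hypothetical.
 */
#include <linux/cdev.h>
#include <linux/fs.h>

static struct cdev example_cdev;
static struct file_operations example_fops;	/* methods omitted in this sketch */

static int example_register(dev_t dev, unsigned count)
{
	int err;

	err = register_chrdev_region(dev, count, "example");
	if (err)
		return err;

	cdev_init(&example_cdev, &example_fops);
	err = cdev_add(&example_cdev, dev, count);
	if (err) {
		/* cdev_add() failed, so only the region needs undoing */
		unregister_chrdev_region(dev, count);
		return err;
	}
	return 0;
}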
+/** + * send_prio_char - send priority character + * + * Send a high priority character to the tty even if stopped + * + * Locking: none + * + * FIXME: overlapping calls with start/stop tty lose state of tty */ + static void send_prio_char(struct tty_struct *tty, char ch) { int was_stopped = tty->stopped; diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index eccffaf26fa..a5628a8b662 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -1011,6 +1011,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, return -EPERM; vt_dont_switch = 0; return 0; + case VT_GETHIFONTMASK: + return put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg); default: return -ENOIOCTLCMD; } diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig index d53f664a4dd..fff89c2d88f 100644 --- a/drivers/char/watchdog/Kconfig +++ b/drivers/char/watchdog/Kconfig @@ -45,7 +45,7 @@ config WATCHDOG_NOWAYOUT comment "Watchdog Device Drivers" depends on WATCHDOG -# Architecture Independant +# Architecture Independent config SOFT_WATCHDOG tristate "Software watchdog" @@ -127,7 +127,7 @@ config S3C2410_WATCHDOG enabled. The driver is limited by the speed of the system's PCLK - signal, so with reasonbaly fast systems (PCLK around 50-66MHz) + signal, so with reasonably fast systems (PCLK around 50-66MHz) then watchdog intervals of over approximately 20seconds are unavailable. @@ -423,7 +423,7 @@ config SBC_EPX_C3_WATCHDOG is no way to know if writing to its IO address will corrupt your system or have any real effect. The only way to be sure that this driver does what you want is to make sure you - are runnning it on an EPX-C3 from Winsystems with the watchdog + are running it on an EPX-C3 from Winsystems with the watchdog timer at IO address 0x1ee and 0x1ef. It will write to both those IO ports. Basically, the assumption is made that if you compile this driver into your kernel and/or load it as a module, that you @@ -472,7 +472,7 @@ config INDYDOG tristate "Indy/I2 Hardware Watchdog" depends on WATCHDOG && SGI_IP22 help - Hardwaredriver for the Indy's/I2's watchdog. This is a + Hardware driver for the Indy's/I2's watchdog. This is a watchdog timer that will reboot the machine after a 60 second timer expired and no process has written to /dev/watchdog during that time. diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c index cc15c4f2e9e..35ad1b03272 100644 --- a/drivers/hwmon/abituguru.c +++ b/drivers/hwmon/abituguru.c @@ -26,6 +26,7 @@ #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/err.h> +#include <linux/delay.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> @@ -64,17 +65,17 @@ #define ABIT_UGURU_IN_SENSOR 0 #define ABIT_UGURU_TEMP_SENSOR 1 #define ABIT_UGURU_NC 2 -/* Timeouts / Retries, if these turn out to need a lot of fiddling we could - convert them to params. */ -/* 250 was determined by trial and error, 200 works most of the time, but not - always. I assume this is cpu-speed independent, since the ISA-bus and not - the CPU should be the bottleneck. Note that 250 sometimes is still not - enough (only reported on AN7 mb) this is handled by a higher layer. */ -#define ABIT_UGURU_WAIT_TIMEOUT 250 +/* In many cases we need to wait for the uGuru to reach a certain status, most + of the time it will reach this status within 30 - 90 ISA reads, and thus we + can best busy wait. This define gives the total amount of reads to try. 
*/ +#define ABIT_UGURU_WAIT_TIMEOUT 125 +/* However sometimes older versions of the uGuru seem to be distracted and they + do not respond for a long time. To handle this we sleep before each of the + last ABIT_UGURU_WAIT_TIMEOUT_SLEEP tries. */ +#define ABIT_UGURU_WAIT_TIMEOUT_SLEEP 5 /* Normally all expected status in abituguru_ready, are reported after the - first read, but sometimes not and we need to poll, 5 polls was not enough - 50 sofar is. */ -#define ABIT_UGURU_READY_TIMEOUT 50 + first read, but sometimes not and we need to poll. */ +#define ABIT_UGURU_READY_TIMEOUT 5 /* Maximum 3 retries on timedout reads/writes, delay 200 ms before retrying */ #define ABIT_UGURU_MAX_RETRIES 3 #define ABIT_UGURU_RETRY_DELAY (HZ/5) @@ -226,6 +227,10 @@ static int abituguru_wait(struct abituguru_data *data, u8 state) timeout--; if (timeout == 0) return -EBUSY; + /* sleep a bit before our last few tries, see the comment on + this where ABIT_UGURU_WAIT_TIMEOUT_SLEEP is defined. */ + if (timeout <= ABIT_UGURU_WAIT_TIMEOUT_SLEEP) + msleep(0); } return 0; } @@ -256,6 +261,7 @@ static int abituguru_ready(struct abituguru_data *data) "CMD reg does not hold 0xAC after ready command\n"); return -EIO; } + msleep(0); } /* After this the ABIT_UGURU_DATA port should contain @@ -268,6 +274,7 @@ static int abituguru_ready(struct abituguru_data *data) "state != more input after ready command\n"); return -EIO; } + msleep(0); } data->uguru_ready = 1; @@ -331,7 +338,8 @@ static int abituguru_read(struct abituguru_data *data, /* And read the data */ for (i = 0; i < count; i++) { if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) { - ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for " + ABIT_UGURU_DEBUG(retries ? 1 : 3, + "timeout exceeded waiting for " "read state (bank: %d, sensor: %d)\n", (int)bank_addr, (int)sensor_addr); break; @@ -350,7 +358,9 @@ static int abituguru_read(struct abituguru_data *data, static int abituguru_write(struct abituguru_data *data, u8 bank_addr, u8 sensor_addr, u8 *buf, int count) { - int i; + /* We use the ready timeout as we have to wait for 0xAC just like the + ready function */ + int i, timeout = ABIT_UGURU_READY_TIMEOUT; /* Send the address */ i = abituguru_send_address(data, bank_addr, sensor_addr, @@ -370,7 +380,8 @@ static int abituguru_write(struct abituguru_data *data, } /* Now we need to wait till the chip is ready to be read again, - don't ask why */ + so that we can read 0xAC as confirmation that our write has + succeeded. 
*/ if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) { ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for read state " "after write (bank: %d, sensor: %d)\n", (int)bank_addr, @@ -379,11 +390,15 @@ static int abituguru_write(struct abituguru_data *data, } /* Cmd port MUST be read now and should contain 0xAC */ - if (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) { - ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after write " - "(bank: %d, sensor: %d)\n", (int)bank_addr, - (int)sensor_addr); - return -EIO; + while (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) { + timeout--; + if (timeout == 0) { + ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after " + "write (bank: %d, sensor: %d)\n", + (int)bank_addr, (int)sensor_addr); + return -EIO; + } + msleep(0); } /* Last put the chip back in ready state */ @@ -403,7 +418,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, u8 sensor_addr) { u8 val, buf[3]; - int ret = ABIT_UGURU_NC; + int i, ret = -ENODEV; /* error is the most common used retval :| */ /* If overriden by the user return the user selected type */ if (bank1_types[sensor_addr] >= ABIT_UGURU_IN_SENSOR && @@ -439,7 +454,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, buf[2] = 250; if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, buf, 3) != 3) - return -ENODEV; + goto abituguru_detect_bank1_sensor_type_exit; /* Now we need 20 ms to give the uguru time to read the sensors and raise a voltage alarm */ set_current_state(TASK_UNINTERRUPTIBLE); @@ -447,21 +462,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, /* Check for alarm and check the alarm is a volt low alarm. */ if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3, ABIT_UGURU_MAX_RETRIES) != 3) - return -ENODEV; + goto abituguru_detect_bank1_sensor_type_exit; if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) { if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1, sensor_addr, buf, 3, ABIT_UGURU_MAX_RETRIES) != 3) - return -ENODEV; + goto abituguru_detect_bank1_sensor_type_exit; if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) { - /* Restore original settings */ - if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, - sensor_addr, - data->bank1_settings[sensor_addr], - 3) != 3) - return -ENODEV; ABIT_UGURU_DEBUG(2, " found volt sensor\n"); - return ABIT_UGURU_IN_SENSOR; + ret = ABIT_UGURU_IN_SENSOR; + goto abituguru_detect_bank1_sensor_type_exit; } else ABIT_UGURU_DEBUG(2, " alarm raised during volt " "sensor test, but volt low flag not set\n"); @@ -477,7 +487,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, buf[2] = 10; if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, buf, 3) != 3) - return -ENODEV; + goto abituguru_detect_bank1_sensor_type_exit; /* Now we need 50 ms to give the uguru time to read the sensors and raise a temp alarm */ set_current_state(TASK_UNINTERRUPTIBLE); @@ -485,15 +495,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, /* Check for alarm and check the alarm is a temp high alarm. 
*/ if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3, ABIT_UGURU_MAX_RETRIES) != 3) - return -ENODEV; + goto abituguru_detect_bank1_sensor_type_exit; if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) { if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1, sensor_addr, buf, 3, ABIT_UGURU_MAX_RETRIES) != 3) - return -ENODEV; + goto abituguru_detect_bank1_sensor_type_exit; if (buf[0] & ABIT_UGURU_TEMP_HIGH_ALARM_FLAG) { - ret = ABIT_UGURU_TEMP_SENSOR; ABIT_UGURU_DEBUG(2, " found temp sensor\n"); + ret = ABIT_UGURU_TEMP_SENSOR; + goto abituguru_detect_bank1_sensor_type_exit; } else ABIT_UGURU_DEBUG(2, " alarm raised during temp " "sensor test, but temp high flag not set\n"); @@ -501,11 +512,23 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data, ABIT_UGURU_DEBUG(2, " alarm not raised during temp sensor " "test\n"); - /* Restore original settings */ - if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, - data->bank1_settings[sensor_addr], 3) != 3) + ret = ABIT_UGURU_NC; +abituguru_detect_bank1_sensor_type_exit: + /* Restore original settings, failing here is really BAD, it has been + reported that some BIOS-es hang when entering the uGuru menu with + invalid settings present in the uGuru, so we try this 3 times. */ + for (i = 0; i < 3; i++) + if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, + sensor_addr, data->bank1_settings[sensor_addr], + 3) == 3) + break; + if (i == 3) { + printk(KERN_ERR ABIT_UGURU_NAME + ": Fatal error could not restore original settings. " + "This should never happen please report this to the " + "abituguru maintainer (see MAINTAINERS)\n"); return -ENODEV; - + } return ret; } @@ -1305,7 +1328,7 @@ static struct abituguru_data *abituguru_update_device(struct device *dev) data->update_timeouts = 0; LEAVE_UPDATE: /* handle timeout condition */ - if (err == -EBUSY) { + if (!success && (err == -EBUSY || err >= 0)) { /* No overflow please */ if (data->update_timeouts < 255u) data->update_timeouts++; diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c index e7e27049fbf..0be6fd6a267 100644 --- a/drivers/i2c/chips/tps65010.c +++ b/drivers/i2c/chips/tps65010.c @@ -43,13 +43,12 @@ /*-------------------------------------------------------------------------*/ #define DRIVER_VERSION "2 May 2005" -#define DRIVER_NAME (tps65010_driver.name) +#define DRIVER_NAME (tps65010_driver.driver.name) MODULE_DESCRIPTION("TPS6501x Power Management Driver"); MODULE_LICENSE("GPL"); static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END }; -static unsigned short normal_i2c_range[] = { I2C_CLIENT_END }; I2C_CLIENT_INSMOD; @@ -100,7 +99,7 @@ struct tps65010 { /* not currently tracking GPIO state */ }; -#define POWER_POLL_DELAY msecs_to_jiffies(800) +#define POWER_POLL_DELAY msecs_to_jiffies(5000) /*-------------------------------------------------------------------------*/ @@ -520,8 +519,11 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind) goto fail1; } + /* the IRQ is active low, but many gpio lines can't support that + * so this driver can use falling-edge triggers instead. + */ + irqflags = IRQF_SAMPLE_RANDOM; #ifdef CONFIG_ARM - irqflags = IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_LOW; if (machine_is_omap_h2()) { tps->model = TPS65010; omap_cfg_reg(W4_GPIO58); @@ -543,8 +545,6 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind) // FIXME set up this board's IRQ ... 
} -#else - irqflags = IRQF_SAMPLE_RANDOM; #endif if (tps->irq > 0) { diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c index 2f962cfa3f7..78810ba982e 100644 --- a/drivers/ide/pci/generic.c +++ b/drivers/ide/pci/generic.c @@ -180,6 +180,36 @@ static ide_pci_device_t generic_chipsets[] __devinitdata = { .channels = 2, .autodma = AUTODMA, .bootable = OFF_BOARD, + },{ /* 15 */ + .name = "JMB361", + .init_hwif = init_hwif_generic, + .channels = 2, + .autodma = AUTODMA, + .bootable = OFF_BOARD, + },{ /* 16 */ + .name = "JMB363", + .init_hwif = init_hwif_generic, + .channels = 2, + .autodma = AUTODMA, + .bootable = OFF_BOARD, + },{ /* 17 */ + .name = "JMB365", + .init_hwif = init_hwif_generic, + .channels = 2, + .autodma = AUTODMA, + .bootable = OFF_BOARD, + },{ /* 18 */ + .name = "JMB366", + .init_hwif = init_hwif_generic, + .channels = 2, + .autodma = AUTODMA, + .bootable = OFF_BOARD, + },{ /* 19 */ + .name = "JMB368", + .init_hwif = init_hwif_generic, + .channels = 2, + .autodma = AUTODMA, + .bootable = OFF_BOARD, } }; diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c index afdaee3c15c..9b7589e8e93 100644 --- a/drivers/ide/pci/via82cxxx.c +++ b/drivers/ide/pci/via82cxxx.c @@ -6,7 +6,7 @@ * * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b, * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a, - * vt8235, vt8237 + * vt8235, vt8237, vt8237a * * Copyright (c) 2000-2002 Vojtech Pavlik * @@ -81,6 +81,7 @@ static struct via_isa_bridge { { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, + { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 }, diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index d4bad6704bb..448df277337 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c @@ -3552,6 +3552,8 @@ static int ohci1394_pci_resume (struct pci_dev *pdev) static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) { + pci_save_state(pdev); + #ifdef CONFIG_PPC_PMAC if (machine_is(powermac)) { struct device_node *of_node; @@ -3563,8 +3565,6 @@ static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) } #endif - pci_save_state(pdev); - return 0; } diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index e05ca2cdc73..75313ade2e0 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -301,7 +301,8 @@ static void ib_cache_event(struct ib_event_handler *handler, event->event == IB_EVENT_PORT_ACTIVE || event->event == IB_EVENT_LID_CHANGE || event->event == IB_EVENT_PKEY_CHANGE || - event->event == IB_EVENT_SM_CHANGE) { + event->event == IB_EVENT_SM_CHANGE || + event->event == IB_EVENT_CLIENT_REREGISTER) { work = kmalloc(sizeof *work, GFP_ATOMIC); if (work) { INIT_WORK(&work->work, ib_cache_task, work); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index aeda484ffd8..d6b84226bba 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -405,7 +405,8 @@ static void ib_sa_event(struct ib_event_handler *handler, struct 
ib_event *event event->event == IB_EVENT_PORT_ACTIVE || event->event == IB_EVENT_LID_CHANGE || event->event == IB_EVENT_PKEY_CHANGE || - event->event == IB_EVENT_SM_CHANGE) { + event->event == IB_EVENT_SM_CHANGE || + event->event == IB_EVENT_CLIENT_REREGISTER) { struct ib_sa_device *sa_dev; sa_dev = container_of(handler, typeof(*sa_dev), event_handler); diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 557cde3a456..7b82c1907f0 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -967,12 +967,12 @@ static struct { } mthca_hca_table[] = { [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0), .flags = 0 }, - [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 400), + [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600), .flags = MTHCA_FLAG_PCIE }, - [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), + [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE }, - [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 800), + [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0), .flags = MTHCA_FLAG_MEMFREE | MTHCA_FLAG_PCIE | MTHCA_FLAG_SINAI_OPT } diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 230ae21db8f..265b1d1c4a6 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1287,11 +1287,7 @@ int mthca_register_device(struct mthca_dev *dev) (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | - (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | - (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | - (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | - (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | - (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST); dev->ib_dev.node_type = IB_NODE_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; dev->ib_dev.dma_device = &dev->pdev->dev; @@ -1316,6 +1312,11 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.modify_srq = mthca_modify_srq; dev->ib_dev.query_srq = mthca_query_srq; dev->ib_dev.destroy_srq = mthca_destroy_srq; + dev->ib_dev.uverbs_cmd_mask |= + (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); if (mthca_is_memfree(dev)) dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 8de2887ba15..9a5bece3fa5 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -136,8 +136,8 @@ struct mthca_ah { * We have one global lock that protects dev->cq/qp_table. Each * struct mthca_cq/qp also has its own lock. An individual qp lock * may be taken inside of an individual cq lock. Both cqs attached to - * a qp may be locked, with the send cq locked first. No other - * nesting should be done. + * a qp may be locked, with the cq with the lower cqn locked first. + * No other nesting should be done. * * Each struct mthca_cq/qp also has an ref count, protected by the * corresponding table lock. 
The pointer from the cq/qp_table to the diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index cd8b6721ac9..2e8f6f36e0a 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -99,6 +99,10 @@ enum { MTHCA_QP_BIT_RSC = 1 << 3 }; +enum { + MTHCA_SEND_DOORBELL_FENCE = 1 << 5 +}; + struct mthca_qp_path { __be32 port_pkey; u8 rnr_retry; @@ -1259,6 +1263,32 @@ int mthca_alloc_qp(struct mthca_dev *dev, return 0; } +static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) +{ + if (send_cq == recv_cq) + spin_lock_irq(&send_cq->lock); + else if (send_cq->cqn < recv_cq->cqn) { + spin_lock_irq(&send_cq->lock); + spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); + } else { + spin_lock_irq(&recv_cq->lock); + spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); + } +} + +static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) +{ + if (send_cq == recv_cq) + spin_unlock_irq(&send_cq->lock); + else if (send_cq->cqn < recv_cq->cqn) { + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else { + spin_unlock(&send_cq->lock); + spin_unlock_irq(&recv_cq->lock); + } +} + int mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, @@ -1311,17 +1341,13 @@ int mthca_alloc_sqp(struct mthca_dev *dev, * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. */ - spin_lock_irq(&send_cq->lock); - if (send_cq != recv_cq) - spin_lock(&recv_cq->lock); + mthca_lock_cqs(send_cq, recv_cq); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, mqpn); spin_unlock(&dev->qp_table.lock); - if (send_cq != recv_cq) - spin_unlock(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + mthca_unlock_cqs(send_cq, recv_cq); err_out: dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, @@ -1355,9 +1381,7 @@ void mthca_free_qp(struct mthca_dev *dev, * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. */ - spin_lock_irq(&send_cq->lock); - if (send_cq != recv_cq) - spin_lock(&recv_cq->lock); + mthca_lock_cqs(send_cq, recv_cq); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, @@ -1365,9 +1389,7 @@ void mthca_free_qp(struct mthca_dev *dev, --qp->refcount; spin_unlock(&dev->qp_table.lock); - if (send_cq != recv_cq) - spin_unlock(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + mthca_unlock_cqs(send_cq, recv_cq); wait_event(qp->wait, !get_qp_refcount(dev, qp)); @@ -1502,7 +1524,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, int i; int size; int size0 = 0; - u32 f0 = 0; + u32 f0; int ind; u8 op0 = 0; @@ -1686,6 +1708,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, if (!size0) { size0 = size; op0 = mthca_opcode[wr->opcode]; + f0 = wr->send_flags & IB_SEND_FENCE ? + MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; @@ -1843,7 +1867,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, int i; int size; int size0 = 0; - u32 f0 = 0; + u32 f0; int ind; u8 op0 = 0; @@ -2051,6 +2075,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, if (!size0) { size0 = size; op0 = mthca_opcode[wr->opcode]; + f0 = wr->send_flags & IB_SEND_FENCE ? 
+ MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 34b0da5cfa0..1437d7ee3b1 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -378,21 +378,6 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) return iser_conn_set_full_featured_mode(conn); } -static void -iscsi_iser_conn_terminate(struct iscsi_conn *conn) -{ - struct iscsi_iser_conn *iser_conn = conn->dd_data; - struct iser_conn *ib_conn = iser_conn->ib_conn; - - BUG_ON(!ib_conn); - /* starts conn teardown process, waits until all previously * - * posted buffers get flushed, deallocates all conn resources */ - iser_conn_terminate(ib_conn); - iser_conn->ib_conn = NULL; - conn->recv_lock = NULL; -} - - static struct iscsi_transport iscsi_iser_transport; static struct iscsi_cls_session * @@ -555,13 +540,13 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms) static void iscsi_iser_ep_disconnect(__u64 ep_handle) { - struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); + struct iser_conn *ib_conn; + ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); if (!ib_conn) return; iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state); - iser_conn_terminate(ib_conn); } @@ -614,9 +599,6 @@ static struct iscsi_transport iscsi_iser_transport = { .get_session_param = iscsi_session_get_param, .start_conn = iscsi_iser_conn_start, .stop_conn = iscsi_conn_stop, - /* these are called as part of conn recovery */ - .suspend_conn_recv = NULL, /* FIXME is/how this relvant to iser? */ - .terminate_conn = iscsi_iser_conn_terminate, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_iser_conn_get_stats, diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 6bfa0cf4b1d..a86afd0a5ef 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -498,7 +498,7 @@ static int atkbd_set_repeat_rate(struct atkbd *atkbd) i++; dev->rep[REP_PERIOD] = period[i]; - while (j < ARRAY_SIZE(period) - 1 && delay[j] < dev->rep[REP_DELAY]) + while (j < ARRAY_SIZE(delay) - 1 && delay[j] < dev->rep[REP_DELAY]) j++; dev->rep[REP_DELAY] = delay[j]; diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c index a8efc1af36c..de0f46dd969 100644 --- a/drivers/input/misc/wistron_btns.c +++ b/drivers/input/misc/wistron_btns.c @@ -259,11 +259,11 @@ static int __init dmi_matched(struct dmi_system_id *dmi) return 1; } -static struct key_entry keymap_empty[] __initdata = { +static struct key_entry keymap_empty[] = { { KE_END, 0 } }; -static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = { +static struct key_entry keymap_fs_amilo_pro_v2000[] = { { KE_KEY, 0x01, KEY_HELP }, { KE_KEY, 0x11, KEY_PROG1 }, { KE_KEY, 0x12, KEY_PROG2 }, @@ -273,7 +273,7 @@ static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = { { KE_END, 0 } }; -static struct key_entry keymap_fujitsu_n3510[] __initdata = { +static struct key_entry keymap_fujitsu_n3510[] = { { KE_KEY, 0x11, KEY_PROG1 }, { KE_KEY, 0x12, KEY_PROG2 }, { KE_KEY, 0x36, KEY_WWW }, @@ -285,7 +285,7 @@ static struct key_entry keymap_fujitsu_n3510[] __initdata = { { KE_END, 0 } }; -static struct key_entry keymap_wistron_ms2111[] __initdata = { +static struct key_entry keymap_wistron_ms2111[] = { { KE_KEY, 0x11, KEY_PROG1 }, { KE_KEY, 0x12, KEY_PROG2 }, { KE_KEY, 0x13, KEY_PROG3 }, @@ -294,7 +294,7 @@ static struct key_entry keymap_wistron_ms2111[] __initdata = { { KE_END, 0 } }; -static struct 
key_entry keymap_wistron_ms2141[] __initdata = { +static struct key_entry keymap_wistron_ms2141[] = { { KE_KEY, 0x11, KEY_PROG1 }, { KE_KEY, 0x12, KEY_PROG2 }, { KE_WIFI, 0x30, 0 }, @@ -307,7 +307,7 @@ static struct key_entry keymap_wistron_ms2141[] __initdata = { { KE_END, 0 } }; -static struct key_entry keymap_acer_aspire_1500[] __initdata = { +static struct key_entry keymap_acer_aspire_1500[] = { { KE_KEY, 0x11, KEY_PROG1 }, { KE_KEY, 0x12, KEY_PROG2 }, { KE_WIFI, 0x30, 0 }, @@ -317,7 +317,7 @@ static struct key_entry keymap_acer_aspire_1500[] __initdata = { { KE_END, 0 } }; -static struct key_entry keymap_acer_travelmate_240[] __initdata = { +static struct key_entry keymap_acer_travelmate_240[] = { { KE_KEY, 0x31, KEY_MAIL }, { KE_KEY, 0x36, KEY_WWW }, { KE_KEY, 0x11, KEY_PROG1 }, @@ -327,7 +327,7 @@ static struct key_entry keymap_acer_travelmate_240[] __initdata = { { KE_END, 0 } }; -static struct key_entry keymap_aopen_1559as[] __initdata = { +static struct key_entry keymap_aopen_1559as[] = { { KE_KEY, 0x01, KEY_HELP }, { KE_KEY, 0x06, KEY_PROG3 }, { KE_KEY, 0x11, KEY_PROG1 }, diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 8bc9f51ae6c..343afa38f4c 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -485,13 +485,6 @@ static int im_explorer_detect(struct psmouse *psmouse, int set_properties) param[0] = 40; ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); - param[0] = 200; - ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); - param[0] = 200; - ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); - param[0] = 60; - ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); - if (set_properties) { set_bit(BTN_MIDDLE, psmouse->dev->keybit); set_bit(REL_WHEEL, psmouse->dev->relbit); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 217615b3322..93f701ea87b 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -710,6 +710,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, return -EINVAL; } + m->ti = ti; + r = parse_features(&as, m, ti); if (r) goto bad; @@ -751,7 +753,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, } ti->private = m; - m->ti = ti; return 0; diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index be48cedf986..c54de989eb0 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -255,7 +255,9 @@ static struct region *__rh_alloc(struct region_hash *rh, region_t region) struct region *reg, *nreg; read_unlock(&rh->hash_lock); - nreg = mempool_alloc(rh->region_pool, GFP_NOIO); + nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); + if (unlikely(!nreg)) + nreg = kmalloc(sizeof(struct region), GFP_NOIO); nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? RH_CLEAN : RH_NOSYNC; nreg->rh = rh; diff --git a/drivers/md/md.c b/drivers/md/md.c index b6d16022a53..8dbab2ef388 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1597,6 +1597,19 @@ void md_update_sb(mddev_t * mddev) repeat: spin_lock_irq(&mddev->write_lock); + + if (mddev->degraded && mddev->sb_dirty == 3) + /* If the array is degraded, then skipping spares is both + * dangerous and fairly pointless. + * Dangerous because a device that was removed from the array + * might have a event_count that still looks up-to-date, + * so it can be re-added without a resync. + * Pointless because if there are any spares to skip, + * then a recovery will happen and soon that array won't + * be degraded any more and the spare can go back to sleep then. 
+ */ + mddev->sb_dirty = 1; + sync_req = mddev->in_sync; mddev->utime = get_seconds(); if (mddev->sb_dirty == 3) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1efe22a2d04..87bfe9e7d8c 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1625,15 +1625,16 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i return 0; } - /* before building a request, check if we can skip these blocks.. - * This call the bitmap_start_sync doesn't actually record anything - */ if (mddev->bitmap == NULL && mddev->recovery_cp == MaxSector && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && conf->fullsync == 0) { *skipped = 1; return max_sector - sector_nr; } + /* before building a request, check if we can skip these blocks.. + * This call the bitmap_start_sync doesn't actually record anything + */ if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* We can skip this block, and probably several more */ diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c index d687a14ec0a..06ac899a9a2 100644 --- a/drivers/media/dvb/bt8xx/dst.c +++ b/drivers/media/dvb/bt8xx/dst.c @@ -393,7 +393,7 @@ static int dst_set_bandwidth(struct dst_state *state, fe_bandwidth_t bandwidth) state->bandwidth = bandwidth; if (state->dst_type != DST_TYPE_IS_TERR) - return 0; + return -EOPNOTSUPP; switch (bandwidth) { case BANDWIDTH_6_MHZ: @@ -462,7 +462,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate) state->symbol_rate = srate; if (state->dst_type == DST_TYPE_IS_TERR) { - return 0; + return -EOPNOTSUPP; } dprintk(verbose, DST_INFO, 1, "set symrate %u", srate); srate /= 1000; @@ -504,7 +504,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate) static int dst_set_modulation(struct dst_state *state, fe_modulation_t modulation) { if (state->dst_type != DST_TYPE_IS_CABLE) - return 0; + return -EOPNOTSUPP; state->modulation = modulation; switch (modulation) { @@ -1234,7 +1234,7 @@ int dst_command(struct dst_state *state, u8 *data, u8 len) goto error; } if (write_dst(state, data, len)) { - dprintk(verbose, DST_INFO, 1, "Tring to recover.. "); + dprintk(verbose, DST_INFO, 1, "Trying to recover.. 
"); if ((dst_error_recovery(state)) < 0) { dprintk(verbose, DST_ERROR, 1, "Recovery Failed."); goto error; @@ -1328,15 +1328,13 @@ static int dst_tone_power_cmd(struct dst_state *state) { u8 paket[8] = { 0x00, 0x09, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00 }; - if (state->dst_type == DST_TYPE_IS_TERR) - return 0; + if (state->dst_type != DST_TYPE_IS_SAT) + return -EOPNOTSUPP; paket[4] = state->tx_tuna[4]; paket[2] = state->tx_tuna[2]; paket[3] = state->tx_tuna[3]; paket[7] = dst_check_sum (paket, 7); - dst_command(state, paket, 8); - - return 0; + return dst_command(state, paket, 8); } static int dst_get_tuna(struct dst_state *state) @@ -1465,7 +1463,7 @@ static int dst_set_diseqc(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd u8 paket[8] = { 0x00, 0x08, 0x04, 0xe0, 0x10, 0x38, 0xf0, 0xec }; if (state->dst_type != DST_TYPE_IS_SAT) - return 0; + return -EOPNOTSUPP; if (cmd->msg_len > 0 && cmd->msg_len < 5) memcpy(&paket[3], cmd->msg, cmd->msg_len); else if (cmd->msg_len == 5 && state->dst_hw_cap & DST_TYPE_HAS_DISEQC5) @@ -1473,18 +1471,17 @@ static int dst_set_diseqc(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd else return -EINVAL; paket[7] = dst_check_sum(&paket[0], 7); - dst_command(state, paket, 8); - return 0; + return dst_command(state, paket, 8); } static int dst_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { - int need_cmd; + int need_cmd, retval = 0; struct dst_state *state = fe->demodulator_priv; state->voltage = voltage; if (state->dst_type != DST_TYPE_IS_SAT) - return 0; + return -EOPNOTSUPP; need_cmd = 0; @@ -1506,9 +1503,9 @@ static int dst_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) } if (need_cmd) - dst_tone_power_cmd(state); + retval = dst_tone_power_cmd(state); - return 0; + return retval; } static int dst_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) @@ -1517,7 +1514,7 @@ static int dst_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) state->tone = tone; if (state->dst_type != DST_TYPE_IS_SAT) - return 0; + return -EOPNOTSUPP; switch (tone) { case SEC_TONE_OFF: @@ -1533,9 +1530,7 @@ static int dst_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) default: return -EINVAL; } - dst_tone_power_cmd(state); - - return 0; + return dst_tone_power_cmd(state); } static int dst_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t minicmd) @@ -1543,7 +1538,7 @@ static int dst_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t minicmd) struct dst_state *state = fe->demodulator_priv; if (state->dst_type != DST_TYPE_IS_SAT) - return 0; + return -EOPNOTSUPP; state->minicmd = minicmd; switch (minicmd) { case SEC_MINI_A: @@ -1553,9 +1548,7 @@ static int dst_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t minicmd) state->tx_tuna[3] = 0xff; break; } - dst_tone_power_cmd(state); - - return 0; + return dst_tone_power_cmd(state); } @@ -1608,28 +1601,31 @@ static int dst_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct dst_state *state = fe->demodulator_priv; - dst_get_signal(state); + int retval = dst_get_signal(state); *strength = state->decode_strength; - return 0; + return retval; } static int dst_read_snr(struct dvb_frontend *fe, u16 *snr) { struct dst_state *state = fe->demodulator_priv; - dst_get_signal(state); + int retval = dst_get_signal(state); *snr = state->decode_snr; - return 0; + return retval; } static int dst_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { + int retval = -EINVAL; struct dst_state *state = fe->demodulator_priv; if (p != NULL) { 
- dst_set_freq(state, p->frequency); + retval = dst_set_freq(state, p->frequency); + if(retval != 0) + return retval; dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency); if (state->dst_type == DST_TYPE_IS_SAT) { @@ -1647,10 +1643,10 @@ static int dst_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_paramet dst_set_symbolrate(state, p->u.qam.symbol_rate); dst_set_modulation(state, p->u.qam.modulation); } - dst_write_tuna(fe); + retval = dst_write_tuna(fe); } - return 0; + return retval; } static int dst_tune_frontend(struct dvb_frontend* fe, diff --git a/drivers/media/dvb/dvb-core/Makefile b/drivers/media/dvb/dvb-core/Makefile index 11054657fdb..0b5182835cc 100644 --- a/drivers/media/dvb/dvb-core/Makefile +++ b/drivers/media/dvb/dvb-core/Makefile @@ -2,8 +2,8 @@ # Makefile for the kernel DVB device drivers. # -dvb-core-objs = dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \ - dvb_ca_en50221.o dvb_frontend.o \ - dvb_net.o dvb_ringbuffer.o dvb_math.o +dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \ + dvb_ca_en50221.o dvb_frontend.o \ + dvb_net.o dvb_ringbuffer.o dvb_math.o obj-$(CONFIG_DVB_CORE) += dvb-core.o diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig index de3128a31de..220076b1b95 100644 --- a/drivers/media/radio/Kconfig +++ b/drivers/media/radio/Kconfig @@ -350,5 +350,15 @@ config RADIO_ZOLTRIX_PORT help Enter the I/O port of your Zoltrix radio card. -endmenu +config USB_DSBR + tristate "D-Link USB FM radio support (EXPERIMENTAL)" + depends on USB && VIDEO_V4L1 && EXPERIMENTAL + ---help--- + Say Y here if you want to connect this type of radio to your + computer's USB port. Note that the audio is not digital, and + you must connect the line out connector to a sound card or a + set of speakers. + To compile this driver as a module, choose M here: the + module will be called dsbr100. +endmenu diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile index e95b6805e00..cf55a18e3dd 100644 --- a/drivers/media/radio/Makefile +++ b/drivers/media/radio/Makefile @@ -20,5 +20,6 @@ obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o obj-$(CONFIG_RADIO_TRUST) += radio-trust.o obj-$(CONFIG_RADIO_MAESTRO) += radio-maestro.o +obj-$(CONFIG_USB_DSBR) += dsbr100.o EXTRA_CFLAGS += -Isound diff --git a/drivers/media/video/dsbr100.c b/drivers/media/radio/dsbr100.c index f7e33f9ee8e..f7e33f9ee8e 100644 --- a/drivers/media/video/dsbr100.c +++ b/drivers/media/radio/dsbr100.c diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index fe56862d51e..732bf1e7c32 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -449,18 +449,6 @@ source "drivers/media/video/pvrusb2/Kconfig" source "drivers/media/video/em28xx/Kconfig" -config USB_DSBR - tristate "D-Link USB FM radio support (EXPERIMENTAL)" - depends on USB && VIDEO_V4L1 && EXPERIMENTAL - ---help--- - Say Y here if you want to connect this type of radio to your - computer's USB port. Note that the audio is not digital, and - you must connect the line out connector to a sound card or a - set of speakers. - - To compile this driver as a module, choose M here: the - module will be called dsbr100. 
- source "drivers/media/video/usbvideo/Kconfig" source "drivers/media/video/et61x251/Kconfig" diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile index 353d61cfac1..e82e511f2a7 100644 --- a/drivers/media/video/Makefile +++ b/drivers/media/video/Makefile @@ -77,7 +77,6 @@ obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o obj-$(CONFIG_USB_DABUSB) += dabusb.o -obj-$(CONFIG_USB_DSBR) += dsbr100.o obj-$(CONFIG_USB_OV511) += ov511.o obj-$(CONFIG_USB_SE401) += se401.o obj-$(CONFIG_USB_STV680) += stv680.o @@ -91,6 +90,7 @@ obj-$(CONFIG_USB_ZC0301) += zc0301/ obj-$(CONFIG_USB_IBMCAM) += usbvideo/ obj-$(CONFIG_USB_KONICAWC) += usbvideo/ obj-$(CONFIG_USB_VICAM) += usbvideo/ +obj-$(CONFIG_USB_QUICKCAM_MESSENGER) += usbvideo/ obj-$(CONFIG_VIDEO_VIVI) += vivi.o diff --git a/drivers/media/video/compat_ioctl32.c b/drivers/media/video/compat_ioctl32.c index 9dddff42ec1..b69ee119481 100644 --- a/drivers/media/video/compat_ioctl32.c +++ b/drivers/media/video/compat_ioctl32.c @@ -21,7 +21,7 @@ #ifdef CONFIG_COMPAT - +#ifdef CONFIG_VIDEO_V4L1_COMPAT struct video_tuner32 { compat_int_t tuner; char name[32]; @@ -107,6 +107,7 @@ struct video_window32 { compat_caddr_t clips; compat_int_t clipcount; }; +#endif static int native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -124,6 +125,7 @@ static int native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } +#ifdef CONFIG_VIDEO_V4L1_COMPAT /* You get back everything except the clips... */ static int put_video_window32(struct video_window *kp, struct video_window32 __user *up) { @@ -138,6 +140,7 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u return -EFAULT; return 0; } +#endif struct v4l2_clip32 { @@ -490,6 +493,7 @@ static inline int put_v4l2_input(struct v4l2_input *kp, struct v4l2_input __user return 0; } +#ifdef CONFIG_VIDEO_V4L1_COMPAT struct video_code32 { char loadwhat[16]; /* name or tag of file being passed */ @@ -517,6 +521,8 @@ static inline int microcode32(struct video_code *kp, struct video_code32 __user #define VIDIOCSFREQ32 _IOW('v',15, u32) #define VIDIOCSMICROCODE32 _IOW('v',27, struct video_code32) +#endif + /* VIDIOC_ENUMINPUT32 is VIDIOC_ENUMINPUT minus 4 bytes of padding alignement */ #define VIDIOC_ENUMINPUT32 VIDIOC_ENUMINPUT - _IOC(0, 0, 0, 4) #define VIDIOC_G_FMT32 _IOWR ('V', 4, struct v4l2_format32) @@ -537,6 +543,7 @@ static inline int microcode32(struct video_code *kp, struct video_code32 __user #define VIDIOC_S_INPUT32 _IOWR ('V', 39, compat_int_t) #define VIDIOC_TRY_FMT32 _IOWR ('V', 64, struct v4l2_format32) +#ifdef CONFIG_VIDEO_V4L1_COMPAT enum { MaxClips = (~0U-sizeof(struct video_window))/sizeof(struct video_clip) }; @@ -601,14 +608,17 @@ static int do_set_window(struct file *file, unsigned int cmd, unsigned long arg) return native_ioctl(file, VIDIOCSWIN, (unsigned long)vw); } +#endif static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { union { +#ifdef CONFIG_VIDEO_V4L1_COMPAT struct video_tuner vt; struct video_buffer vb; struct video_window vw; struct video_code vc; +#endif struct v4l2_format v2f; struct v4l2_buffer v2b; struct v4l2_framebuffer v2fb; @@ -624,6 +634,7 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg /* First, convert the command. 
*/ switch(cmd) { +#ifdef CONFIG_VIDEO_V4L1_COMPAT case VIDIOCGTUNER32: cmd = VIDIOCGTUNER; break; case VIDIOCSTUNER32: cmd = VIDIOCSTUNER; break; case VIDIOCGWIN32: cmd = VIDIOCGWIN; break; @@ -631,6 +642,8 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg case VIDIOCSFBUF32: cmd = VIDIOCSFBUF; break; case VIDIOCGFREQ32: cmd = VIDIOCGFREQ; break; case VIDIOCSFREQ32: cmd = VIDIOCSFREQ; break; + case VIDIOCSMICROCODE32: cmd = VIDIOCSMICROCODE; break; +#endif case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break; case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break; case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break; @@ -647,10 +660,10 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break; case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break; case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break; - case VIDIOCSMICROCODE32: cmd = VIDIOCSMICROCODE; break; }; switch(cmd) { +#ifdef CONFIG_VIDEO_V4L1_COMPAT case VIDIOCSTUNER: case VIDIOCGTUNER: err = get_video_tuner32(&karg.vt, up); @@ -664,6 +677,7 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg break; case VIDIOCSFREQ: +#endif case VIDIOC_S_INPUT: case VIDIOC_OVERLAY: case VIDIOC_STREAMON: @@ -717,18 +731,21 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg compatible_arg = 0; break; +#ifdef CONFIG_VIDEO_V4L1_COMPAT case VIDIOCGWIN: case VIDIOCGFBUF: case VIDIOCGFREQ: +#endif case VIDIOC_G_FBUF: case VIDIOC_G_INPUT: compatible_arg = 0; +#ifdef CONFIG_VIDEO_V4L1_COMPAT case VIDIOCSMICROCODE: err = microcode32(&karg.vc, up); compatible_arg = 0; break; +#endif }; - if(err) goto out; @@ -743,6 +760,7 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg } if(err == 0) { switch(cmd) { +#ifdef CONFIG_VIDEO_V4L1_COMPAT case VIDIOCGTUNER: err = put_video_tuner32(&karg.vt, up); break; @@ -754,7 +772,7 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg case VIDIOCGFBUF: err = put_video_buffer32(&karg.vb, up); break; - +#endif case VIDIOC_G_FBUF: err = put_v4l2_framebuffer32(&karg.v2fb, up); break; @@ -792,7 +810,9 @@ static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg err = put_v4l2_input32(&karg.v2i, up); break; +#ifdef CONFIG_VIDEO_V4L1_COMPAT case VIDIOCGFREQ: +#endif case VIDIOC_G_INPUT: err = put_user(((u32)karg.vx), (u32 __user *)up); break; @@ -810,6 +830,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) return ret; switch (cmd) { +#ifdef CONFIG_VIDEO_V4L1_COMPAT case VIDIOCSWIN32: ret = do_set_window(file, cmd, arg); break; @@ -820,6 +841,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) case VIDIOCSFBUF32: case VIDIOCGFREQ32: case VIDIOCSFREQ32: +#endif case VIDIOC_QUERYCAP: case VIDIOC_ENUM_FMT: case VIDIOC_G_FMT32: @@ -851,6 +873,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) ret = do_video_ioctl(file, cmd, arg); break; +#ifdef CONFIG_VIDEO_V4L1_COMPAT /* Little v, the video4linux ioctls (conflict?) 
*/ case VIDIOCGCAP: case VIDIOCGCHAN: @@ -879,6 +902,7 @@ long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) case _IOR('v' , BASE_VIDIOCPRIVATE+7, int): ret = native_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); break; +#endif default: v4l_print_ioctl("compat_ioctl32", cmd); } diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c index 5c2036b40ea..7bb7589a07c 100644 --- a/drivers/media/video/cx25840/cx25840-core.c +++ b/drivers/media/video/cx25840/cx25840-core.c @@ -104,8 +104,8 @@ u32 cx25840_read4(struct i2c_client * client, u16 addr) if (i2c_master_recv(client, buffer, 4) < 4) return 0; - return (buffer[0] << 24) | (buffer[1] << 16) | - (buffer[2] << 8) | buffer[3]; + return (buffer[3] << 24) | (buffer[2] << 16) | + (buffer[1] << 8) | buffer[0]; } int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned and_mask, diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c index 547cdbdb644..94c92bacc34 100644 --- a/drivers/media/video/cx88/cx88-video.c +++ b/drivers/media/video/cx88/cx88-video.c @@ -1225,7 +1225,7 @@ static int video_do_ioctl(struct inode *inode, struct file *file, struct v4l2_format *f = arg; return cx8800_try_fmt(dev,fh,f); } -#ifdef CONFIG_V4L1_COMPAT +#ifdef CONFIG_VIDEO_V4L1_COMPAT /* --- streaming capture ------------------------------------- */ case VIDIOCGMBUF: { @@ -1584,7 +1584,7 @@ static int radio_do_ioctl(struct inode *inode, struct file *file, *id = 0; return 0; } -#ifdef CONFIG_V4L1_COMPAT +#ifdef CONFIG_VIDEO_V4L1_COMPAT case VIDIOCSTUNER: { struct video_tuner *v = arg; diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c index f2fd9195b3a..ed02ff81138 100644 --- a/drivers/media/video/msp3400-kthreads.c +++ b/drivers/media/video/msp3400-kthreads.c @@ -961,10 +961,10 @@ int msp34xxg_thread(void *data) /* setup the chip*/ msp34xxg_reset(client); state->std = state->radio ? 0x40 : msp_standard; - if (state->std != 1) - goto unmute; /* start autodetect */ msp_write_dem(client, 0x20, state->std); + if (state->std != 1) + goto unmute; /* watch autodetect */ v4l_dbg(1, msp_debug, client, "started autodetect, waiting for result\n"); diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig index 697145e0bf1..8fdf7101d3b 100644 --- a/drivers/media/video/pwc/Kconfig +++ b/drivers/media/video/pwc/Kconfig @@ -30,7 +30,7 @@ config USB_PWC config USB_PWC_DEBUG bool "USB Philips Cameras verbose debug" - depends USB_PWC + depends on USB_PWC help Say Y here in order to have the pwc driver generate verbose debugging messages. 
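The cx25840_read4() hunk above switches the 32-bit register assembly to little-endian byte order, i.e. buffer[0] is treated as the least significant byte returned over I2C. A minimal standalone sketch of that assembly (plain userspace C, invented buffer contents, not part of any patch above):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Assemble a 32-bit value from four bytes received least-significant byte first. */
static uint32_t le_bytes_to_u32(const uint8_t b[4])
{
	return ((uint32_t)b[3] << 24) | ((uint32_t)b[2] << 16) |
	       ((uint32_t)b[1] << 8)  |  (uint32_t)b[0];
}

int main(void)
{
	uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x12 };	/* LSB first, as read from the chip */

	printf("0x%08" PRIx32 "\n", le_bytes_to_u32(buf));	/* prints 0x12345678 */
	return 0;
}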
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c index 47d0d83a026..d4703944df9 100644 --- a/drivers/media/video/pwc/pwc-if.c +++ b/drivers/media/video/pwc/pwc-if.c @@ -160,6 +160,7 @@ static struct file_operations pwc_fops = { .poll = pwc_video_poll, .mmap = pwc_video_mmap, .ioctl = pwc_video_ioctl, + .compat_ioctl = v4l_compat_ioctl32, .llseek = no_llseek, }; static struct video_device pwc_template = { diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c index 8656f2400e1..2c171af9a9f 100644 --- a/drivers/media/video/saa7134/saa7134-video.c +++ b/drivers/media/video/saa7134/saa7134-video.c @@ -2087,7 +2087,7 @@ static int video_do_ioctl(struct inode *inode, struct file *file, struct v4l2_format *f = arg; return saa7134_try_fmt(dev,fh,f); } -#ifdef CONFIG_V4L1_COMPAT +#ifdef CONFIG_VIDEO_V4L1_COMPAT case VIDIOCGMBUF: { struct video_mbuf *mbuf = arg; diff --git a/drivers/media/video/tuner-types.c b/drivers/media/video/tuner-types.c index a167e17c6dc..d7eadc2c298 100644 --- a/drivers/media/video/tuner-types.c +++ b/drivers/media/video/tuner-types.c @@ -1027,10 +1027,11 @@ static struct tuner_params tuner_tnf_5335mf_params[] = { /* 70-79 */ /* ------------ TUNER_SAMSUNG_TCPN_2121P30A - Samsung NTSC ------------ */ +/* '+ 4' turns on the Low Noise Amplifier */ static struct tuner_range tuner_samsung_tcpn_2121p30a_ntsc_ranges[] = { - { 16 * 130.00 /*MHz*/, 0xce, 0x01, }, - { 16 * 364.50 /*MHz*/, 0xce, 0x02, }, - { 16 * 999.99 , 0xce, 0x08, }, + { 16 * 130.00 /*MHz*/, 0xce, 0x01 + 4, }, + { 16 * 364.50 /*MHz*/, 0xce, 0x02 + 4, }, + { 16 * 999.99 , 0xce, 0x08 + 4, }, }; static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = { @@ -1060,10 +1061,11 @@ static struct tuner_params tuner_thomson_fe6600_params[] = { /* ------------ TUNER_SAMSUNG_TCPG_6121P30A - Samsung PAL ------------ */ +/* '+ 4' turns on the Low Noise Amplifier */ static struct tuner_range tuner_samsung_tcpg_6121p30a_pal_ranges[] = { - { 16 * 146.25 /*MHz*/, 0xce, 0x01, }, - { 16 * 428.50 /*MHz*/, 0xce, 0x02, }, - { 16 * 999.99 , 0xce, 0x08, }, + { 16 * 146.25 /*MHz*/, 0xce, 0x01 + 4, }, + { 16 * 428.50 /*MHz*/, 0xce, 0x02 + 4, }, + { 16 * 999.99 , 0xce, 0x08 + 4, }, }; static struct tuner_params tuner_samsung_tcpg_6121p30a_params[] = { diff --git a/drivers/media/video/v4l1-compat.c b/drivers/media/video/v4l1-compat.c index d83a2c84d23..d7c3fcbc80f 100644 --- a/drivers/media/video/v4l1-compat.c +++ b/drivers/media/video/v4l1-compat.c @@ -599,6 +599,10 @@ v4l_compat_translate_ioctl(struct inode *inode, dprintk("VIDIOCGPICT / VIDIOC_G_FMT: %d\n",err); break; } + + pict->depth = ((fmt2->fmt.pix.bytesperline<<3) + + (fmt2->fmt.pix.width-1) ) + /fmt2->fmt.pix.width; pict->palette = pixelformat_to_palette( fmt2->fmt.pix.pixelformat); break; diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index 2ecbeffb559..8d972ffdaf9 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c @@ -202,7 +202,7 @@ static char *v4l2_memory_names[] = { /* ------------------------------------------------------------------ */ /* debug help functions */ -#ifdef CONFIG_V4L1_COMPAT +#ifdef CONFIG_VIDEO_V4L1_COMPAT static const char *v4l1_ioctls[] = { [_IOC_NR(VIDIOCGCAP)] = "VIDIOCGCAP", [_IOC_NR(VIDIOCGCHAN)] = "VIDIOCGCHAN", @@ -301,7 +301,7 @@ static const char *v4l2_ioctls[] = { #define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls) static const char *v4l2_int_ioctls[] = { -#ifdef CONFIG_V4L1_COMPAT +#ifdef 
CONFIG_VIDEO_V4L1_COMPAT [_IOC_NR(DECODER_GET_CAPABILITIES)] = "DECODER_GET_CAPABILITIES", [_IOC_NR(DECODER_GET_STATUS)] = "DECODER_GET_STATUS", [_IOC_NR(DECODER_SET_NORM)] = "DECODER_SET_NORM", @@ -367,7 +367,7 @@ void v4l_printk_ioctl(unsigned int cmd) (_IOC_NR(cmd) < V4L2_INT_IOCTLS) ? v4l2_int_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd); break; -#ifdef CONFIG_V4L1_COMPAT +#ifdef CONFIG_VIDEO_V4L1_COMPAT case 'v': printk("v4l1 ioctl %s, dir=%s (0x%08x)\n", (_IOC_NR(cmd) < V4L1_IOCTLS) ? diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c index 0fc90cd393f..88bf2af2a0e 100644 --- a/drivers/media/video/videodev.c +++ b/drivers/media/video/videodev.c @@ -760,7 +760,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file, ret=vfd->vidioc_overlay(file, fh, *i); break; } -#ifdef CONFIG_V4L1_COMPAT +#ifdef CONFIG_VIDEO_V4L1_COMPAT /* --- streaming capture ------------------------------------- */ case VIDIOCGMBUF: { diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c index 38bd0c1018c..841884af0cc 100644 --- a/drivers/media/video/vivi.c +++ b/drivers/media/video/vivi.c @@ -986,7 +986,7 @@ static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p) file->f_flags & O_NONBLOCK)); } -#ifdef CONFIG_V4L1_COMPAT +#ifdef CONFIG_VIDEO_V4L1_COMPAT static int vidiocgmbuf (struct file *file, void *priv, struct video_mbuf *mbuf) { struct vivi_fh *fh=priv; @@ -1328,7 +1328,7 @@ static struct video_device vivi = { .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, -#ifdef CONFIG_V4L1_COMPAT +#ifdef CONFIG_VIDEO_V4L1_COMPAT .vidiocgmbuf = vidiocgmbuf, #endif .tvnorms = tvnorms, diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index d4cb144ab40..c537d71c18e 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -640,7 +640,6 @@ typedef struct _MPT_ADAPTER struct work_struct fc_setup_reset_work; struct list_head fc_rports; spinlock_t fc_rescan_work_lock; - int fc_rescan_work_count; struct work_struct fc_rescan_work; char fc_rescan_work_q_name[KOBJ_NAME_LEN]; struct workqueue_struct *fc_rescan_work_q; diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index 90da7d63b08..85696f34c31 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -669,7 +669,10 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum) * if still doing discovery, * hang loose a while until finished */ - if (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) { + if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) || + (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE && + (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) + == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) { if (count-- > 0) { msleep(100); goto try_again; @@ -895,59 +898,45 @@ mptfc_rescan_devices(void *arg) { MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; int ii; - int work_to_do; u64 pn; - unsigned long flags; struct mptfc_rport_info *ri; - do { - /* start by tagging all ports as missing */ - list_for_each_entry(ri, &ioc->fc_rports, list) { - if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { - ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; - } + /* start by tagging all ports as missing */ + list_for_each_entry(ri, &ioc->fc_rports, list) { + if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { + ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; } + } - /* - * now rescan devices known to adapter, - * will reregister existing 
rports - */ - for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { - (void) mptfc_GetFcPortPage0(ioc, ii); - mptfc_init_host_attr(ioc,ii); /* refresh */ - mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev); - } + /* + * now rescan devices known to adapter, + * will reregister existing rports + */ + for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { + (void) mptfc_GetFcPortPage0(ioc, ii); + mptfc_init_host_attr(ioc, ii); /* refresh */ + mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev); + } - /* delete devices still missing */ - list_for_each_entry(ri, &ioc->fc_rports, list) { - /* if newly missing, delete it */ - if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) { + /* delete devices still missing */ + list_for_each_entry(ri, &ioc->fc_rports, list) { + /* if newly missing, delete it */ + if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) { - ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| - MPT_RPORT_INFO_FLAGS_MISSING); - fc_remote_port_delete(ri->rport); /* won't sleep */ - ri->rport = NULL; + ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| + MPT_RPORT_INFO_FLAGS_MISSING); + fc_remote_port_delete(ri->rport); /* won't sleep */ + ri->rport = NULL; - pn = (u64)ri->pg0.WWPN.High << 32 | - (u64)ri->pg0.WWPN.Low; - dfcprintk ((MYIOC_s_INFO_FMT - "mptfc_rescan.%d: %llx deleted\n", - ioc->name, - ioc->sh->host_no, - (unsigned long long)pn)); - } + pn = (u64)ri->pg0.WWPN.High << 32 | + (u64)ri->pg0.WWPN.Low; + dfcprintk ((MYIOC_s_INFO_FMT + "mptfc_rescan.%d: %llx deleted\n", + ioc->name, + ioc->sh->host_no, + (unsigned long long)pn)); } - - /* - * allow multiple passes as target state - * might have changed during scan - */ - spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); - if (ioc->fc_rescan_work_count > 2) /* only need one more */ - ioc->fc_rescan_work_count = 2; - work_to_do = --ioc->fc_rescan_work_count; - spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); - } while (work_to_do); + } } static int @@ -1159,7 +1148,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) * by doing it via the workqueue, some locking is eliminated */ - ioc->fc_rescan_work_count = 1; queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); flush_workqueue(ioc->fc_rescan_work_q); @@ -1202,10 +1190,8 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) case MPI_EVENT_RESCAN: spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); if (ioc->fc_rescan_work_q) { - if (ioc->fc_rescan_work_count++ == 0) { - queue_work(ioc->fc_rescan_work_q, - &ioc->fc_rescan_work); - } + queue_work(ioc->fc_rescan_work_q, + &ioc->fc_rescan_work); } spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); break; @@ -1248,10 +1234,8 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) mptfc_SetFcPortPage1_defaults(ioc); spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); if (ioc->fc_rescan_work_q) { - if (ioc->fc_rescan_work_count++ == 0) { - queue_work(ioc->fc_rescan_work_q, - &ioc->fc_rescan_work); - } + queue_work(ioc->fc_rescan_work_q, + &ioc->fc_rescan_work); } spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); } diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c index 0b9682e9a35..74f8cdeeff0 100644 --- a/drivers/mmc/mmc_queue.c +++ b/drivers/mmc/mmc_queue.c @@ -79,7 +79,8 @@ static int mmc_queue_thread(void *d) spin_lock_irq(q->queue_lock); set_current_state(TASK_INTERRUPTIBLE); if (!blk_queue_plugged(q)) - mq->req = req = elv_next_request(q); + req = elv_next_request(q); + mq->req = req; spin_unlock_irq(q->queue_lock); if (!req) { diff --git a/drivers/mmc/wbsd.c 
b/drivers/mmc/wbsd.c index 8a30ef3ae41..c351c6d1a18 100644 --- a/drivers/mmc/wbsd.c +++ b/drivers/mmc/wbsd.c @@ -41,7 +41,7 @@ #include "wbsd.h" #define DRIVER_NAME "wbsd" -#define DRIVER_VERSION "1.5" +#define DRIVER_VERSION "1.6" #define DBG(x...) \ pr_debug(DRIVER_NAME ": " x) @@ -1439,13 +1439,13 @@ static int __devinit wbsd_scan(struct wbsd_host *host) static int __devinit wbsd_request_region(struct wbsd_host *host, int base) { - if (io & 0x7) + if (base & 0x7) return -EINVAL; if (!request_region(base, 8, DRIVER_NAME)) return -EIO; - host->base = io; + host->base = base; return 0; } @@ -1773,7 +1773,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma, /* * Request resources. */ - ret = wbsd_request_resources(host, io, irq, dma); + ret = wbsd_request_resources(host, base, irq, dma); if (ret) { wbsd_release_resources(host); wbsd_free_mmc(dev); @@ -1861,6 +1861,7 @@ static void __devexit wbsd_shutdown(struct device *dev, int pnp) static int __devinit wbsd_probe(struct platform_device *dev) { + /* Use the module parameters for resources */ return wbsd_init(&dev->dev, io, irq, dma, 0); } diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c index d7897dc6b3c..a0ba07c36ee 100644 --- a/drivers/mtd/nand/ams-delta.c +++ b/drivers/mtd/nand/ams-delta.c @@ -130,11 +130,13 @@ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd, if (ctrl & NAND_CTRL_CHANGE) { unsigned long bits; - bits = (~ctrl & NAND_NCE) << 2; - bits |= (ctrl & NAND_CLE) << 7; - bits |= (ctrl & NAND_ALE) << 6; + bits = (~ctrl & NAND_NCE) ? AMS_DELTA_LATCH2_NAND_NCE : 0; + bits |= (ctrl & NAND_CLE) ? AMS_DELTA_LATCH2_NAND_CLE : 0; + bits |= (ctrl & NAND_ALE) ? AMS_DELTA_LATCH2_NAND_ALE : 0; - ams_delta_latch2_write(0xC2, bits); + ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_CLE | + AMS_DELTA_LATCH2_NAND_ALE | + AMS_DELTA_LATCH2_NAND_NCE, bits); } if (cmd != NAND_CMD_NONE) diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 62b861304e0..c8cbc00243f 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -1093,9 +1093,10 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, ret = nand_do_read_ops(mtd, from, &chip->ops); + *retlen = chip->ops.retlen; + nand_release_device(mtd); - *retlen = chip->ops.retlen; return ret; } @@ -1691,9 +1692,10 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, ret = nand_do_write_ops(mtd, to, &chip->ops); + *retlen = chip->ops.retlen; + nand_release_device(mtd); - *retlen = chip->ops.retlen; return ret; } diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index 4532b17e40e..aedfddf20cb 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c @@ -1003,7 +1003,8 @@ static int corkscrew_start_xmit(struct sk_buff *skb, /* Calculate the next Tx descriptor entry. 
*/ int entry = vp->cur_tx % TX_RING_SIZE; struct boom_tx_desc *prev_entry; - unsigned long flags, i; + unsigned long flags; + int i; if (vp->tx_full) /* No room to transmit with */ return 1; diff --git a/drivers/net/82596.c b/drivers/net/82596.c index 7e2ca957146..257d3bce399 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c @@ -899,7 +899,7 @@ memory_squeeze: } -static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) +static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) { struct i596_cmd *ptr; @@ -932,7 +932,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private lp->scb.cmd = I596_NULL; } -static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) +static void i596_reset(struct net_device *dev, struct i596_private *lp, + int ioaddr) { unsigned long flags; @@ -1578,7 +1579,7 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "i82596 debug mask"); -int init_module(void) +int __init init_module(void) { if (debug >= 0) i596_debug = debug; @@ -1588,7 +1589,7 @@ int init_module(void) return 0; } -void cleanup_module(void) +void __exit cleanup_module(void) { unregister_netdev(dev_82596); #ifdef __mc68000__ diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 39189903e35..30b3671d833 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1724,6 +1724,20 @@ config VIA_RHINE_MMIO If unsure, say Y. +config VIA_RHINE_NAPI + bool "Use Rx Polling (NAPI)" + depends on VIA_RHINE + help + NAPI is a new driver API designed to reduce CPU and interrupt load + when the driver is receiving lots of packets from the card. + + If your estimated Rx load is 10kpps or more, or if the card will be + deployed on potentially unfriendly networks (e.g. in a firewall), + then say Y here. + + See <file:Documentation/networking/NAPI_HOWTO.txt> for more + information. + config LAN_SAA9730 bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)" depends on NET_PCI && EXPERIMENTAL && MIPS @@ -2219,6 +2233,33 @@ config GFAR_NAPI bool "NAPI Support" depends on GIANFAR +config UCC_GETH + tristate "Freescale QE UCC GETH" + depends on QUICC_ENGINE && UCC_FAST + help + This driver supports the Gigabit Ethernet mode of QE UCC. + QE can be found on MPC836x CPUs. 
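The NAPI options added to drivers/net/Kconfig above (VIA_RHINE_NAPI, UGETH_NAPI) refer to the Rx-polling model described in Documentation/networking/NAPI_HOWTO.txt. As a rough illustration only — this is not part of any patch in this series — a driver of this era would register a poll callback on the net_device and process a bounded number of packets per call. The sketch below follows the pre-2.6.24 NAPI interface (dev->poll, dev->quota, netif_rx_complete and friends); my_rx_poll(), my_clean_rx_ring(), my_enable_rx_irq() and my_disable_rx_irq() are hypothetical placeholder names.

/* Minimal sketch of the old (pre-napi_struct) NAPI polling scheme.
 * my_clean_rx_ring() stands in for a driver's own ring-cleaning code,
 * which hands received packets to the stack via netif_receive_skb().
 */
static int my_rx_poll(struct net_device *dev, int *budget)
{
	int limit = min(*budget, dev->quota);
	int done;

	done = my_clean_rx_ring(dev, limit);	/* hypothetical helper */

	*budget -= done;
	dev->quota -= done;

	if (done < limit) {
		/* ring drained: leave polling mode, re-enable Rx interrupts */
		netif_rx_complete(dev);
		my_enable_rx_irq(dev);		/* hypothetical helper */
		return 0;
	}
	return 1;	/* more work pending, stay on the poll list */
}

/* At probe time the driver would set:
 *	dev->poll   = my_rx_poll;
 *	dev->weight = 64;
 * and in its interrupt handler, on an Rx event:
 *	if (netif_rx_schedule_prep(dev)) {
 *		my_disable_rx_irq(dev);		// hypothetical helper
 *		__netif_rx_schedule(dev);
 *	}
 */

The point of the scheme, as the VIA_RHINE_NAPI help text says, is that under heavy Rx load the device stays in polling mode and interrupts remain masked, so per-packet interrupt overhead disappears.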
+ +config UGETH_NAPI + bool "NAPI Support" + depends on UCC_GETH + +config UGETH_MAGIC_PACKET + bool "Magic Packet detection support" + depends on UCC_GETH + +config UGETH_FILTERING + bool "Mac address filtering support" + depends on UCC_GETH + +config UGETH_TX_ON_DEMOND + bool "Transmit on Demond support" + depends on UCC_GETH + +config UGETH_HAS_GIGA + bool + depends on UCC_GETH && MPC836x + config MV643XX_ETH tristate "MV-643XX Ethernet support" depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM diff --git a/drivers/net/Makefile b/drivers/net/Makefile index c91e95126f7..8427bf9dec9 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -18,6 +18,9 @@ gianfar_driver-objs := gianfar.o \ gianfar_mii.o \ gianfar_sysfs.o +obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o +ucc_geth_driver-objs := ucc_geth.o ucc_geth_phy.o + # # link order important here # diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c index 7952dc6d77e..0fbbcb75af6 100644 --- a/drivers/net/ac3200.c +++ b/drivers/net/ac3200.c @@ -370,8 +370,7 @@ MODULE_PARM_DESC(mem, "Memory base address(es)"); MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver"); MODULE_LICENSE("GPL"); -int -init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 1d01ac0000e..ae7f828344d 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -1030,7 +1030,7 @@ module_param(io, int, 0); module_param(irq, int, 0); module_param(board_type, int, 0); -int init_module(void) +int __init init_module(void) { if (io == 0) printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index 5d7929c79bc..4ca061c2d5b 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c @@ -901,7 +901,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address"); MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number"); MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)"); -int init_module(void) +int __init init_module(void) { if (io == 0) printk("at1700: You should not use auto-probing with insmod!\n"); diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index db73de0d251..652eb05a6c2 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -56,8 +56,8 @@ #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.4.43" -#define DRV_MODULE_RELDATE "June 28, 2006" +#define DRV_MODULE_VERSION "1.4.44" +#define DRV_MODULE_RELDATE "August 10, 2006" #define RUN_AT(x) (jiffies + (x)) @@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); static inline u32 bnx2_tx_avail(struct bnx2 *bp) { - u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons); + u32 diff; + smp_mb(); + diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons); if (diff > MAX_TX_DESC_CNT) diff = (diff & MAX_TX_DESC_CNT) - 1; return (bp->tx_ring_size - diff); @@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index) struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; unsigned long align; - skb = dev_alloc_skb(bp->rx_buf_size); + skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); if (skb == NULL) { return -ENOMEM; } @@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index) skb_reserve(skb, 8 - align); } - skb->dev = bp->dev; mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); @@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp) 
} bp->tx_cons = sw_cons; + /* Need to make the tx_cons update visible to bnx2_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that bnx2_start_xmit() + * will miss it and cause the queue to be stopped forever. + */ + smp_mb(); - if (unlikely(netif_queue_stopped(bp->dev))) { - spin_lock(&bp->tx_lock); + if (unlikely(netif_queue_stopped(bp->dev)) && + (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) { + netif_tx_lock(bp->dev); if ((netif_queue_stopped(bp->dev)) && - (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) { - + (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) netif_wake_queue(bp->dev); - } - spin_unlock(&bp->tx_lock); + netif_tx_unlock(bp->dev); } } @@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget) if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) { struct sk_buff *new_skb; - new_skb = dev_alloc_skb(len + 2); + new_skb = netdev_alloc_skb(bp->dev, len + 2); if (new_skb == NULL) goto reuse_rx; @@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget) skb_reserve(new_skb, 2); skb_put(new_skb, len); - new_skb->dev = bp->dev; bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod); @@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp) struct tx_bd *txbd; u32 val; + bp->tx_wake_thresh = bp->tx_ring_size / 2; + txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT]; txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32; @@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) return -EINVAL; pkt_size = 1514; - skb = dev_alloc_skb(pkt_size); + skb = netdev_alloc_skb(bp->dev, pkt_size); if (!skb) return -ENOMEM; packet = skb_put(skb, pkt_size); @@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) #endif /* Called with netif_tx_lock. - * hard_start_xmit is pseudo-lockless - a lock is only required when - * the tx queue is full. This way, we get the benefit of lockless - * operations most of the time without the complexities to handle - * netif_stop_queue/wake_queue race conditions. + * bnx2_tx_int() runs without netif_tx_lock unless it needs to call + * netif_wake_queue(). 
*/ static int bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) { - spin_lock(&bp->tx_lock); netif_stop_queue(dev); - - if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS) + if (bnx2_tx_avail(bp) > bp->tx_wake_thresh) netif_wake_queue(dev); - spin_unlock(&bp->tx_lock); } return NETDEV_TX_OK; @@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->pdev = pdev; spin_lock_init(&bp->phy_lock); - spin_lock_init(&bp->tx_lock); INIT_WORK(&bp->reset_task, bnx2_reset_task, bp); dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); @@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->mac_addr[5] = (u8) reg; bp->tx_ring_size = MAX_TX_DESC_CNT; - bnx2_set_rx_ring_size(bp, 100); + bnx2_set_rx_ring_size(bp, 255); bp->rx_csum = 1; diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 658c5ee95c7..fe804763c60 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -3890,10 +3890,6 @@ struct bnx2 { u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES))); u16 tx_prod; - struct tx_bd *tx_desc_ring; - struct sw_bd *tx_buf_ring; - int tx_ring_size; - u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES))); u16 hw_tx_cons; @@ -3916,9 +3912,11 @@ struct bnx2 { struct sw_bd *rx_buf_ring; struct rx_bd *rx_desc_ring[MAX_RX_RINGS]; - /* Only used to synchronize netif_stop_queue/wake_queue when tx */ - /* ring is full */ - spinlock_t tx_lock; + /* TX constants */ + struct tx_bd *tx_desc_ring; + struct sw_bd *tx_buf_ring; + int tx_ring_size; + u32 tx_wake_thresh; /* End of fields used in the performance code paths. */ diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 47eecce35fa..2dcca79b1f6 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -1905,8 +1905,7 @@ MODULE_LICENSE("GPL"); */ -int -init_module(void) +int __init init_module(void) { struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); struct net_local *lp; diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 1b758b70713..3d76fa144c4 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -339,6 +339,17 @@ static void dm9000_timeout(struct net_device *dev) spin_unlock_irqrestore(&db->lock,flags); } +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + *Used by netconsole + */ +static void dm9000_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + dm9000_interrupt(dev->irq,dev,NULL); + enable_irq(dev->irq); +} +#endif /* dm9000_release_board * @@ -538,6 +549,9 @@ dm9000_probe(struct platform_device *pdev) ndev->stop = &dm9000_stop; ndev->get_stats = &dm9000_get_stats; ndev->set_multicast_list = &dm9000_hash_table; +#ifdef CONFIG_NET_POLL_CONTROLLER + ndev->poll_controller = &dm9000_poll_controller; +#endif #ifdef DM9000_PROGRAM_EEPROM program_eeprom(db); diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 91ef5f2fd76..ce850f1078b 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -173,8 +173,11 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static int debug = 3; +static int eeprom_bad_csum_allow = 0; module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); #define DPRINTK(nlevel, klevel, fmt, args...) 
\ (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \ printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \ @@ -756,7 +759,8 @@ static int e100_eeprom_load(struct nic *nic) checksum = le16_to_cpu(0xBABA - checksum); if(checksum != nic->eeprom[nic->eeprom_wc - 1]) { DPRINTK(PROBE, ERR, "EEPROM corrupted\n"); - return -EAGAIN; + if (!eeprom_bad_csum_allow) + return -EAGAIN; } return 0; diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 583518ae49c..b3b919116e0 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -105,6 +105,33 @@ static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex); static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); +static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, + uint32_t segment); +static int32_t e1000_get_software_flag(struct e1000_hw *hw); +static int32_t e1000_get_software_semaphore(struct e1000_hw *hw); +static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw); +static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); +static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, + uint16_t words, uint16_t *data); +static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, + uint8_t* data); +static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, + uint16_t *data); +static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, + uint16_t *data); +static void e1000_release_software_flag(struct e1000_hw *hw); +static void e1000_release_software_semaphore(struct e1000_hw *hw); +static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, + uint32_t no_snoop); +static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, + uint32_t index, uint8_t byte); +static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, + uint16_t words, uint16_t *data); +static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, + uint8_t data); +static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, + uint16_t data); + /* IGP cable length table */ static const uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = @@ -3233,7 +3260,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw) return data; } -int32_t +static int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) { uint32_t swfw_sync = 0; @@ -3277,7 +3304,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) return E1000_SUCCESS; } -void +static void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) { uint32_t swfw_sync; @@ -3575,7 +3602,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, return E1000_SUCCESS; } -int32_t +static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data) @@ -3608,7 +3635,7 @@ e1000_read_kmrn_reg(struct e1000_hw *hw, return E1000_SUCCESS; } -int32_t +static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data) @@ -3839,7 +3866,7 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw) * * hw - struct containing variables accessed by shared code ******************************************************************************/ -int32_t +static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) { int32_t ret_val; @@ -4086,7 +4113,7 @@ e1000_phy_igp_get_info(struct e1000_hw *hw, * hw - Struct containing variables accessed by shared code * phy_info - PHY information structure 
******************************************************************************/ -int32_t +static int32_t e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) { @@ -5643,6 +5670,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw) * for the first 15 multicast addresses, and hashes the rest into the * multicast table. *****************************************************************************/ +#if 0 void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t *mc_addr_list, @@ -5719,6 +5747,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, } DEBUGOUT("MC Update Complete\n"); } +#endif /* 0 */ /****************************************************************************** * Hashes an address to determine its location in the multicast table @@ -6587,6 +6616,7 @@ e1000_get_bus_info(struct e1000_hw *hw) * hw - Struct containing variables accessed by shared code * offset - offset to read from *****************************************************************************/ +#if 0 uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset) @@ -6597,6 +6627,7 @@ e1000_read_reg_io(struct e1000_hw *hw, e1000_io_write(hw, io_addr, offset); return e1000_io_read(hw, io_data); } +#endif /* 0 */ /****************************************************************************** * Writes a value to one of the devices registers using port I/O (as opposed to @@ -7909,6 +7940,7 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw) * returns: - none. * ***************************************************************************/ +#if 0 void e1000_enable_pciex_master(struct e1000_hw *hw) { @@ -7923,6 +7955,7 @@ e1000_enable_pciex_master(struct e1000_hw *hw) ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; E1000_WRITE_REG(hw, CTRL, ctrl); } +#endif /* 0 */ /******************************************************************************* * @@ -8148,7 +8181,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) * E1000_SUCCESS at any other case. 
* ***************************************************************************/ -int32_t +static int32_t e1000_get_software_semaphore(struct e1000_hw *hw) { int32_t timeout = hw->eeprom.word_size + 1; @@ -8183,7 +8216,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw) * hw: Struct containing variables accessed by shared code * ***************************************************************************/ -void +static void e1000_release_software_semaphore(struct e1000_hw *hw) { uint32_t swsm; @@ -8265,7 +8298,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) * returns: E1000_SUCCESS * *****************************************************************************/ -int32_t +static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) { uint32_t gcr_reg = 0; @@ -8306,7 +8339,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) * hw: Struct containing variables accessed by shared code * ***************************************************************************/ -int32_t +static int32_t e1000_get_software_flag(struct e1000_hw *hw) { int32_t timeout = PHY_CFG_TIMEOUT; @@ -8345,7 +8378,7 @@ e1000_get_software_flag(struct e1000_hw *hw) * hw: Struct containing variables accessed by shared code * ***************************************************************************/ -void +static void e1000_release_software_flag(struct e1000_hw *hw) { uint32_t extcnf_ctrl; @@ -8369,6 +8402,7 @@ e1000_release_software_flag(struct e1000_hw *hw) * hw: Struct containing variables accessed by shared code * ***************************************************************************/ +#if 0 int32_t e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) { @@ -8388,6 +8422,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) return ret_val; } +#endif /* 0 */ /*************************************************************************** * @@ -8397,6 +8432,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) * hw: Struct containing variables accessed by shared code * ***************************************************************************/ +#if 0 int32_t e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) { @@ -8416,6 +8452,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) return ret_val; } +#endif /* 0 */ /****************************************************************************** * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access @@ -8426,7 +8463,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) * data - word read from the EEPROM * words - number of words to read *****************************************************************************/ -int32_t +static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data) { @@ -8482,7 +8519,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, * words - number of words to write * data - words to write to the EEPROM *****************************************************************************/ -int32_t +static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data) { @@ -8529,7 +8566,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, * * hw - The pointer to the hw structure ****************************************************************************/ -int32_t +static int32_t e1000_ich8_cycle_init(struct e1000_hw *hw) { union ich8_hws_flash_status hsfsts; @@ -8596,7 +8633,7 @@ e1000_ich8_cycle_init(struct 
e1000_hw *hw) * * hw - The pointer to the hw structure ****************************************************************************/ -int32_t +static int32_t e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) { union ich8_hws_flash_ctrl hsflctl; @@ -8631,7 +8668,7 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) * size - Size of data to read, 1=byte 2=word * data - Pointer to the word to store the value read. *****************************************************************************/ -int32_t +static int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t* data) { @@ -8710,7 +8747,7 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, * size - Size of data to read, 1=byte 2=word * data - The byte(s) to write to the NVM. *****************************************************************************/ -int32_t +static int32_t e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t data) { @@ -8785,7 +8822,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, * index - The index of the byte to read. * data - Pointer to a byte to store the value read. *****************************************************************************/ -int32_t +static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) { int32_t status = E1000_SUCCESS; @@ -8808,7 +8845,7 @@ e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) * index - The index of the byte to write. * byte - The byte to write to the NVM. *****************************************************************************/ -int32_t +static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) { int32_t error = E1000_SUCCESS; @@ -8839,7 +8876,7 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) * index - The index of the byte to read. * data - The byte to write to the NVM. *****************************************************************************/ -int32_t +static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) { int32_t status = E1000_SUCCESS; @@ -8857,7 +8894,7 @@ e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) * index - The starting byte index of the word to read. * data - Pointer to a word to store the value read. *****************************************************************************/ -int32_t +static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) { int32_t status = E1000_SUCCESS; @@ -8872,6 +8909,7 @@ e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) * index - The starting byte index of the word to read. * data - The word to write to the NVM. *****************************************************************************/ +#if 0 int32_t e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) { @@ -8879,6 +8917,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) status = e1000_write_ich8_data(hw, index, 2, data); return status; } +#endif /* 0 */ /****************************************************************************** * Erases the bank specified. Each bank is a 4k block. Segments are 0 based. @@ -8887,7 +8926,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) * hw - pointer to e1000_hw structure * segment - 0 for first segment, 1 for second segment, etc. 
*****************************************************************************/ -int32_t +static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment) { union ich8_hws_flash_status hsfsts; @@ -8984,6 +9023,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment) * hw: Struct containing variables accessed by shared code * *****************************************************************************/ +#if 0 int32_t e1000_duplex_reversal(struct e1000_hw *hw) { @@ -9012,8 +9052,9 @@ e1000_duplex_reversal(struct e1000_hw *hw) return ret_val; } +#endif /* 0 */ -int32_t +static int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size) { @@ -9047,7 +9088,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, } -int32_t +static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw) { uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop; diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index f9341e3276b..375b95518c3 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h @@ -323,13 +323,8 @@ int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t dat int32_t e1000_phy_hw_reset(struct e1000_hw *hw); int32_t e1000_phy_reset(struct e1000_hw *hw); void e1000_phy_powerdown_workaround(struct e1000_hw *hw); -int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); -int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size); -int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw); int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); -int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data); -int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); /* EEPROM Functions */ int32_t e1000_init_eeprom_params(struct e1000_hw *hw); @@ -400,13 +395,8 @@ int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); int32_t e1000_read_mac_addr(struct e1000_hw * hw); -int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); -void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); -void e1000_release_software_flag(struct e1000_hw *hw); -int32_t e1000_get_software_flag(struct e1000_hw *hw); /* Filters (multicast, vlan, receive) */ -void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count); uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); @@ -431,31 +421,9 @@ void e1000_pci_clear_mwi(struct e1000_hw *hw); void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); /* Port I/O is only supported on 82544 and newer */ -uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port); -uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset); void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); -void e1000_enable_pciex_master(struct e1000_hw *hw); int32_t e1000_disable_pciex_master(struct e1000_hw *hw); -int32_t 
e1000_get_software_semaphore(struct e1000_hw *hw); -void e1000_release_software_semaphore(struct e1000_hw *hw); int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); -int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop); - -int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, - uint8_t *data); -int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, - uint8_t byte); -int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, - uint8_t byte); -int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, - uint16_t *data); -int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, - uint32_t size, uint16_t *data); -int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, - uint16_t words, uint16_t *data); -int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, - uint16_t words, uint16_t *data); -int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment); #define E1000_READ_REG_IO(a, reg) \ diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 627f224d78b..726f43d5593 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -4386,11 +4386,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) pci_write_config_word(adapter->pdev, reg, *value); } +#if 0 uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port) { return inl(port); } +#endif /* 0 */ void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c index e5c5cd2a271..e4e733a380e 100644 --- a/drivers/net/e2100.c +++ b/drivers/net/e2100.c @@ -425,8 +425,8 @@ MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. */ -int -init_module(void) + +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 20d31430c74..8dc61d65dd2 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -1807,8 +1807,7 @@ MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); -int -init_module(void) +int __init init_module(void) { struct net_device *dev; int i; diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 33291bcf6d4..0701c1d810c 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -1698,7 +1698,7 @@ MODULE_LICENSE("GPL"); * are specified, we verify and then use them. If no parameters are given, we * autoprobe for one card only. 
*/ -int init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c index 6b0ab1eac3f..fd7b32a24ea 100644 --- a/drivers/net/es3210.c +++ b/drivers/net/es3210.c @@ -421,8 +421,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)"); MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); MODULE_LICENSE("GPL"); -int -init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 4bf76f86d8e..ca42efa9143 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c @@ -1434,7 +1434,7 @@ MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto, module_param(debug, int, 0); MODULE_PARM_DESC(debug, "eth16i debug level (0-6)"); -int init_module(void) +int __init init_module(void) { int this_dev, found = 0; struct net_device *dev; diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 97d34fee8c1..567e27413cf 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; #include <asm/uaccess.h> /* These identify the driver base version and may not be removed. */ -static char version[] __devinitdata = +static char version[] = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile index d6dd3f2fb43..02d4dc18ba6 100644 --- a/drivers/net/fs_enet/Makefile +++ b/drivers/net/fs_enet/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_FS_ENET) += fs_enet.o -obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o -obj-$(CONFIG_8260) += mac-fcc.o +obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o +obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o -fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o +fs_enet-objs := fs_enet-main.o diff --git a/drivers/net/fs_enet/fec.h b/drivers/net/fs_enet/fec.h new file mode 100644 index 00000000000..e980527e2b9 --- /dev/null +++ b/drivers/net/fs_enet/fec.h @@ -0,0 +1,42 @@ +#ifndef FS_ENET_FEC_H +#define FS_ENET_FEC_H + +/* CRC polynomium used by the FEC for the multicast group filtering */ +#define FEC_CRC_POLY 0x04C11DB7 + +#define FEC_MAX_MULTICAST_ADDRS 64 + +/* Interrupt events/masks. 
+*/ +#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ +#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ +#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ +#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ +#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ +#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ +#define FEC_ENET_RXF 0x02000000U /* Full frame received */ +#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ +#define FEC_ENET_MII 0x00800000U /* MII interrupt */ +#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ + +#define FEC_ECNTRL_PINMUX 0x00000004 +#define FEC_ECNTRL_ETHER_EN 0x00000002 +#define FEC_ECNTRL_RESET 0x00000001 + +#define FEC_RCNTRL_BC_REJ 0x00000010 +#define FEC_RCNTRL_PROM 0x00000008 +#define FEC_RCNTRL_MII_MODE 0x00000004 +#define FEC_RCNTRL_DRT 0x00000002 +#define FEC_RCNTRL_LOOP 0x00000001 + +#define FEC_TCNTRL_FDEN 0x00000004 +#define FEC_TCNTRL_HBC 0x00000002 +#define FEC_TCNTRL_GTS 0x00000001 + + + +/* + * Delay to wait for FEC reset command to complete (in us) + */ +#define FEC_RESET_DELAY 50 +#endif diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index f6abff5846b..df62506a178 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -37,6 +37,7 @@ #include <linux/bitops.h> #include <linux/fs.h> #include <linux/platform_device.h> +#include <linux/phy.h> #include <linux/vmalloc.h> #include <asm/pgtable.h> @@ -682,35 +683,6 @@ static void fs_free_irq(struct net_device *dev, int irq) (*fep->ops->post_free_irq)(dev, irq); } -/**********************************************************************************/ - -/* This interrupt occurs when the PHY detects a link change. */ -static irqreturn_t -fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs) -{ - struct net_device *dev = dev_id; - struct fs_enet_private *fep; - const struct fs_platform_info *fpi; - - fep = netdev_priv(dev); - fpi = fep->fpi; - - /* - * Acknowledge the interrupt if possible. If we have not - * found the PHY yet we can't process or acknowledge the - * interrupt now. Instead we ignore this interrupt for now, - * which we can do since it is edge triggered. It will be - * acknowledged later by fs_enet_open(). 
- */ - if (!fep->phy) - return IRQ_NONE; - - fs_mii_ack_int(dev); - fs_mii_link_status_change_check(dev, 0); - - return IRQ_HANDLED; -} - static void fs_timeout(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); @@ -722,10 +694,13 @@ static void fs_timeout(struct net_device *dev) spin_lock_irqsave(&fep->lock, flags); if (dev->flags & IFF_UP) { + phy_stop(fep->phydev); (*fep->ops->stop)(dev); (*fep->ops->restart)(dev); + phy_start(fep->phydev); } + phy_start(fep->phydev); wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); spin_unlock_irqrestore(&fep->lock, flags); @@ -733,35 +708,112 @@ static void fs_timeout(struct net_device *dev) netif_wake_queue(dev); } +/*----------------------------------------------------------------------------- + * generic link-change handler - should be sufficient for most cases + *-----------------------------------------------------------------------------*/ +static void generic_adjust_link(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phydev; + int new_state = 0; + + if (phydev->link) { + + /* adjust to duplex mode */ + if (phydev->duplex != fep->oldduplex){ + new_state = 1; + fep->oldduplex = phydev->duplex; + } + + if (phydev->speed != fep->oldspeed) { + new_state = 1; + fep->oldspeed = phydev->speed; + } + + if (!fep->oldlink) { + new_state = 1; + fep->oldlink = 1; + netif_schedule(dev); + netif_carrier_on(dev); + netif_start_queue(dev); + } + + if (new_state) + fep->ops->restart(dev); + + } else if (fep->oldlink) { + new_state = 1; + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + netif_carrier_off(dev); + netif_stop_queue(dev); + } + + if (new_state && netif_msg_link(fep)) + phy_print_status(phydev); +} + + +static void fs_adjust_link(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&fep->lock, flags); + + if(fep->ops->adjust_link) + fep->ops->adjust_link(dev); + else + generic_adjust_link(dev); + + spin_unlock_irqrestore(&fep->lock, flags); +} + +static int fs_init_phy(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev; + + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + if(fep->fpi->bus_id) + phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0); + else { + printk("No phy bus ID specified in BSP code\n"); + return -EINVAL; + } + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + fep->phydev = phydev; + + return 0; +} + + static int fs_enet_open(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; int r; + int err; /* Install our interrupt handler. 
*/ r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); if (r != 0) { printk(KERN_ERR DRV_MODULE_NAME - ": %s Could not allocate FEC IRQ!", dev->name); + ": %s Could not allocate FS_ENET IRQ!", dev->name); return -EINVAL; } - /* Install our phy interrupt handler */ - if (fpi->phy_irq != -1) { - - r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt); - if (r != 0) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s Could not allocate PHY IRQ!", dev->name); - fs_free_irq(dev, fep->interrupt); - return -EINVAL; - } - } + err = fs_init_phy(dev); + if(err) + return err; - fs_mii_startup(dev); - netif_carrier_off(dev); - fs_mii_link_status_change_check(dev, 1); + phy_start(fep->phydev); return 0; } @@ -769,20 +821,19 @@ static int fs_enet_open(struct net_device *dev) static int fs_enet_close(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; unsigned long flags; netif_stop_queue(dev); netif_carrier_off(dev); - fs_mii_shutdown(dev); + phy_stop(fep->phydev); spin_lock_irqsave(&fep->lock, flags); (*fep->ops->stop)(dev); spin_unlock_irqrestore(&fep->lock, flags); /* release any irqs */ - if (fpi->phy_irq != -1) - fs_free_irq(dev, fpi->phy_irq); + phy_disconnect(fep->phydev); + fep->phydev = NULL; fs_free_irq(dev, fep->interrupt); return 0; @@ -830,33 +881,19 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct fs_enet_private *fep = netdev_priv(dev); - unsigned long flags; - int rc; - - spin_lock_irqsave(&fep->lock, flags); - rc = mii_ethtool_gset(&fep->mii_if, cmd); - spin_unlock_irqrestore(&fep->lock, flags); - - return rc; + return phy_ethtool_gset(fep->phydev, cmd); } static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct fs_enet_private *fep = netdev_priv(dev); - unsigned long flags; - int rc; - - spin_lock_irqsave(&fep->lock, flags); - rc = mii_ethtool_sset(&fep->mii_if, cmd); - spin_unlock_irqrestore(&fep->lock, flags); - - return rc; + phy_ethtool_sset(fep->phydev, cmd); + return 0; } static int fs_nway_reset(struct net_device *dev) { - struct fs_enet_private *fep = netdev_priv(dev); - return mii_nway_restart(&fep->mii_if); + return 0; } static u32 fs_get_msglevel(struct net_device *dev) @@ -898,7 +935,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EINVAL; spin_lock_irqsave(&fep->lock, flags); - rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); + rc = phy_mii_ioctl(fep->phydev, mii, cmd); spin_unlock_irqrestore(&fep->lock, flags); return rc; } @@ -1030,12 +1067,6 @@ static struct net_device *fs_init_instance(struct device *dev, } registered = 1; - err = fs_mii_connect(ndev); - if (err != 0) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s fs_mii_connect failed.\n", ndev->name); - goto err; - } return ndev; @@ -1073,8 +1104,6 @@ static int fs_cleanup_instance(struct net_device *ndev) fpi = fep->fpi; - fs_mii_disconnect(ndev); - unregister_netdev(ndev); dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), @@ -1196,17 +1225,39 @@ static int __init fs_init(void) r = setup_immap(); if (r != 0) return r; - r = driver_register(&fs_enet_fec_driver); + +#ifdef CONFIG_FS_ENET_HAS_FCC + /* let's insert mii stuff */ + r = fs_enet_mdio_bb_init(); + + if (r != 0) { + printk(KERN_ERR DRV_MODULE_NAME + "BB PHY init failed.\n"); + return r; + } + r = driver_register(&fs_enet_fcc_driver); if (r != 0) goto 
err; +#endif - r = driver_register(&fs_enet_fcc_driver); +#ifdef CONFIG_FS_ENET_HAS_FEC + r = fs_enet_mdio_fec_init(); + if (r != 0) { + printk(KERN_ERR DRV_MODULE_NAME + "FEC PHY init failed.\n"); + return r; + } + + r = driver_register(&fs_enet_fec_driver); if (r != 0) goto err; +#endif +#ifdef CONFIG_FS_ENET_HAS_SCC r = driver_register(&fs_enet_scc_driver); if (r != 0) goto err; +#endif return 0; err: diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c deleted file mode 100644 index b7e6e21725c..00000000000 --- a/drivers/net/fs_enet/fs_enet-mii.c +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. - * - * Copyright (c) 2003 Intracom S.A. - * by Pantelis Antoniou <panto@intracom.gr> - * - * 2005 (c) MontaVista Software, Inc. - * Vitaly Bordug <vbordug@ru.mvista.com> - * - * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> - * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/mii.h> -#include <linux/ethtool.h> -#include <linux/bitops.h> - -#include <asm/pgtable.h> -#include <asm/irq.h> -#include <asm/uaccess.h> - -#include "fs_enet.h" - -/*************************************************/ - -/* - * Generic PHY support. - * Should work for all PHYs, but link change is detected by polling - */ - -static void generic_timer_callback(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct fs_enet_private *fep = netdev_priv(dev); - - fep->phy_timer_list.expires = jiffies + HZ / 2; - - add_timer(&fep->phy_timer_list); - - fs_mii_link_status_change_check(dev, 0); -} - -static void generic_startup(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */ - fep->phy_timer_list.data = (unsigned long)dev; - fep->phy_timer_list.function = generic_timer_callback; - add_timer(&fep->phy_timer_list); -} - -static void generic_shutdown(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - del_timer_sync(&fep->phy_timer_list); -} - -/* ------------------------------------------------------------------------- */ -/* The Davicom DM9161 is used on the NETTA board */ - -/* register definitions */ - -#define MII_DM9161_ANAR 4 /* Aux. Config Register */ -#define MII_DM9161_ACR 16 /* Aux. Config Register */ -#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */ -#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */ -#define MII_DM9161_INTR 21 /* Interrupt Register */ -#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. 
*/ -#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */ - -static void dm9161_startup(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000); - /* Start autonegotiation */ - fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200); - - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ*8); -} - -static void dm9161_ack_int(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR); -} - -static void dm9161_shutdown(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00); -} - -/**********************************************************************************/ - -static const struct phy_info phy_info[] = { - { - .id = 0x00181b88, - .name = "DM9161", - .startup = dm9161_startup, - .ack_int = dm9161_ack_int, - .shutdown = dm9161_shutdown, - }, { - .id = 0, - .name = "GENERIC", - .startup = generic_startup, - .shutdown = generic_shutdown, - }, -}; - -/**********************************************************************************/ - -static int phy_id_detect(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; - struct fs_enet_mii_bus *bus = fep->mii_bus; - int i, r, start, end, phytype, physubtype; - const struct phy_info *phy; - int phy_hwid, phy_id; - - phy_hwid = -1; - fep->phy = NULL; - - /* auto-detect? */ - if (fpi->phy_addr == -1) { - start = 1; - end = 32; - } else { /* direct */ - start = fpi->phy_addr; - end = start + 1; - } - - for (phy_id = start; phy_id < end; phy_id++) { - /* skip already used phy addresses on this bus */ - if (bus->usage_map & (1 << phy_id)) - continue; - r = fs_mii_read(dev, phy_id, MII_PHYSID1); - if (r == -1 || (phytype = (r & 0xffff)) == 0xffff) - continue; - r = fs_mii_read(dev, phy_id, MII_PHYSID2); - if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff) - continue; - phy_hwid = (phytype << 16) | physubtype; - if (phy_hwid != -1) - break; - } - - if (phy_hwid == -1) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s No PHY detected! range=0x%02x-0x%02x\n", - dev->name, start, end); - return -1; - } - - for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++) - if (phy->id == (phy_hwid >> 4) || phy->id == 0) - break; - - if (i >= ARRAY_SIZE(phy_info)) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s PHY id 0x%08x is not supported!\n", - dev->name, phy_hwid); - return -1; - } - - fep->phy = phy; - - /* mark this address as used */ - bus->usage_map |= (1 << phy_id); - - printk(KERN_INFO DRV_MODULE_NAME - ": %s Phy @ 0x%x, type %s (0x%08x)%s\n", - dev->name, phy_id, fep->phy->name, phy_hwid, - fpi->phy_addr == -1 ? 
" (auto-detected)" : ""); - - return phy_id; -} - -void fs_mii_startup(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (fep->phy->startup) - (*fep->phy->startup) (dev); -} - -void fs_mii_shutdown(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (fep->phy->shutdown) - (*fep->phy->shutdown) (dev); -} - -void fs_mii_ack_int(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (fep->phy->ack_int) - (*fep->phy->ack_int) (dev); -} - -#define MII_LINK 0x0001 -#define MII_HALF 0x0002 -#define MII_FULL 0x0004 -#define MII_BASE4 0x0008 -#define MII_10M 0x0010 -#define MII_100M 0x0020 -#define MII_1G 0x0040 -#define MII_10G 0x0080 - -/* return full mii info at one gulp, with a usable form */ -static unsigned int mii_full_status(struct mii_if_info *mii) -{ - unsigned int status; - int bmsr, adv, lpa, neg; - struct fs_enet_private* fep = netdev_priv(mii->dev); - - /* first, a dummy read, needed to latch some MII phys */ - (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); - bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); - - /* no link */ - if ((bmsr & BMSR_LSTATUS) == 0) - return 0; - - status = MII_LINK; - - /* Lets look what ANEG says if it's supported - otherwize we shall - take the right values from the platform info*/ - if(!mii->force_media) { - /* autoneg not completed; don't bother */ - if ((bmsr & BMSR_ANEGCOMPLETE) == 0) - return 0; - - adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE); - lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA); - - neg = lpa & adv; - } else { - neg = fep->fpi->bus_info->lpa; - } - - if (neg & LPA_100FULL) - status |= MII_FULL | MII_100M; - else if (neg & LPA_100BASE4) - status |= MII_FULL | MII_BASE4 | MII_100M; - else if (neg & LPA_100HALF) - status |= MII_HALF | MII_100M; - else if (neg & LPA_10FULL) - status |= MII_FULL | MII_10M; - else - status |= MII_HALF | MII_10M; - - return status; -} - -void fs_mii_link_status_change_check(struct net_device *dev, int init_media) -{ - struct fs_enet_private *fep = netdev_priv(dev); - struct mii_if_info *mii = &fep->mii_if; - unsigned int mii_status; - int ok_to_print, link, duplex, speed; - unsigned long flags; - - ok_to_print = netif_msg_link(fep); - - mii_status = mii_full_status(mii); - - if (!init_media && mii_status == fep->last_mii_status) - return; - - fep->last_mii_status = mii_status; - - link = !!(mii_status & MII_LINK); - duplex = !!(mii_status & MII_FULL); - speed = (mii_status & MII_100M) ? 100 : 10; - - if (link == 0) { - netif_carrier_off(mii->dev); - netif_stop_queue(dev); - if (!init_media) { - spin_lock_irqsave(&fep->lock, flags); - (*fep->ops->stop)(dev); - spin_unlock_irqrestore(&fep->lock, flags); - } - - if (ok_to_print) - printk(KERN_INFO "%s: link down\n", mii->dev->name); - - } else { - - mii->full_duplex = duplex; - - netif_carrier_on(mii->dev); - - spin_lock_irqsave(&fep->lock, flags); - fep->duplex = duplex; - fep->speed = speed; - (*fep->ops->restart)(dev); - spin_unlock_irqrestore(&fep->lock, flags); - - netif_start_queue(dev); - - if (ok_to_print) - printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n", - dev->name, speed, duplex ? 
"full" : "half"); - } -} - -/**********************************************************************************/ - -int fs_mii_read(struct net_device *dev, int phy_id, int location) -{ - struct fs_enet_private *fep = netdev_priv(dev); - struct fs_enet_mii_bus *bus = fep->mii_bus; - - unsigned long flags; - int ret; - - spin_lock_irqsave(&bus->mii_lock, flags); - ret = (*bus->mii_read)(bus, phy_id, location); - spin_unlock_irqrestore(&bus->mii_lock, flags); - - return ret; -} - -void fs_mii_write(struct net_device *dev, int phy_id, int location, int value) -{ - struct fs_enet_private *fep = netdev_priv(dev); - struct fs_enet_mii_bus *bus = fep->mii_bus; - unsigned long flags; - - spin_lock_irqsave(&bus->mii_lock, flags); - (*bus->mii_write)(bus, phy_id, location, value); - spin_unlock_irqrestore(&bus->mii_lock, flags); -} - -/*****************************************************************************/ - -/* list of all registered mii buses */ -static LIST_HEAD(fs_mii_bus_list); - -static struct fs_enet_mii_bus *lookup_bus(int method, int id) -{ - struct list_head *ptr; - struct fs_enet_mii_bus *bus; - - list_for_each(ptr, &fs_mii_bus_list) { - bus = list_entry(ptr, struct fs_enet_mii_bus, list); - if (bus->bus_info->method == method && - bus->bus_info->id == id) - return bus; - } - return NULL; -} - -static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi) -{ - struct fs_enet_mii_bus *bus; - int ret = 0; - - bus = kmalloc(sizeof(*bus), GFP_KERNEL); - if (bus == NULL) { - ret = -ENOMEM; - goto err; - } - memset(bus, 0, sizeof(*bus)); - spin_lock_init(&bus->mii_lock); - bus->bus_info = bi; - bus->refs = 0; - bus->usage_map = 0; - - /* perform initialization */ - switch (bi->method) { - - case fsmii_fixed: - ret = fs_mii_fixed_init(bus); - if (ret != 0) - goto err; - break; - - case fsmii_bitbang: - ret = fs_mii_bitbang_init(bus); - if (ret != 0) - goto err; - break; -#ifdef CONFIG_FS_ENET_HAS_FEC - case fsmii_fec: - ret = fs_mii_fec_init(bus); - if (ret != 0) - goto err; - break; -#endif - default: - ret = -EINVAL; - goto err; - } - - list_add(&bus->list, &fs_mii_bus_list); - - return bus; - -err: - kfree(bus); - return ERR_PTR(ret); -} - -static void destroy_bus(struct fs_enet_mii_bus *bus) -{ - /* remove from bus list */ - list_del(&bus->list); - - /* nothing more needed */ - kfree(bus); -} - -int fs_mii_connect(struct net_device *dev) -{ - struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; - struct fs_enet_mii_bus *bus = NULL; - - /* check method validity */ - switch (fpi->bus_info->method) { - case fsmii_fixed: - case fsmii_bitbang: - break; -#ifdef CONFIG_FS_ENET_HAS_FEC - case fsmii_fec: - break; -#endif - default: - printk(KERN_ERR DRV_MODULE_NAME - ": %s Unknown MII bus method (%d)!\n", - dev->name, fpi->bus_info->method); - return -EINVAL; - } - - bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id); - - /* if not found create new bus */ - if (bus == NULL) { - bus = create_bus(fpi->bus_info); - if (IS_ERR(bus)) { - printk(KERN_ERR DRV_MODULE_NAME - ": %s MII bus creation failure!\n", dev->name); - return PTR_ERR(bus); - } - } - - bus->refs++; - - fep->mii_bus = bus; - - fep->mii_if.dev = dev; - fep->mii_if.phy_id_mask = 0x1f; - fep->mii_if.reg_num_mask = 0x1f; - fep->mii_if.mdio_read = fs_mii_read; - fep->mii_if.mdio_write = fs_mii_write; - fep->mii_if.force_media = fpi->bus_info->disable_aneg; - fep->mii_if.phy_id = phy_id_detect(dev); - - return 0; -} - -void fs_mii_disconnect(struct net_device *dev) -{ - 
struct fs_enet_private *fep = netdev_priv(dev); - struct fs_enet_mii_bus *bus = NULL; - - bus = fep->mii_bus; - fep->mii_bus = NULL; - - if (--bus->refs <= 0) - destroy_bus(bus); -} diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h index e7ec96c964a..95022c005f7 100644 --- a/drivers/net/fs_enet/fs_enet.h +++ b/drivers/net/fs_enet/fs_enet.h @@ -5,6 +5,7 @@ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/list.h> +#include <linux/phy.h> #include <linux/fs_enet_pd.h> @@ -12,12 +13,30 @@ #ifdef CONFIG_CPM1 #include <asm/commproc.h> + +struct fec_info { + fec_t* fecp; + u32 mii_speed; +}; #endif #ifdef CONFIG_CPM2 #include <asm/cpm2.h> #endif +/* This is used to operate with pins. + Note that the actual port size may + be different; cpm(s) handle it OK */ +struct bb_info { + u8 mdio_dat_msk; + u8 mdio_dir_msk; + u8 *mdio_dir; + u8 *mdio_dat; + u8 mdc_msk; + u8 *mdc_dat; + int delay; +}; + /* hw driver ops */ struct fs_ops { int (*setup_data)(struct net_device *dev); @@ -25,6 +44,7 @@ struct fs_ops { void (*free_bd)(struct net_device *dev); void (*cleanup_data)(struct net_device *dev); void (*set_multicast_list)(struct net_device *dev); + void (*adjust_link)(struct net_device *dev); void (*restart)(struct net_device *dev); void (*stop)(struct net_device *dev); void (*pre_request_irq)(struct net_device *dev, int irq); @@ -100,10 +120,6 @@ struct fs_enet_mii_bus { }; }; -int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus); -int fs_mii_fixed_init(struct fs_enet_mii_bus *bus); -int fs_mii_fec_init(struct fs_enet_mii_bus *bus); - struct fs_enet_private { struct device *dev; /* pointer back to the device (must be initialized first) */ spinlock_t lock; /* during all ops except TX pckt processing */ @@ -130,7 +146,8 @@ struct fs_enet_private { struct fs_enet_mii_bus *mii_bus; int interrupt; - int duplex, speed; /* current settings */ + struct phy_device *phydev; + int oldduplex, oldspeed, oldlink; /* current settings */ /* event masks */ u32 ev_napi_rx; /* mask of NAPI rx events */ @@ -168,15 +185,9 @@ struct fs_enet_private { }; /***************************************************************************/ - -int fs_mii_read(struct net_device *dev, int phy_id, int location); -void fs_mii_write(struct net_device *dev, int phy_id, int location, int value); - -void fs_mii_startup(struct net_device *dev); -void fs_mii_shutdown(struct net_device *dev); -void fs_mii_ack_int(struct net_device *dev); - -void fs_mii_link_status_change_check(struct net_device *dev, int init_media); +int fs_enet_mdio_bb_init(void); +int fs_mii_fixed_init(struct fs_enet_mii_bus *bus); +int fs_enet_mdio_fec_init(void); void fs_init_bds(struct net_device *dev); void fs_cleanup_bds(struct net_device *dev); @@ -194,7 +205,6 @@ int fs_enet_platform_init(void); void fs_enet_platform_cleanup(void); /***************************************************************************/ - /* buffer descriptor access macros */ /* access macros */ diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index 64e20982c1f..1ff2597b849 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -34,6 +34,7 @@ #include <linux/bitops.h> #include <linux/fs.h> #include <linux/platform_device.h> +#include <linux/phy.h> #include <asm/immap_cpm2.h> #include <asm/mpc8260.h> @@ -122,22 +123,32 @@ static int do_pd_setup(struct fs_enet_private *fep) /* Attach the memory for the FCC Parameter RAM */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); - fep->fcc.ep 
= (void *)r->start; - + fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1); if (fep->fcc.ep == NULL) return -EINVAL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); - fep->fcc.fccp = (void *)r->start; - + fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1); if (fep->fcc.fccp == NULL) return -EINVAL; - fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; + if (fep->fpi->fcc_regs_c) { + + fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; + } else { + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "fcc_regs_c"); + fep->fcc.fcccp = (void *)ioremap(r->start, + r->end - r->start + 1); + } if (fep->fcc.fcccp == NULL) return -EINVAL; + fep->fcc.mem = (void *)fep->fpi->mem_offset; + if (fep->fcc.mem == NULL) + return -EINVAL; + return 0; } @@ -155,8 +166,6 @@ static int setup_data(struct net_device *dev) if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ return -EINVAL; - fep->fcc.mem = (void *)fpi->mem_offset; - if (do_pd_setup(fep) != 0) return -EINVAL; @@ -394,7 +403,7 @@ static void restart(struct net_device *dev) /* adjust to speed (for RMII mode) */ if (fpi->use_rmii) { - if (fep->speed == 100) + if (fep->phydev->speed == 100) C8(fcccp, fcc_gfemr, 0x20); else S8(fcccp, fcc_gfemr, 0x20); @@ -420,7 +429,7 @@ static void restart(struct net_device *dev) S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); /* adjust to duplex mode */ - if (fep->duplex) + if (fep->phydev->duplex) S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); else C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); @@ -486,7 +495,10 @@ static void rx_bd_done(struct net_device *dev) static void tx_kickstart(struct net_device *dev) { - /* nothing */ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t *fccp = fep->fcc.fccp; + + S32(fccp, fcc_ftodr, 0x80); } static u32 get_int_events(struct net_device *dev) diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index e0954707752..c2c5fd419bd 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c @@ -46,6 +46,7 @@ #endif #include "fs_enet.h" +#include "fec.h" /*************************************************/ @@ -75,50 +76,8 @@ /* clear bits */ #define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) - -/* CRC polynomium used by the FEC for the multicast group filtering */ -#define FEC_CRC_POLY 0x04C11DB7 - -#define FEC_MAX_MULTICAST_ADDRS 64 - -/* Interrupt events/masks. -*/ -#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ -#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ -#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ -#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ -#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ -#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ -#define FEC_ENET_RXF 0x02000000U /* Full frame received */ -#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ -#define FEC_ENET_MII 0x00800000U /* MII interrupt */ -#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ - -#define FEC_ECNTRL_PINMUX 0x00000004 -#define FEC_ECNTRL_ETHER_EN 0x00000002 -#define FEC_ECNTRL_RESET 0x00000001 - -#define FEC_RCNTRL_BC_REJ 0x00000010 -#define FEC_RCNTRL_PROM 0x00000008 -#define FEC_RCNTRL_MII_MODE 0x00000004 -#define FEC_RCNTRL_DRT 0x00000002 -#define FEC_RCNTRL_LOOP 0x00000001 - -#define FEC_TCNTRL_FDEN 0x00000004 -#define FEC_TCNTRL_HBC 0x00000002 -#define FEC_TCNTRL_GTS 0x00000001 - - -/* Make MII read/write commands for the FEC. 
-*/ -#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) -#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) -#define mk_mii_end 0 - -#define FEC_MII_LOOPS 10000 - /* - * Delay to wait for FEC reset command to complete (in us) + * Delay to wait for FEC reset command to complete (in us) */ #define FEC_RESET_DELAY 50 @@ -303,13 +262,15 @@ static void restart(struct net_device *dev) int r; u32 addrhi, addrlo; + struct mii_bus* mii = fep->phydev->bus; + struct fec_info* fec_inf = mii->priv; + r = whack_reset(fep->fec.fecp); if (r != 0) printk(KERN_ERR DRV_MODULE_NAME ": %s FEC Reset FAILED!\n", dev->name); - /* - * Set station address. + * Set station address. */ addrhi = ((u32) dev->dev_addr[0] << 24) | ((u32) dev->dev_addr[1] << 16) | @@ -350,12 +311,12 @@ static void restart(struct net_device *dev) FW(fecp, fun_code, 0x78000000); /* - * Set MII speed. + * Set MII speed. */ - FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed); + FW(fecp, mii_speed, fec_inf->mii_speed); /* - * Clear any outstanding interrupt. + * Clear any outstanding interrupt. */ FW(fecp, ievent, 0xffc0); FW(fecp, ivec, (fep->interrupt / 2) << 29); @@ -390,11 +351,12 @@ static void restart(struct net_device *dev) } #endif + FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ /* - * adjust to duplex mode + * adjust to duplex mode */ - if (fep->duplex) { + if (fep->phydev->duplex) { FC(fecp, r_cntrl, FEC_RCNTRL_DRT); FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ } else { @@ -418,9 +380,11 @@ static void restart(struct net_device *dev) static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; fec_t *fecp = fep->fec.fecp; - struct fs_enet_mii_bus *bus = fep->mii_bus; - const struct fs_mii_bus_info *bi = bus->bus_info; + + struct fec_info* feci= fep->phydev->bus->priv; + int i; if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) @@ -444,11 +408,11 @@ static void stop(struct net_device *dev) fs_cleanup_bds(dev); /* shut down FEC1? that's where the mii bus is */ - if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) { + if (fpi->has_phy) { FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); FW(fecp, ievent, FEC_ENET_MII); - FW(fecp, mii_speed, bus->fec.mii_speed); + FW(fecp, mii_speed, feci->mii_speed); } } @@ -583,73 +547,3 @@ const struct fs_ops fs_fec_ops = { .free_bd = free_bd, }; -/***********************************************************************/ - -static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) -{ - fec_t *fecp = bus->fec.fecp; - int i, ret = -1; - - if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) - BUG(); - - /* Add PHY address to register command. */ - FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location)); - - for (i = 0; i < FEC_MII_LOOPS; i++) - if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) - break; - - if (i < FEC_MII_LOOPS) { - FW(fecp, ievent, FEC_ENET_MII); - ret = FR(fecp, mii_data) & 0xffff; - } - - return ret; -} - -static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value) -{ - fec_t *fecp = bus->fec.fecp; - int i; - - /* this must never happen */ - if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) - BUG(); - - /* Add PHY address to register command. 
*/ - FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value)); - - for (i = 0; i < FEC_MII_LOOPS; i++) - if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) - break; - - if (i < FEC_MII_LOOPS) - FW(fecp, ievent, FEC_ENET_MII); -} - -int fs_mii_fec_init(struct fs_enet_mii_bus *bus) -{ - bd_t *bd = (bd_t *)__res; - const struct fs_mii_bus_info *bi = bus->bus_info; - fec_t *fecp; - - if (bi->id != 0) - return -1; - - bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec; - bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) - & 0x3F) << 1; - - fecp = bus->fec.fecp; - - FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ - FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); - FW(fecp, ievent, FEC_ENET_MII); - FW(fecp, mii_speed, bus->fec.mii_speed); - - bus->mii_read = mii_read; - bus->mii_write = mii_write; - - return 0; -} diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index eaa24fab645..95ec5872c50 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c @@ -369,7 +369,7 @@ static void restart(struct net_device *dev) W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); /* Set full duplex mode if needed */ - if (fep->duplex) + if (fep->phydev->duplex) S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); @@ -500,6 +500,8 @@ static void tx_restart(struct net_device *dev) scc_cr_cmd(fep, CPM_CR_RESTART_TX); } + + /*************************************************************************/ const struct fs_ops fs_scc_ops = { diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c index 48f9cf83ab6..0b9b8b5c847 100644 --- a/drivers/net/fs_enet/mii-bitbang.c +++ b/drivers/net/fs_enet/mii-bitbang.c @@ -33,6 +33,7 @@ #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/bitops.h> +#include <linux/platform_device.h> #include <asm/pgtable.h> #include <asm/irq.h> @@ -40,129 +41,25 @@ #include "fs_enet.h" -#ifdef CONFIG_8xx -static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) +static int bitbang_prep_bit(u8 **datp, u8 *mskp, + struct fs_mii_bit *mii_bit) { - immap_t *im = (immap_t *)fs_enet_immap; - void *dir, *dat, *ppar; + void *dat; int adv; u8 msk; - switch (port) { - case fsiop_porta: - dir = &im->im_ioport.iop_padir; - dat = &im->im_ioport.iop_padat; - ppar = &im->im_ioport.iop_papar; - break; - - case fsiop_portb: - dir = &im->im_cpm.cp_pbdir; - dat = &im->im_cpm.cp_pbdat; - ppar = &im->im_cpm.cp_pbpar; - break; - - case fsiop_portc: - dir = &im->im_ioport.iop_pcdir; - dat = &im->im_ioport.iop_pcdat; - ppar = &im->im_ioport.iop_pcpar; - break; - - case fsiop_portd: - dir = &im->im_ioport.iop_pddir; - dat = &im->im_ioport.iop_pddat; - ppar = &im->im_ioport.iop_pdpar; - break; - - case fsiop_porte: - dir = &im->im_cpm.cp_pedir; - dat = &im->im_cpm.cp_pedat; - ppar = &im->im_cpm.cp_pepar; - break; - - default: - printk(KERN_ERR DRV_MODULE_NAME - "Illegal port value %d!\n", port); - return -EINVAL; - } - - adv = bit >> 3; - dir = (char *)dir + adv; - dat = (char *)dat + adv; - ppar = (char *)ppar + adv; - - msk = 1 << (7 - (bit & 7)); - if ((in_8(ppar) & msk) != 0) { - printk(KERN_ERR DRV_MODULE_NAME - "pin %d on port %d is not general purpose!\n", bit, port); - return -EINVAL; - } - - *dirp = dir; - *datp = dat; - *mskp = msk; - - return 0; -} -#endif - -#ifdef CONFIG_8260 -static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) -{ - iop_cpm2_t *io = &((cpm2_map_t 
*)fs_enet_immap)->im_ioport; - void *dir, *dat, *ppar; - int adv; - u8 msk; - - switch (port) { - case fsiop_porta: - dir = &io->iop_pdira; - dat = &io->iop_pdata; - ppar = &io->iop_ppara; - break; - - case fsiop_portb: - dir = &io->iop_pdirb; - dat = &io->iop_pdatb; - ppar = &io->iop_pparb; - break; - - case fsiop_portc: - dir = &io->iop_pdirc; - dat = &io->iop_pdatc; - ppar = &io->iop_pparc; - break; - - case fsiop_portd: - dir = &io->iop_pdird; - dat = &io->iop_pdatd; - ppar = &io->iop_ppard; - break; - - default: - printk(KERN_ERR DRV_MODULE_NAME - "Illegal port value %d!\n", port); - return -EINVAL; - } + dat = (void*) mii_bit->offset; - adv = bit >> 3; - dir = (char *)dir + adv; + adv = mii_bit->bit >> 3; dat = (char *)dat + adv; - ppar = (char *)ppar + adv; - msk = 1 << (7 - (bit & 7)); - if ((in_8(ppar) & msk) != 0) { - printk(KERN_ERR DRV_MODULE_NAME - "pin %d on port %d is not general purpose!\n", bit, port); - return -EINVAL; - } + msk = 1 << (7 - (mii_bit->bit & 7)); - *dirp = dir; *datp = dat; *mskp = msk; return 0; } -#endif static inline void bb_set(u8 *p, u8 m) { @@ -179,44 +76,44 @@ static inline int bb_read(u8 *p, u8 m) return (in_8(p) & m) != 0; } -static inline void mdio_active(struct fs_enet_mii_bus *bus) +static inline void mdio_active(struct bb_info *bitbang) { - bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); + bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk); } -static inline void mdio_tristate(struct fs_enet_mii_bus *bus) +static inline void mdio_tristate(struct bb_info *bitbang ) { - bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); + bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk); } -static inline int mdio_read(struct fs_enet_mii_bus *bus) +static inline int mdio_read(struct bb_info *bitbang ) { - return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); + return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk); } -static inline void mdio(struct fs_enet_mii_bus *bus, int what) +static inline void mdio(struct bb_info *bitbang , int what) { if (what) - bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); + bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk); else - bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); + bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk); } -static inline void mdc(struct fs_enet_mii_bus *bus, int what) +static inline void mdc(struct bb_info *bitbang , int what) { if (what) - bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); + bb_set(bitbang->mdc_dat, bitbang->mdc_msk); else - bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); + bb_clr(bitbang->mdc_dat, bitbang->mdc_msk); } -static inline void mii_delay(struct fs_enet_mii_bus *bus) +static inline void mii_delay(struct bb_info *bitbang ) { - udelay(bus->bus_info->i.bitbang.delay); + udelay(bitbang->delay); } /* Utility to send the preamble, address, and register (common to read and write). */ -static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) +static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg) { int j; @@ -228,177 +125,284 @@ static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) * but it is safer and will be much more robust. 
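The waveform driven by this bit-bang code is plain IEEE 802.3 clause-22 MDIO framing: 32 preamble ones, start bits, a 2-bit opcode, 5-bit PHY and register addresses, a turnaround, then 16 data bits, all shifted out MSB first. As a standalone sketch (not part of the patch), the helper below packs a write frame into one 32-bit word; it yields the same value the FEC MDIO code later in this series builds with (phy_id << 23) | mk_mii_write(location, val).

#include <stdint.h>

/* Clause-22 MDIO write frame (everything after the 32-bit preamble of ones),
 * clocked out MSB first, one MDC cycle per bit. */
static uint32_t mdio_c22_write_frame(uint8_t phy, uint8_t reg, uint16_t val)
{
	uint32_t frame = 0;

	frame |= 0x1u << 30;                   /* start bits: 01         */
	frame |= 0x1u << 28;                   /* opcode:     01 = write */
	frame |= (uint32_t)(phy & 0x1f) << 23; /* PHY address            */
	frame |= (uint32_t)(reg & 0x1f) << 18; /* register number        */
	frame |= 0x2u << 16;                   /* turnaround: 10         */
	frame |= val;                          /* 16 data bits           */

	return frame; /* phy 0, reg 0, val 0 -> 0x50020000, cf. mk_mii_write() */
}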
*/ - mdio_active(bus); - mdio(bus, 1); + mdio_active(bitbang); + mdio(bitbang, 1); for (j = 0; j < 32; j++) { - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); } /* send the start bit (01) and the read opcode (10) or write (10) */ - mdc(bus, 0); - mdio(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, 1); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, read); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, !read); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, read); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, !read); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); /* send the PHY address */ for (j = 0; j < 5; j++) { - mdc(bus, 0); - mdio(bus, (addr & 0x10) != 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, (addr & 0x10) != 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); addr <<= 1; } /* send the register address */ for (j = 0; j < 5; j++) { - mdc(bus, 0); - mdio(bus, (reg & 0x10) != 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, (reg & 0x10) != 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); reg <<= 1; } } -static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) +static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location) { u16 rdreg; int ret, j; u8 addr = phy_id & 0xff; u8 reg = location & 0xff; + struct bb_info* bitbang = bus->priv; - bitbang_pre(bus, 1, addr, reg); + bitbang_pre(bitbang, 1, addr, reg); /* tri-state our MDIO I/O pin so we can read */ - mdc(bus, 0); - mdio_tristate(bus); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio_tristate(bitbang); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); /* check the turnaround bit: the PHY should be driving it to zero */ - if (mdio_read(bus) != 0) { + if (mdio_read(bitbang) != 0) { /* PHY didn't drive TA low */ for (j = 0; j < 32; j++) { - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); } ret = -1; goto out; } - mdc(bus, 0); - mii_delay(bus); + mdc(bitbang, 0); + mii_delay(bitbang); /* read 16 bits of register data, MSB first */ rdreg = 0; for (j = 0; j < 16; j++) { - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 1); + mii_delay(bitbang); rdreg <<= 1; - rdreg |= mdio_read(bus); - mdc(bus, 0); - mii_delay(bus); + rdreg |= mdio_read(bitbang); + mdc(bitbang, 0); + mii_delay(bitbang); } - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); ret = rdreg; out: return ret; } -static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) +static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val) { int j; + struct bb_info* bitbang = bus->priv; + u8 addr = phy_id & 0xff; u8 reg = location & 0xff; u16 value = val & 0xffff; - 
bitbang_pre(bus, 0, addr, reg); + bitbang_pre(bitbang, 0, addr, reg); /* send the turnaround (10) */ - mdc(bus, 0); - mdio(bus, 1); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); - mdc(bus, 0); - mdio(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + mdc(bitbang, 0); + mdio(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); /* write 16 bits of register data, MSB first */ for (j = 0; j < 16; j++) { - mdc(bus, 0); - mdio(bus, (value & 0x8000) != 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdc(bitbang, 0); + mdio(bitbang, (value & 0x8000) != 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); value <<= 1; } /* * Tri-state the MDIO line. */ - mdio_tristate(bus); - mdc(bus, 0); - mii_delay(bus); - mdc(bus, 1); - mii_delay(bus); + mdio_tristate(bitbang); + mdc(bitbang, 0); + mii_delay(bitbang); + mdc(bitbang, 1); + mii_delay(bitbang); + return 0; } -int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus) +static int fs_enet_mii_bb_reset(struct mii_bus *bus) +{ + /*nothing here - dunno how to reset it*/ + return 0; +} + +static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi) { - const struct fs_mii_bus_info *bi = bus->bus_info; int r; - r = bitbang_prep_bit(&bus->bitbang.mdio_dir, - &bus->bitbang.mdio_dat, - &bus->bitbang.mdio_msk, - bi->i.bitbang.mdio_port, - bi->i.bitbang.mdio_bit); + bitbang->delay = fmpi->delay; + + r = bitbang_prep_bit(&bitbang->mdio_dir, + &bitbang->mdio_dir_msk, + &fmpi->mdio_dir); if (r != 0) return r; - r = bitbang_prep_bit(&bus->bitbang.mdc_dir, - &bus->bitbang.mdc_dat, - &bus->bitbang.mdc_msk, - bi->i.bitbang.mdc_port, - bi->i.bitbang.mdc_bit); + r = bitbang_prep_bit(&bitbang->mdio_dat, + &bitbang->mdio_dat_msk, + &fmpi->mdio_dat); if (r != 0) return r; - bus->mii_read = mii_read; - bus->mii_write = mii_write; + r = bitbang_prep_bit(&bitbang->mdc_dat, + &bitbang->mdc_msk, + &fmpi->mdc_dat); + if (r != 0) + return r; return 0; } + + +static int __devinit fs_enet_mdio_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fs_mii_bb_platform_info *pdata; + struct mii_bus *new_bus; + struct bb_info *bitbang; + int err = 0; + + if (NULL == dev) + return -EINVAL; + + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + + if (NULL == new_bus) + return -ENOMEM; + + bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + + if (NULL == bitbang) + return -ENOMEM; + + new_bus->name = "BB MII Bus", + new_bus->read = &fs_enet_mii_bb_read, + new_bus->write = &fs_enet_mii_bb_write, + new_bus->reset = &fs_enet_mii_bb_reset, + new_bus->id = pdev->id; + + new_bus->phy_mask = ~0x9; + pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data; + + if (NULL == pdata) { + printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id); + return -ENODEV; + } + + /*set up workspace*/ + fs_mii_bitbang_init(bitbang, pdata); + + new_bus->priv = bitbang; + + new_bus->irq = pdata->irq; + + new_bus->dev = dev; + dev_set_drvdata(dev, new_bus); + + err = mdiobus_register(new_bus); + + if (0 != err) { + printk (KERN_ERR "%s: Cannot register as MDIO bus\n", + new_bus->name); + goto bus_register_fail; + } + + return 0; + +bus_register_fail: + kfree(bitbang); + kfree(new_bus); + + return err; +} + + +static int fs_enet_mdio_remove(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_unregister(bus); + + 
dev_set_drvdata(dev, NULL); + + iounmap((void *) (&bus->priv)); + bus->priv = NULL; + kfree(bus); + + return 0; +} + +static struct device_driver fs_enet_bb_mdio_driver = { + .name = "fsl-bb-mdio", + .bus = &platform_bus_type, + .probe = fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; + +int fs_enet_mdio_bb_init(void) +{ + return driver_register(&fs_enet_bb_mdio_driver); +} + +void fs_enet_mdio_bb_exit(void) +{ + driver_unregister(&fs_enet_bb_mdio_driver); +} + diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c new file mode 100644 index 00000000000..1328e10caa3 --- /dev/null +++ b/drivers/net/fs_enet/mii-fec.c @@ -0,0 +1,243 @@ +/* + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou <panto@intracom.gr> + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/bitops.h> +#include <linux/platform_device.h> + +#include <asm/pgtable.h> +#include <asm/irq.h> +#include <asm/uaccess.h> + +#include "fs_enet.h" +#include "fec.h" + +/* Make MII read/write commands for the FEC. 
+*/ +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) +#define mk_mii_end 0 + +#define FEC_MII_LOOPS 10000 + +static int match_has_phy (struct device *dev, void* data) +{ + struct platform_device* pdev = container_of(dev, struct platform_device, dev); + struct fs_platform_info* fpi; + if(strcmp(pdev->name, (char*)data)) + { + return 0; + } + + fpi = pdev->dev.platform_data; + if((fpi)&&(fpi->has_phy)) + return 1; + return 0; +} + +static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi) +{ + struct resource *r; + fec_t *fecp; + char* name = "fsl-cpm-fec"; + + /* we need fec in order to be useful */ + struct platform_device *fec_pdev = + container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy), + struct platform_device, dev); + + if(fec_pdev == NULL) { + printk(KERN_ERR"Unable to find PHY for %s", name); + return -ENODEV; + } + + r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs"); + + fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t)); + fec->mii_speed = fmpi->mii_speed; + + setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ + setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + out_be32(&fecp->fec_mii_speed, fec->mii_speed); + + return 0; +} + +static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) +{ + struct fec_info* fec = bus->priv; + fec_t *fecp = fec->fecp; + int i, ret = -1; + + if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) + BUG(); + + /* Add PHY address to register command. */ + out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) { + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + ret = in_be32(&fecp->fec_mii_data) & 0xffff; + } + + return ret; + +} + +static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) +{ + struct fec_info* fec = bus->priv; + fec_t *fecp = fec->fecp; + int i; + + /* this must never happen */ + if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) + BUG(); + + /* Add PHY address to register command. 
*/ + out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + + return 0; + +} + +static int fs_enet_fec_mii_reset(struct mii_bus *bus) +{ + /* nothing here - for now */ + return 0; +} + +static int __devinit fs_enet_fec_mdio_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fs_mii_fec_platform_info *pdata; + struct mii_bus *new_bus; + struct fec_info *fec; + int err = 0; + if (NULL == dev) + return -EINVAL; + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + + if (NULL == new_bus) + return -ENOMEM; + + fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL); + + if (NULL == fec) + return -ENOMEM; + + new_bus->name = "FEC MII Bus", + new_bus->read = &fs_enet_fec_mii_read, + new_bus->write = &fs_enet_fec_mii_write, + new_bus->reset = &fs_enet_fec_mii_reset, + new_bus->id = pdev->id; + + pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data; + + if (NULL == pdata) { + printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id); + return -ENODEV; + } + + /*set up workspace*/ + + fs_mii_fec_init(fec, pdata); + new_bus->priv = fec; + + new_bus->irq = pdata->irq; + + new_bus->dev = dev; + dev_set_drvdata(dev, new_bus); + + err = mdiobus_register(new_bus); + + if (0 != err) { + printk (KERN_ERR "%s: Cannot register as MDIO bus\n", + new_bus->name); + goto bus_register_fail; + } + + return 0; + +bus_register_fail: + kfree(new_bus); + + return err; +} + + +static int fs_enet_fec_mdio_remove(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_unregister(bus); + + dev_set_drvdata(dev, NULL); + kfree(bus->priv); + + bus->priv = NULL; + kfree(bus); + + return 0; +} + +static struct device_driver fs_enet_fec_mdio_driver = { + .name = "fsl-cpm-fec-mdio", + .bus = &platform_bus_type, + .probe = fs_enet_fec_mdio_probe, + .remove = fs_enet_fec_mdio_remove, +}; + +int fs_enet_mdio_fec_init(void) +{ + return driver_register(&fs_enet_fec_mdio_driver); +} + +void fs_enet_mdio_fec_exit(void) +{ + driver_unregister(&fs_enet_fec_mdio_driver); +} + diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c deleted file mode 100644 index ae4a9c3bb39..00000000000 --- a/drivers/net/fs_enet/mii-fixed.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. - * - * Copyright (c) 2003 Intracom S.A. - * by Pantelis Antoniou <panto@intracom.gr> - * - * 2005 (c) MontaVista Software, Inc. - * Vitaly Bordug <vbordug@ru.mvista.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
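One detail worth noting in the FEC MDIO split above: the removed mac-fec.c code derived the MDC divider from the board clock (bd->bi_intfreq), while the new mii-fec.c simply takes a ready-made mii_speed value from platform data. For reference, a standalone sketch of the arithmetic the removed fs_mii_fec_init() used to keep MDC in the usual ~2.5 MHz range (illustration only, not part of the patch):

/* MII_SPEED field, same arithmetic as the removed fs_mii_fec_init():
 * scale the internal bus clock toward the standard ~2.5 MHz MDC rate,
 * mask to 6 bits and shift into the register's field position. */
static unsigned int fec_mii_speed(unsigned int intfreq_hz)
{
	return ((((intfreq_hz + 4999999) / 2500000) / 2) & 0x3f) << 1;
	/* e.g. a 66 MHz bus clock gives 0x1c */
}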
- */ - - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/mii.h> -#include <linux/ethtool.h> -#include <linux/bitops.h> - -#include <asm/pgtable.h> -#include <asm/irq.h> -#include <asm/uaccess.h> - -#include "fs_enet.h" - -static const u16 mii_regs[7] = { - 0x3100, - 0x786d, - 0x0fff, - 0x0fff, - 0x01e1, - 0x45e1, - 0x0003, -}; - -static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) -{ - int ret = 0; - - if ((unsigned int)location >= ARRAY_SIZE(mii_regs)) - return -1; - - if (location != 5) - ret = mii_regs[location]; - else - ret = bus->fixed.lpa; - - return ret; -} - -static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) -{ - /* do nothing */ -} - -int fs_mii_fixed_init(struct fs_enet_mii_bus *bus) -{ - const struct fs_mii_bus_info *bi = bus->bus_info; - - bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */ - - /* if speed is fixed at 10Mb, remove 100Mb modes */ - if (bi->i.fixed.speed == 10) - bus->fixed.lpa &= ~LPA_100; - - /* if duplex is half, remove full duplex modes */ - if (bi->i.fixed.duplex == 0) - bus->fixed.lpa &= ~LPA_DUPLEX; - - bus->mii_read = mii_read; - bus->mii_write = mii_write; - - return 0; -} diff --git a/drivers/net/lance.c b/drivers/net/lance.c index c1c3452c90c..5b4dbfe5fb7 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c @@ -326,7 +326,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)"); MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); -int init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c index 646e89fc356..c0ec7f6abcb 100644 --- a/drivers/net/lne390.c +++ b/drivers/net/lne390.c @@ -406,7 +406,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)"); MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); MODULE_LICENSE("GPL"); -int init_module(void) +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 06440a86bae..9bdd43ab357 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -2425,7 +2425,7 @@ static int myri10ge_resume(struct pci_dev *pdev) } myri10ge_reset(mgp); - myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); + myri10ge_dummy_rdma(mgp, 1); /* Save configuration space to be restored if the * nic resets due to a parity error */ diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index fa854c8fde7..4d52ecf8af5 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -1323,7 +1323,7 @@ MODULE_PARM_DESC(irq, "NI5210 IRQ number,required"); MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); -int init_module(void) +int __init init_module(void) { if(io <= 0x0 || !memend || !memstart || irq < 2) { printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); diff --git 
a/drivers/net/ni65.c b/drivers/net/ni65.c index bb42ff21848..810cc572f5f 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c @@ -1253,7 +1253,7 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); MODULE_PARM_DESC(io, "ni6510 I/O base address"); MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); -int init_module(void) +int __init init_module(void) { dev_ni65 = ni65_probe(-1); return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0; diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 9bae77ce131..4122bb46f5f 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -345,6 +345,7 @@ typedef struct local_info_t { void __iomem *dingo_ccr; /* only used for CEM56 cards */ unsigned last_ptr_value; /* last packets transmitted value */ const char *manf_str; + struct work_struct tx_timeout_task; } local_info_t; /**************** @@ -352,6 +353,7 @@ typedef struct local_info_t { */ static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); static void do_tx_timeout(struct net_device *dev); +static void xirc2ps_tx_timeout_task(void *data); static struct net_device_stats *do_get_stats(struct net_device *dev); static void set_addresses(struct net_device *dev); static void set_multicast_list(struct net_device *dev); @@ -589,6 +591,7 @@ xirc2ps_probe(struct pcmcia_device *link) #ifdef HAVE_TX_TIMEOUT dev->tx_timeout = do_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; + INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev); #endif return xirc2ps_config(link); @@ -1341,17 +1344,24 @@ xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs) /*====================================================================*/ static void -do_tx_timeout(struct net_device *dev) +xirc2ps_tx_timeout_task(void *data) { - local_info_t *lp = netdev_priv(dev); - printk(KERN_NOTICE "%s: transmit timed out\n", dev->name); - lp->stats.tx_errors++; + struct net_device *dev = data; /* reset the card */ do_reset(dev,1); dev->trans_start = jiffies; netif_wake_queue(dev); } +static void +do_tx_timeout(struct net_device *dev) +{ + local_info_t *lp = netdev_priv(dev); + lp->stats.tx_errors++; + printk(KERN_NOTICE "%s: transmit timed out\n", dev->name); + schedule_work(&lp->tx_timeout_task); +} + static int do_start_xmit(struct sk_buff *skb, struct net_device *dev) { diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 4daafe30335..d50bcb89dd2 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -202,6 +202,8 @@ static int homepna[MAX_UNITS]; #define CSR15 15 #define PCNET32_MC_FILTER 8 +#define PCNET32_79C970A 0x2621 + /* The PCNET32 Rx and Tx ring descriptors. 
*/ struct pcnet32_rx_head { u32 base; @@ -289,6 +291,7 @@ struct pcnet32_private { /* each bit indicates an available PHY */ u32 phymask; + unsigned short chip_version; /* which variant this is */ }; static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); @@ -724,9 +727,11 @@ static u32 pcnet32_get_link(struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); if (lp->mii) { r = mii_link_ok(&lp->mii_if); - } else { + } else if (lp->chip_version >= PCNET32_79C970A) { ulong ioaddr = dev->base_addr; /* card base I/O address */ r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); + } else { /* can not detect link on really old chips */ + r = 1; } spin_unlock_irqrestore(&lp->lock, flags); @@ -1091,6 +1096,10 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, ulong ioaddr = dev->base_addr; int ticks; + /* really old chips have to be stopped. */ + if (lp->chip_version < PCNET32_79C970A) + return 0; + /* set SUSPEND (SPND) - CSR5 bit 0 */ csr5 = a->read_csr(ioaddr, CSR5); a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); @@ -1529,6 +1538,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->mii_if.reg_num_mask = 0x1f; lp->dxsuflo = dxsuflo; lp->mii = mii; + lp->chip_version = chip_version; lp->msg_enable = pcnet32_debug; if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping))) @@ -1839,10 +1849,7 @@ static int pcnet32_open(struct net_device *dev) val |= 2; } else if (lp->options & PCNET32_PORT_ASEL) { /* workaround of xSeries250, turn on for 79C975 only */ - i = ((lp->a.read_csr(ioaddr, 88) | - (lp->a. - read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff; - if (i == 0x2627) + if (lp->chip_version == 0x2627) val |= 3; } lp->a.write_bcr(ioaddr, 9, val); @@ -1986,9 +1993,11 @@ static int pcnet32_open(struct net_device *dev) netif_start_queue(dev); - /* Print the link status and start the watchdog */ - pcnet32_check_media(dev, 1); - mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); + if (lp->chip_version >= PCNET32_79C970A) { + /* Print the link status and start the watchdog */ + pcnet32_check_media(dev, 1); + mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); + } i = 0; while (i++ < 100) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 2ba6d3a40e2..b79ec0d7480 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -56,5 +56,22 @@ config SMSC_PHY ---help--- Currently supports the LAN83C185 PHY +config FIXED_PHY + tristate "Drivers for PHY emulation on fixed speed/link" + depends on PHYLIB + ---help--- + Adds the driver to PHY layer to cover the boards that do not have any PHY bound, + but with the ability to manipulate with speed/link in software. The relavant MII + speed/duplex parameters could be effectively handled in user-specified fuction. + Currently tested with mpc866ads. 
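To make the user-specified link/speed function mentioned in this help text concrete, here is a hypothetical board-level callback written against the fixed_phy_status / fixed_mdio_set_link_update() interface that drivers/net/phy/fixed.c adds further down. The function name and the idea of polling a board register are invented for the example, and the status struct lives only inside fixed.c in this patch, so this is purely a sketch.

#include <linux/types.h>
#include <linux/netdevice.h>

struct fixed_phy_status {	/* mirrors the definition in fixed.c below */
	u8 link;
	u16 speed;
	u8 duplex;
};

/* fixed_mii_read() invokes the registered callback on every MII read, so a
 * board can report whatever link state it sees fit. */
static int example_fixed_link_update(struct net_device *dev,
				     struct fixed_phy_status *status)
{
	status->link = 1;	/* a real board might poll a carrier GPIO here */
	status->speed = 100;
	status->duplex = 1;
	return 0;
}

/* typically hooked up once the PHY device is attached, e.g.:
 *	fixed_mdio_set_link_update(priv->phydev, example_fixed_link_update);
 */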
+ +config FIXED_MII_10_FDX + bool "Emulation for 10M Fdx fixed PHY behavior" + depends on FIXED_PHY + +config FIXED_MII_100_FDX + bool "Emulation for 100M Fdx fixed PHY behavior" + depends on FIXED_PHY + endmenu diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index a00e6194252..320f8323123 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o obj-$(CONFIG_SMSC_PHY) += smsc.o obj-$(CONFIG_VITESSE_PHY) += vitesse.o +obj-$(CONFIG_FIXED_PHY) += fixed.o diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c new file mode 100644 index 00000000000..341036df471 --- /dev/null +++ b/drivers/net/phy/fixed.c @@ -0,0 +1,358 @@ +/* + * drivers/net/phy/fixed.c + * + * Driver for fixed PHYs, when transceiver is able to operate in one fixed mode. + * + * Author: Vitaly Bordug + * + * Copyright (c) 2006 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/unistd.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/phy.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/uaccess.h> + +#define MII_REGS_NUM 7 + +/* + The idea is to emulate normal phy behavior by responding with + pre-defined values to mii BMCR read, so that read_status hook could + take all the needed info. +*/ + +struct fixed_phy_status { + u8 link; + u16 speed; + u8 duplex; +}; + +/*----------------------------------------------------------------------------- + * Private information hoder for mii_bus + *-----------------------------------------------------------------------------*/ +struct fixed_info { + u16 *regs; + u8 regs_num; + struct fixed_phy_status phy_status; + struct phy_device *phydev; /* pointer to the container */ + /* link & speed cb */ + int(*link_update)(struct net_device*, struct fixed_phy_status*); + +}; + +/*----------------------------------------------------------------------------- + * If something weird is required to be done with link/speed, + * network driver is able to assign a function to implement this. + * May be useful for PHY's that need to be software-driven. 
+ *-----------------------------------------------------------------------------*/ +int fixed_mdio_set_link_update(struct phy_device* phydev, + int(*link_update)(struct net_device*, struct fixed_phy_status*)) +{ + struct fixed_info *fixed; + + if(link_update == NULL) + return -EINVAL; + + if(phydev) { + if(phydev->bus) { + fixed = phydev->bus->priv; + fixed->link_update = link_update; + return 0; + } + } + return -EINVAL; +} +EXPORT_SYMBOL(fixed_mdio_set_link_update); + +/*----------------------------------------------------------------------------- + * This is used for updating internal mii regs from the status + *-----------------------------------------------------------------------------*/ +static int fixed_mdio_update_regs(struct fixed_info *fixed) +{ + u16 *regs = fixed->regs; + u16 bmsr = 0; + u16 bmcr = 0; + + if(!regs) { + printk(KERN_ERR "%s: regs not set up", __FUNCTION__); + return -EINVAL; + } + + if(fixed->phy_status.link) + bmsr |= BMSR_LSTATUS; + + if(fixed->phy_status.duplex) { + bmcr |= BMCR_FULLDPLX; + + switch ( fixed->phy_status.speed ) { + case 100: + bmsr |= BMSR_100FULL; + bmcr |= BMCR_SPEED100; + break; + + case 10: + bmsr |= BMSR_10FULL; + break; + } + } else { + switch ( fixed->phy_status.speed ) { + case 100: + bmsr |= BMSR_100HALF; + bmcr |= BMCR_SPEED100; + break; + + case 10: + bmsr |= BMSR_100HALF; + break; + } + } + + regs[MII_BMCR] = bmcr; + regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx*/ + + return 0; +} + +static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location) +{ + struct fixed_info *fixed = bus->priv; + + /* if user has registered link update callback, use it */ + if(fixed->phydev) + if(fixed->phydev->attached_dev) { + if(fixed->link_update) { + fixed->link_update(fixed->phydev->attached_dev, + &fixed->phy_status); + fixed_mdio_update_regs(fixed); + } + } + + if ((unsigned int)location >= fixed->regs_num) + return -1; + return fixed->regs[location]; +} + +static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) +{ + /* do nothing for now*/ + return 0; +} + +static int fixed_mii_reset(struct mii_bus *bus) +{ + /*nothing here - no way/need to reset it*/ + return 0; +} + +static int fixed_config_aneg(struct phy_device *phydev) +{ + /* :TODO:03/13/2006 09:45:37 PM:: + The full autoneg funcionality can be emulated, + but no need to have anything here for now + */ + return 0; +} + +/*----------------------------------------------------------------------------- + * the manual bind will do the magic - with phy_id_mask == 0 + * match will never return true... + *-----------------------------------------------------------------------------*/ +static struct phy_driver fixed_mdio_driver = { + .name = "Fixed PHY", + .features = PHY_BASIC_FEATURES, + .config_aneg = fixed_config_aneg, + .read_status = genphy_read_status, + .driver = { .owner = THIS_MODULE,}, +}; + +/*----------------------------------------------------------------------------- + * This func is used to create all the necessary stuff, bind + * the fixed phy driver and register all it on the mdio_bus_type. + * speed is either 10 or 100, duplex is boolean. + * number is used to create multiple fixed PHYs, so that several devices can + * utilize them simultaneously. 
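Before the registration helper below, a quick sanity check of what the emulation above actually reports: for a 100 Mbit, full-duplex, link-up fixed PHY, fixed_mdio_update_regs() ends up with BMCR = 0x2100 and BMSR = 0x4804 (the code always ORs in 0x800, i.e. BMSR_10HALF). A tiny host-side program, not part of the patch, reproducing that packing with the MII bit values copied from <linux/mii.h>:

#include <stdio.h>

#define BMCR_FULLDPLX 0x0100
#define BMCR_SPEED100 0x2000
#define BMSR_LSTATUS  0x0004
#define BMSR_10HALF   0x0800
#define BMSR_100FULL  0x4000

int main(void)
{
	/* 100 Mbit, full duplex, link up - same path as fixed_mdio_update_regs() */
	unsigned int bmcr = BMCR_FULLDPLX | BMCR_SPEED100;
	unsigned int bmsr = BMSR_LSTATUS | BMSR_100FULL | BMSR_10HALF;

	printf("BMCR=0x%04x BMSR=0x%04x\n", bmcr, bmsr); /* 0x2100 / 0x4804 */
	return 0;
}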
+ *-----------------------------------------------------------------------------*/ +static int fixed_mdio_register_device(int number, int speed, int duplex) +{ + struct mii_bus *new_bus; + struct fixed_info *fixed; + struct phy_device *phydev; + int err = 0; + + struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL); + + if (NULL == dev) + return -ENOMEM; + + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + + if (NULL == new_bus) { + kfree(dev); + return -ENOMEM; + } + fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL); + + if (NULL == fixed) { + kfree(dev); + kfree(new_bus); + return -ENOMEM; + } + + fixed->regs = kzalloc(MII_REGS_NUM*sizeof(int), GFP_KERNEL); + fixed->regs_num = MII_REGS_NUM; + fixed->phy_status.speed = speed; + fixed->phy_status.duplex = duplex; + fixed->phy_status.link = 1; + + new_bus->name = "Fixed MII Bus", + new_bus->read = &fixed_mii_read, + new_bus->write = &fixed_mii_write, + new_bus->reset = &fixed_mii_reset, + + /*set up workspace*/ + fixed_mdio_update_regs(fixed); + new_bus->priv = fixed; + + new_bus->dev = dev; + dev_set_drvdata(dev, new_bus); + + /* create phy_device and register it on the mdio bus */ + phydev = phy_device_create(new_bus, 0, 0); + + /* + Put the phydev pointer into the fixed pack so that bus read/write code could + be able to access for instance attached netdev. Well it doesn't have to do + so, only in case of utilizing user-specified link-update... + */ + fixed->phydev = phydev; + + if(NULL == phydev) { + err = -ENOMEM; + goto device_create_fail; + } + + phydev->irq = -1; + phydev->dev.bus = &mdio_bus_type; + + if(number) + snprintf(phydev->dev.bus_id, BUS_ID_SIZE, + "fixed_%d@%d:%d", number, speed, duplex); + else + snprintf(phydev->dev.bus_id, BUS_ID_SIZE, + "fixed@%d:%d", speed, duplex); + phydev->bus = new_bus; + + err = device_register(&phydev->dev); + if(err) { + printk(KERN_ERR "Phy %s failed to register\n", + phydev->dev.bus_id); + goto bus_register_fail; + } + + /* + the mdio bus has phy_id match... In order not to do it + artificially, we are binding the driver here by hand; + it will be the same for all the fixed phys anyway. + */ + down_write(&phydev->dev.bus->subsys.rwsem); + + phydev->dev.driver = &fixed_mdio_driver.driver; + + err = phydev->dev.driver->probe(&phydev->dev); + if(err < 0) { + printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id); + up_write(&phydev->dev.bus->subsys.rwsem); + goto probe_fail; + } + + device_bind_driver(&phydev->dev); + up_write(&phydev->dev.bus->subsys.rwsem); + + return 0; + +probe_fail: + device_unregister(&phydev->dev); +bus_register_fail: + kfree(phydev); +device_create_fail: + kfree(dev); + kfree(new_bus); + kfree(fixed); + + return err; +} + + +MODULE_DESCRIPTION("Fixed PHY device & driver for PAL"); +MODULE_AUTHOR("Vitaly Bordug"); +MODULE_LICENSE("GPL"); + +static int __init fixed_init(void) +{ + int ret; + int duplex = 0; + + /* register on the bus... Not expected to be matched with anything there... */ + phy_driver_register(&fixed_mdio_driver); + + /* So let the fun begin... + We will create several mdio devices here, and will bound the upper + driver to them. + + Then the external software can lookup the phy bus by searching + fixed@speed:duplex, e.g. fixed@100:1, to be connected to the + virtual 100M Fdx phy. + + In case several virtual PHYs required, the bus_id will be in form + fixed_<num>@<speed>:<duplex>, which make it able even to define + driver-specific link control callback, if for instance PHY is completely + SW-driven. 
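The lookup name described in this comment is easy to reproduce; a small host-side illustration (not from the patch) of the bus_id strings fixed_mdio_register_device() generates:

#include <stdio.h>

static void fixed_phy_id(char *buf, size_t len, int number, int speed, int duplex)
{
	/* same format strings as fixed_mdio_register_device() uses */
	if (number)
		snprintf(buf, len, "fixed_%d@%d:%d", number, speed, duplex);
	else
		snprintf(buf, len, "fixed@%d:%d", speed, duplex);
}

int main(void)
{
	char id[32];

	fixed_phy_id(id, sizeof(id), 0, 100, 1);
	printf("%s\n", id); /* prints: fixed@100:1 */
	return 0;
}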
+ + */ + +#ifdef CONFIG_FIXED_MII_DUPLEX + duplex = 1; +#endif + +#ifdef CONFIG_FIXED_MII_100_FDX + fixed_mdio_register_device(0, 100, 1); +#endif + +#ifdef CONFIX_FIXED_MII_10_FDX + fixed_mdio_register_device(0, 10, 1); +#endif + return 0; +} + +static void __exit fixed_exit(void) +{ + phy_driver_unregister(&fixed_mdio_driver); + /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */ +} + +module_init(fixed_init); +module_exit(fixed_exit); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 1dde390c164..cf6660c93ff 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -159,6 +159,7 @@ struct bus_type mdio_bus_type = { .suspend = mdio_bus_suspend, .resume = mdio_bus_resume, }; +EXPORT_SYMBOL(mdio_bus_type); int __init mdio_bus_init(void) { diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1bc1e032c5d..2d1ecfdc80d 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -45,6 +45,35 @@ static struct phy_driver genphy_driver; extern int mdio_bus_init(void); extern void mdio_bus_exit(void); +struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) +{ + struct phy_device *dev; + /* We allocate the device, and initialize the + * default values */ + dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); + + if (NULL == dev) + return (struct phy_device*) PTR_ERR((void*)-ENOMEM); + + dev->speed = 0; + dev->duplex = -1; + dev->pause = dev->asym_pause = 0; + dev->link = 1; + + dev->autoneg = AUTONEG_ENABLE; + + dev->addr = addr; + dev->phy_id = phy_id; + dev->bus = bus; + + dev->state = PHY_DOWN; + + spin_lock_init(&dev->lock); + + return dev; +} +EXPORT_SYMBOL(phy_device_create); + /* get_phy_device * * description: Reads the ID registers of the PHY at addr on the @@ -78,27 +107,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr) if (0xffffffff == phy_id) return NULL; - /* Otherwise, we allocate the device, and initialize the - * default values */ - dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); - - if (NULL == dev) - return ERR_PTR(-ENOMEM); - - dev->speed = 0; - dev->duplex = -1; - dev->pause = dev->asym_pause = 0; - dev->link = 1; - - dev->autoneg = AUTONEG_ENABLE; - - dev->addr = addr; - dev->phy_id = phy_id; - dev->bus = bus; - - dev->state = PHY_DOWN; - - spin_lock_init(&dev->lock); + dev = phy_device_create(bus, addr, phy_id); return dev; } diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 0ec6e9d57b9..c872f7c6cce 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -192,7 +192,7 @@ struct cardmap { void *ptr[CARDMAP_WIDTH]; }; static void *cardmap_get(struct cardmap *map, unsigned int nr); -static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); +static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); static unsigned int cardmap_find_first_free(struct cardmap *map); static void cardmap_destroy(struct cardmap **map); @@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan) { struct channel *pch; - pch = kmalloc(sizeof(struct channel), GFP_KERNEL); + pch = kzalloc(sizeof(struct channel), GFP_KERNEL); if (pch == 0) return -ENOMEM; - memset(pch, 0, sizeof(struct channel)); pch->ppp = NULL; pch->chan = chan; chan->ppp = pch; @@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp) int ret = -ENOMEM; int i; - ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL); + ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL); if (!ppp) goto out; dev = alloc_netdev(0, "", 
ppp_setup); if (!dev) goto out1; - memset(ppp, 0, sizeof(struct ppp)); ppp->mru = PPP_MRU; init_ppp_file(&ppp->file, INTERFACE); @@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp) } atomic_inc(&ppp_unit_count); - cardmap_set(&all_ppp_units, unit, ppp); + ret = cardmap_set(&all_ppp_units, unit, ppp); + if (ret != 0) + goto out3; + mutex_unlock(&all_ppp_mutex); *retp = 0; return ppp; +out3: + atomic_dec(&ppp_unit_count); out2: mutex_unlock(&all_ppp_mutex); free_netdev(dev); @@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr) return NULL; } -static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) +static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) { struct cardmap *p; int i; @@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) { do { /* need a new top level */ - struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); - memset(np, 0, sizeof(*np)); + struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL); + if (!np) + goto enomem; np->ptr[0] = p; if (p != NULL) { np->shift = p->shift + CARDMAP_ORDER; @@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) while (p->shift > 0) { i = (nr >> p->shift) & CARDMAP_MASK; if (p->ptr[i] == NULL) { - struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); - memset(np, 0, sizeof(*np)); + struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL); + if (!np) + goto enomem; np->shift = p->shift - CARDMAP_ORDER; np->parent = p; p->ptr[i] = np; @@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) set_bit(i, &p->inuse); else clear_bit(i, &p->inuse); + return 0; + enomem: + return -ENOMEM; } static unsigned int cardmap_find_first_free(struct cardmap *map) diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 132ed32bce1..e72e0e09906 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -71,6 +71,7 @@ #include <asm/uaccess.h> #include <asm/io.h> #include <asm/div64.h> +#include <asm/irq.h> /* local include */ #include "s2io.h" diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index efd0f235020..01392bca022 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c @@ -742,7 +742,7 @@ module_param(irq, int, 0); MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address"); MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number"); -int init_module(void) +int __init init_module(void) { dev_seeq = seeq8005_probe(-1); if (IS_ERR(dev_seeq)) diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 7de9a07b2ac..ad878dfddef 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -2211,6 +2211,7 @@ static int skge_up(struct net_device *dev) skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); skge_led(skge, LED_MODE_ON); + netif_poll_enable(dev); return 0; free_rx_ring: @@ -2279,6 +2280,7 @@ static int skge_down(struct net_device *dev) skge_led(skge, LED_MODE_OFF); + netif_poll_disable(dev); skge_tx_clean(skge); skge_rx_clean(skge); diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index de91609ca11..933e87f1cc6 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -233,6 +233,8 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) if (hw->ports > 1) reg1 |= PCI_Y2_PHY2_COMA; } + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + udelay(100); if (hw->chip_id == CHIP_ID_YUKON_EC_U) { sky2_pci_write32(hw, PCI_DEV_REG3, 0); @@ -242,9 +244,6 @@ static void 
sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) sky2_pci_write32(hw, PCI_DEV_REG5, 0); } - sky2_pci_write32(hw, PCI_DEV_REG1, reg1); - udelay(100); - break; case PCI_D3hot: diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index d37bd860b33..0b15290df27 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c @@ -1092,6 +1092,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs /* Spurious interrupt check */ if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { + spin_unlock_irqrestore(&lp->lock, flags); return IRQ_NONE; } diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 3d8dcb6c875..cf62373b808 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -321,12 +321,12 @@ static void smc_reset(struct net_device *dev) DBG(2, "%s: %s\n", dev->name, __FUNCTION__); /* Disable all interrupts, block TX tasklet */ - spin_lock(&lp->lock); + spin_lock_irq(&lp->lock); SMC_SELECT_BANK(2); SMC_SET_INT_MASK(0); pending_skb = lp->pending_tx_skb; lp->pending_tx_skb = NULL; - spin_unlock(&lp->lock); + spin_unlock_irq(&lp->lock); /* free any pending tx skb */ if (pending_skb) { @@ -448,12 +448,12 @@ static void smc_shutdown(struct net_device *dev) DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); /* no more interrupts for me */ - spin_lock(&lp->lock); + spin_lock_irq(&lp->lock); SMC_SELECT_BANK(2); SMC_SET_INT_MASK(0); pending_skb = lp->pending_tx_skb; lp->pending_tx_skb = NULL; - spin_unlock(&lp->lock); + spin_unlock_irq(&lp->lock); if (pending_skb) dev_kfree_skb(pending_skb); diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 4ec4b4d23ae..7aa7fbac822 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -136,14 +136,9 @@ #define SMC_CAN_USE_32BIT 0 #define SMC_IO_SHIFT 0 #define SMC_NOWAIT 1 -#define SMC_USE_PXA_DMA 1 -#define SMC_inb(a, r) readb((a) + (r)) #define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) #define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) @@ -189,16 +184,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define SMC_IO_SHIFT 0 #define SMC_NOWAIT 1 -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) #define SMC_inw(a, r) readw((a) + (r)) #define SMC_outw(v, a, r) writew(v, (a) + (r)) #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) #include <asm/mach-types.h> #include <asm/arch/cpu.h> @@ -372,6 +361,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, #define SMC_IRQ_FLAGS (0) +#elif defined(CONFIG_ARCH_VERSATILE) + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_NOWAIT 1 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) + +#define SMC_IRQ_FLAGS 
(0) + #else #define SMC_CAN_USE_8BIT 1 diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 647f62e9707..88907218457 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -1611,13 +1611,12 @@ spider_net_open(struct net_device *netdev) int result; result = -ENOMEM; - if (spider_net_init_chain(card, &card->tx_chain, - card->descr, - PCI_DMA_TODEVICE, tx_descriptors)) + if (spider_net_init_chain(card, &card->tx_chain, card->descr, + PCI_DMA_TODEVICE, card->tx_desc)) goto alloc_tx_failed; if (spider_net_init_chain(card, &card->rx_chain, - card->descr + tx_descriptors, - PCI_DMA_FROMDEVICE, rx_descriptors)) + card->descr + card->rx_desc, + PCI_DMA_FROMDEVICE, card->rx_desc)) goto alloc_rx_failed; /* allocate rx skbs */ @@ -2005,6 +2004,9 @@ spider_net_setup_netdev(struct spider_net_card *card) card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; + card->tx_desc = tx_descriptors; + card->rx_desc = rx_descriptors; + spider_net_setup_netdev_ops(netdev); netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index f6dcf180ae3..30407cdf089 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h @@ -440,6 +440,9 @@ struct spider_net_card { /* for ethtool */ int msg_enable; + int rx_desc; + int tx_desc; + struct spider_net_descr descr[0]; }; diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c index a5bb0b7633a..02209222b8c 100644 --- a/drivers/net/spider_net_ethtool.c +++ b/drivers/net/spider_net_ethtool.c @@ -130,6 +130,18 @@ spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data) return 0; } +static void +spider_net_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering) +{ + struct spider_net_card *card = netdev->priv; + + ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; + ering->tx_pending = card->tx_desc; + ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX; + ering->rx_pending = card->rx_desc; +} + struct ethtool_ops spider_net_ethtool_ops = { .get_settings = spider_net_ethtool_get_settings, .get_drvinfo = spider_net_ethtool_get_drvinfo, @@ -141,5 +153,6 @@ struct ethtool_ops spider_net_ethtool_ops = { .set_rx_csum = spider_net_ethtool_set_rx_csum, .get_tx_csum = spider_net_ethtool_get_tx_csum, .set_tx_csum = spider_net_ethtool_set_tx_csum, + .get_ringparam = spider_net_ethtool_get_ringparam, }; diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index ac17377b3e9..698568e751d 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -107,7 +107,7 @@ static char *media[MAX_UNITS]; #endif /* These identify the driver base version and may not be removed. 
*/ -static char version[] __devinitdata = +static char version[] = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" KERN_INFO " http://www.scyld.com/network/sundance.html\n"; diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 0e3fdf7c6dd..ec0413609f3 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -1566,20 +1566,21 @@ static int __exit sunlance_sun4_remove(void) static int __devinit sunlance_sbus_probe(struct of_device *dev, const struct of_device_id *match) { struct sbus_dev *sdev = to_sbus_device(&dev->dev); - struct device_node *dp = dev->node; int err; - if (!strcmp(dp->name, "le")) { - err = sparc_lance_probe_one(sdev, NULL, NULL); - } else if (!strcmp(dp->name, "ledma")) { - struct sbus_dma *ledma = find_ledma(sdev); + if (sdev->parent) { + struct of_device *parent = &sdev->parent->ofdev; - err = sparc_lance_probe_one(sdev->child, ledma, NULL); - } else { - BUG_ON(strcmp(dp->name, "lebuffer")); + if (!strcmp(parent->node->name, "ledma")) { + struct sbus_dma *ledma = find_ledma(to_sbus_device(&parent->dev)); - err = sparc_lance_probe_one(sdev->child, NULL, sdev); - } + err = sparc_lance_probe_one(sdev, ledma, NULL); + } else if (!strcmp(parent->node->name, "lebuffer")) { + err = sparc_lance_probe_one(sdev, NULL, to_sbus_device(&parent->dev)); + } else + err = sparc_lance_probe_one(sdev, NULL, NULL); + } else + err = sparc_lance_probe_one(sdev, NULL, NULL); return err; } @@ -1604,12 +1605,6 @@ static struct of_device_id sunlance_sbus_match[] = { { .name = "le", }, - { - .name = "ledma", - }, - { - .name = "lebuffer", - }, {}, }; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 6f97962dd06..eafabb253f0 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -68,8 +68,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.64" -#define DRV_MODULE_RELDATE "July 31, 2006" +#define DRV_MODULE_VERSION "3.65" +#define DRV_MODULE_RELDATE "August 07, 2006" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -123,9 +123,6 @@ TG3_RX_RCB_RING_SIZE(tp)) #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ TG3_TX_RING_SIZE) -#define TX_BUFFS_AVAIL(TP) \ - ((TP)->tx_pending - \ - (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1))) #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) @@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp) spin_unlock(&tp->lock); } +static inline u32 tg3_tx_avail(struct tg3 *tp) +{ + smp_mb(); + return (tp->tx_pending - + ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1))); +} + /* Tigon3 never reports partial packet sends. So we do not * need special logic to handle SKBs that have not had all * of their frags sent yet, like SunGEM does. @@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp) tp->tx_cons = sw_idx; - if (unlikely(netif_queue_stopped(tp->dev))) { - spin_lock(&tp->tx_lock); + /* Need to make the tx_cons update visible to tg3_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that tg3_start_xmit() + * will miss it and cause the queue to be stopped forever. 
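+	 * The matching barrier on the tg3_start_xmit() side is the smp_mb()
+	 * in tg3_tx_avail(), issued after the queue has been stopped, so at
+	 * least one of the two paths is guaranteed to observe the other's
+	 * update and wake the queue.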
+ */ + smp_mb(); + + if (unlikely(netif_queue_stopped(tp->dev) && + (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) { + netif_tx_lock(tp->dev); if (netif_queue_stopped(tp->dev) && - (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) + (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); + netif_tx_unlock(tp->dev); } } @@ -3101,7 +3113,6 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, if (skb == NULL) return -ENOMEM; - skb->dev = tp->dev; skb_reserve(skb, tp->rx_offset); mapping = pci_map_single(tp->pdev, skb->data, @@ -3274,7 +3285,6 @@ static int tg3_rx(struct tg3 *tp, int budget) if (copy_skb == NULL) goto drop_it_no_recycle; - copy_skb->dev = tp->dev; skb_reserve(copy_skb, 2); skb_put(copy_skb, len); pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); @@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); tp->tx_prod = entry; - if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { - spin_lock(&tp->tx_lock); + if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); - if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) + if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); } out_unlock: @@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) struct sk_buff *segs, *nskb; /* Estimate the number of fragments in the worst case */ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { netif_stop_queue(tp->dev); return NETDEV_TX_BUSY; } @@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! 
*/ - if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); tp->tx_prod = entry; - if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { - spin_lock(&tp->tx_lock); + if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); - if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) + if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) netif_wake_queue(tp->dev); - spin_unlock(&tp->tx_lock); } out_unlock: @@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; #endif spin_lock_init(&tp->lock); - spin_lock_init(&tp->tx_lock); spin_lock_init(&tp->indirect_lock); INIT_WORK(&tp->reset_task, tg3_reset_task, tp); diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index ba2c98711c8..3ecf356cfb0 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -2079,9 +2079,9 @@ struct tg3 { * lock: Held during reset, PHY access, timer, and when * updating tg3_flags and tg3_flags2. * - * tx_lock: Held during tg3_start_xmit and tg3_tx only - * when calling netif_[start|stop]_queue. - * tg3_start_xmit is protected by netif_tx_lock. + * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds + * netif_tx_lock when it needs to call + * netif_wake_queue. * * Both of these locks are to be held with BH safety. * @@ -2118,8 +2118,6 @@ struct tg3 { u32 tx_cons; u32 tx_pending; - spinlock_t tx_lock; - struct tg3_tx_buffer_desc *tx_ring; struct tx_ring_info *tx_buffers; dma_addr_t tx_desc_mapping; diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 9f491563944..4470025ff7f 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -140,7 +140,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */ /* version and credits */ #ifndef PCMCIA -static char version[] __initdata = +static char version[] __devinitdata = "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n" " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n" " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n" @@ -216,7 +216,7 @@ static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0}; static int __devinitdata turbo_searched = 0; #ifndef PCMCIA -static __u32 ibmtr_mem_base __initdata = 0xd0000; +static __u32 ibmtr_mem_base __devinitdata = 0xd0000; #endif static void __devinit PrtChanID(char *pcid, short stride) diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index cd2e0251e2b..85a7f797d34 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -5666,7 +5666,7 @@ module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param(ringspeed, int, 0); -static struct net_device *setup_card(int n) +static struct net_device * __init setup_card(int n) { struct net_device *dev = alloc_trdev(sizeof(struct net_local)); int err; @@ -5696,9 +5696,8 @@ out: free_netdev(dev); return ERR_PTR(err); } - -int init_module(void) +int __init init_module(void) { int i, found = 0; struct net_device *dev; diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 7f414815cc6..eba9083da14 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -138,7 +138,7 @@ static int 
full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; #include <asm/irq.h> /* These identify the driver base version and may not be removed. */ -static char version[] __devinitdata = +static char version[] = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" KERN_INFO " http://www.scyld.com/network/drivers.html\n"; diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index f874e4f6ccf..cf43390d2c8 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c @@ -1264,8 +1264,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p static int __init xircom_init(void) { - pci_register_driver(&xircom_ops); - return 0; + return pci_register_driver(&xircom_ops); } static void __exit xircom_exit(void) diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c new file mode 100644 index 00000000000..47f49ef72bd --- /dev/null +++ b/drivers/net/ucc_geth.c @@ -0,0 +1,4278 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish <gridish@freescale.com> + * + * Description: + * QE UCC Gigabit Ethernet Driver + * + * Changelog: + * Jul 6, 2006 Li Yang <LeoLi@freescale.com> + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/ethtool.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/fsl_devices.h> +#include <linux/ethtool.h> +#include <linux/platform_device.h> +#include <linux/mii.h> + +#include <asm/uaccess.h> +#include <asm/irq.h> +#include <asm/io.h> +#include <asm/immap_qe.h> +#include <asm/qe.h> +#include <asm/ucc.h> +#include <asm/ucc_fast.h> + +#include "ucc_geth.h" +#include "ucc_geth_phy.h" + +#undef DEBUG + +#define DRV_DESC "QE UCC Gigabit Ethernet Controller version:June 20, 2006" +#define DRV_NAME "ucc_geth" + +#define ugeth_printk(level, format, arg...) \ + printk(level format "\n", ## arg) + +#define ugeth_dbg(format, arg...) \ + ugeth_printk(KERN_DEBUG , format , ## arg) +#define ugeth_err(format, arg...) \ + ugeth_printk(KERN_ERR , format , ## arg) +#define ugeth_info(format, arg...) \ + ugeth_printk(KERN_INFO , format , ## arg) +#define ugeth_warn(format, arg...) \ + ugeth_printk(KERN_WARNING , format , ## arg) + +#ifdef UGETH_VERBOSE_DEBUG +#define ugeth_vdbg ugeth_dbg +#else +#define ugeth_vdbg(fmt, args...) 
do { } while (0) +#endif /* UGETH_VERBOSE_DEBUG */ + +static DEFINE_SPINLOCK(ugeth_lock); + +static ucc_geth_info_t ugeth_primary_info = { + .uf_info = { + .bd_mem_part = MEM_PART_SYSTEM, + .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, + .max_rx_buf_length = 1536, +/* FIXME: should be changed in run time for 1G and 100M */ +#ifdef CONFIG_UGETH_HAS_GIGA + .urfs = UCC_GETH_URFS_GIGA_INIT, + .urfet = UCC_GETH_URFET_GIGA_INIT, + .urfset = UCC_GETH_URFSET_GIGA_INIT, + .utfs = UCC_GETH_UTFS_GIGA_INIT, + .utfet = UCC_GETH_UTFET_GIGA_INIT, + .utftt = UCC_GETH_UTFTT_GIGA_INIT, +#else + .urfs = UCC_GETH_URFS_INIT, + .urfet = UCC_GETH_URFET_INIT, + .urfset = UCC_GETH_URFSET_INIT, + .utfs = UCC_GETH_UTFS_INIT, + .utfet = UCC_GETH_UTFET_INIT, + .utftt = UCC_GETH_UTFTT_INIT, +#endif + .ufpt = 256, + .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, + .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, + .tenc = UCC_FAST_TX_ENCODING_NRZ, + .renc = UCC_FAST_RX_ENCODING_NRZ, + .tcrc = UCC_FAST_16_BIT_CRC, + .synl = UCC_FAST_SYNC_LEN_NOT_USED, + }, + .numQueuesTx = 1, + .numQueuesRx = 1, + .extendedFilteringChainPointer = ((uint32_t) NULL), + .typeorlen = 3072 /*1536 */ , + .nonBackToBackIfgPart1 = 0x40, + .nonBackToBackIfgPart2 = 0x60, + .miminumInterFrameGapEnforcement = 0x50, + .backToBackInterFrameGap = 0x60, + .mblinterval = 128, + .nortsrbytetime = 5, + .fracsiz = 1, + .strictpriorityq = 0xff, + .altBebTruncation = 0xa, + .excessDefer = 1, + .maxRetransmission = 0xf, + .collisionWindow = 0x37, + .receiveFlowControl = 1, + .maxGroupAddrInHash = 4, + .maxIndAddrInHash = 4, + .prel = 7, + .maxFrameLength = 1518, + .minFrameLength = 64, + .maxD1Length = 1520, + .maxD2Length = 1520, + .vlantype = 0x8100, + .ecamptr = ((uint32_t) NULL), + .eventRegMask = UCCE_OTHER, + .pausePeriod = 0xf000, + .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, + .bdRingLenTx = { + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN}, + + .bdRingLenRx = { + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN}, + + .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, + .largestexternallookupkeysize = + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, + .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE, + .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, + .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, + .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, + .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, + .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, + .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4, + .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4, + .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, + .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, +}; + +static ucc_geth_info_t ugeth_info[8]; + +#ifdef DEBUG +static void mem_disp(u8 *addr, int size) +{ + u8 *i; + int size16Aling = (size >> 4) << 4; + int size4Aling = (size >> 2) << 2; + int notAlign = 0; + if (size % 16) + notAlign = 1; + + for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16) + printk("0x%08x: %08x %08x %08x %08x\r\n", + (u32) i, + *((u32 *) (i)), + *((u32 *) (i + 4)), + *((u32 *) (i + 8)), *((u32 *) (i + 12))); + if (notAlign == 1) + printk("0x%08x: ", (u32) i); + for (; (u32) i < (u32) addr + size4Aling; i += 4) + printk("%08x ", *((u32 *) (i))); + for (; (u32) i < (u32) addr + size; i++) + printk("%02x", *((u8 *) (i))); + if (notAlign == 1) + 
printk("\r\n"); +} +#endif /* DEBUG */ + +#ifdef CONFIG_UGETH_FILTERING +static void enqueue(struct list_head *node, struct list_head *lh) +{ + unsigned long flags; + + spin_lock_irqsave(ugeth_lock, flags); + list_add_tail(node, lh); + spin_unlock_irqrestore(ugeth_lock, flags); +} +#endif /* CONFIG_UGETH_FILTERING */ + +static struct list_head *dequeue(struct list_head *lh) +{ + unsigned long flags; + + spin_lock_irqsave(ugeth_lock, flags); + if (!list_empty(lh)) { + struct list_head *node = lh->next; + list_del(node); + spin_unlock_irqrestore(ugeth_lock, flags); + return node; + } else { + spin_unlock_irqrestore(ugeth_lock, flags); + return NULL; + } +} + +static int get_interface_details(enet_interface_e enet_interface, + enet_speed_e *speed, + int *r10m, + int *rmm, + int *rpm, + int *tbi, int *limited_to_full_duplex) +{ + /* Analyze enet_interface according to Interface Mode + Configuration table */ + switch (enet_interface) { + case ENET_10_MII: + *speed = ENET_SPEED_10BT; + break; + case ENET_10_RMII: + *speed = ENET_SPEED_10BT; + *r10m = 1; + *rmm = 1; + break; + case ENET_10_RGMII: + *speed = ENET_SPEED_10BT; + *rpm = 1; + *r10m = 1; + *limited_to_full_duplex = 1; + break; + case ENET_100_MII: + *speed = ENET_SPEED_100BT; + break; + case ENET_100_RMII: + *speed = ENET_SPEED_100BT; + *rmm = 1; + break; + case ENET_100_RGMII: + *speed = ENET_SPEED_100BT; + *rpm = 1; + *limited_to_full_duplex = 1; + break; + case ENET_1000_GMII: + *speed = ENET_SPEED_1000BT; + *limited_to_full_duplex = 1; + break; + case ENET_1000_RGMII: + *speed = ENET_SPEED_1000BT; + *rpm = 1; + *limited_to_full_duplex = 1; + break; + case ENET_1000_TBI: + *speed = ENET_SPEED_1000BT; + *tbi = 1; + *limited_to_full_duplex = 1; + break; + case ENET_1000_RTBI: + *speed = ENET_SPEED_1000BT; + *rpm = 1; + *tbi = 1; + *limited_to_full_duplex = 1; + break; + default: + return -EINVAL; + break; + } + + return 0; +} + +static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd) +{ + struct sk_buff *skb = NULL; + + skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT); + + if (skb == NULL) + return NULL; + + /* We need the data buffer to be aligned properly. We will reserve + * as many bytes as needed to align the data properly + */ + skb_reserve(skb, + UCC_GETH_RX_DATA_BUF_ALIGNMENT - + (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - + 1))); + + skb->dev = ugeth->dev; + + BD_BUFFER_SET(bd, + dma_map_single(NULL, + skb->data, + ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT, + DMA_FROM_DEVICE)); + + BD_STATUS_AND_LENGTH_SET(bd, + (R_E | R_I | + (BD_STATUS_AND_LENGTH(bd) & R_W))); + + return skb; +} + +static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ) +{ + u8 *bd; + u32 bd_status; + struct sk_buff *skb; + int i; + + bd = ugeth->p_rx_bd_ring[rxQ]; + i = 0; + + do { + bd_status = BD_STATUS_AND_LENGTH(bd); + skb = get_new_skb(ugeth, bd); + + if (!skb) /* If can not allocate data buffer, + abort. 
Cleanup will be elsewhere */ + return -ENOMEM; + + ugeth->rx_skbuff[rxQ][i] = skb; + + /* advance the BD pointer */ + bd += UCC_GETH_SIZE_OF_BD; + i++; + } while (!(bd_status & R_W)); + + return 0; +} + +static int fill_init_enet_entries(ucc_geth_private_t *ugeth, + volatile u32 *p_start, + u8 num_entries, + u32 thread_size, + u32 thread_alignment, + qe_risc_allocation_e risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + if ((snum = qe_get_snum()) < 0) { + ugeth_err("fill_init_enet_entries: Can not get SNUM."); + return snum; + } + if ((i == 0) && skip_page_for_first_entry) + /* First entry of Rx does not have page */ + init_enet_offset = 0; + else { + init_enet_offset = + qe_muram_alloc(thread_size, thread_alignment); + if (IS_MURAM_ERR(init_enet_offset)) { + ugeth_err + ("fill_init_enet_entries: Can not allocate DPRAM memory."); + qe_put_snum((u8) snum); + return -ENOMEM; + } + } + *(p_start++) = + ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset + | risc; + } + + return 0; +} + +static int return_init_enet_entries(ucc_geth_private_t *ugeth, + volatile u32 *p_start, + u8 num_entries, + qe_risc_allocation_e risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + /* Check that this entry was actually valid -- + needed in case failed in allocations */ + if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { + snum = + (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> + ENET_INIT_PARAM_SNUM_SHIFT; + qe_put_snum((u8) snum); + if (!((i == 0) && skip_page_for_first_entry)) { + /* First entry of Rx does not have page */ + init_enet_offset = + (in_be32(p_start) & + ENET_INIT_PARAM_PTR_MASK); + qe_muram_free(init_enet_offset); + } + *(p_start++) = 0; /* Just for cosmetics */ + } + } + + return 0; +} + +#ifdef DEBUG +static int dump_init_enet_entries(ucc_geth_private_t *ugeth, + volatile u32 *p_start, + u8 num_entries, + u32 thread_size, + qe_risc_allocation_e risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + /* Check that this entry was actually valid -- + needed in case failed in allocations */ + if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { + snum = + (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> + ENET_INIT_PARAM_SNUM_SHIFT; + qe_put_snum((u8) snum); + if (!((i == 0) && skip_page_for_first_entry)) { + /* First entry of Rx does not have page */ + init_enet_offset = + (in_be32(p_start) & + ENET_INIT_PARAM_PTR_MASK); + ugeth_info("Init enet entry %d:", i); + ugeth_info("Base address: 0x%08x", + (u32) + qe_muram_addr(init_enet_offset)); + mem_disp(qe_muram_addr(init_enet_offset), + thread_size); + } + p_start++; + } + } + + return 0; +} +#endif + +#ifdef CONFIG_UGETH_FILTERING +static enet_addr_container_t *get_enet_addr_container(void) +{ + enet_addr_container_t *enet_addr_cont; + + /* allocate memory */ + enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL); + if (!enet_addr_cont) { + ugeth_err("%s: No memory for enet_addr_container_t object.", + __FUNCTION__); + return NULL; + } + + return enet_addr_cont; +} +#endif /* CONFIG_UGETH_FILTERING */ + +static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont) +{ + kfree(enet_addr_cont); +} + +#ifdef CONFIG_UGETH_FILTERING +static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr, u8 paddr_num) +{ + ucc_geth_82xx_address_filtering_pram_t 
*p_82xx_addr_filt; + + if (!(paddr_num < NUM_OF_PADDRS)) { + ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); + return -EINVAL; + } + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + /* Ethernet frames are defined in Little Endian mode, */ + /* therefore to insert the address we reverse the bytes. */ + out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, + (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) | + (u16) (*p_enet_addr)[4])); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, + (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) | + (u16) (*p_enet_addr)[2])); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, + (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) | + (u16) (*p_enet_addr)[0])); + + return 0; +} +#endif /* CONFIG_UGETH_FILTERING */ + +static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + + if (!(paddr_num < NUM_OF_PADDRS)) { + ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); + return -EINVAL; + } + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + /* Writing address ff.ff.ff.ff.ff.ff disables address + recognition for this register */ + out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); + + return 0; +} + +static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + u32 cecr_subblock; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + + /* Ethernet frames are defined in Little Endian mode, + therefor to insert */ + /* the address to the hash (Big Endian mode), we reverse the bytes.*/ + out_be16(&p_82xx_addr_filt->taddr.h, + (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) | + (u16) (*p_enet_addr)[4])); + out_be16(&p_82xx_addr_filt->taddr.m, + (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) | + (u16) (*p_enet_addr)[2])); + out_be16(&p_82xx_addr_filt->taddr.l, + (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) | + (u16) (*p_enet_addr)[0])); + + qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, + (u8) QE_CR_PROTOCOL_ETHERNET, 0); +} + +#ifdef CONFIG_UGETH_MAGIC_PACKET +static void magic_packet_detection_enable(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + ucc_geth_t *ug_regs; + u32 maccfg2, uccm; + + uccf = ugeth->uccf; + ug_regs = ugeth->ug_regs; + + /* Enable interrupts for magic packet detection */ + uccm = in_be32(uccf->p_uccm); + uccm |= UCCE_MPD; + out_be32(uccf->p_uccm, uccm); + + /* Enable magic packet detection */ + maccfg2 = in_be32(&ug_regs->maccfg2); + maccfg2 |= MACCFG2_MPE; + out_be32(&ug_regs->maccfg2, maccfg2); +} + +static void magic_packet_detection_disable(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + ucc_geth_t *ug_regs; + u32 maccfg2, uccm; + + uccf = ugeth->uccf; + ug_regs = ugeth->ug_regs; + + /* Disable interrupts for magic packet detection */ + uccm = in_be32(uccf->p_uccm); + uccm &= ~UCCE_MPD; + out_be32(uccf->p_uccm, uccm); + + /* Disable magic packet detection */ + maccfg2 = in_be32(&ug_regs->maccfg2); + maccfg2 &= ~MACCFG2_MPE; + out_be32(&ug_regs->maccfg2, maccfg2); +} +#endif /* MAGIC_PACKET */ + +static inline int compare_addr(enet_addr_t 
*addr1, enet_addr_t *addr2) +{ + return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); +} + +#ifdef DEBUG +static void get_statistics(ucc_geth_private_t *ugeth, + ucc_geth_tx_firmware_statistics_t * + tx_firmware_statistics, + ucc_geth_rx_firmware_statistics_t * + rx_firmware_statistics, + ucc_geth_hardware_statistics_t *hardware_statistics) +{ + ucc_fast_t *uf_regs; + ucc_geth_t *ug_regs; + ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram; + ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram; + + ug_regs = ugeth->ug_regs; + uf_regs = (ucc_fast_t *) ug_regs; + p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; + p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; + + /* Tx firmware only if user handed pointer and driver actually + gathers Tx firmware statistics */ + if (tx_firmware_statistics && p_tx_fw_statistics_pram) { + tx_firmware_statistics->sicoltx = + in_be32(&p_tx_fw_statistics_pram->sicoltx); + tx_firmware_statistics->mulcoltx = + in_be32(&p_tx_fw_statistics_pram->mulcoltx); + tx_firmware_statistics->latecoltxfr = + in_be32(&p_tx_fw_statistics_pram->latecoltxfr); + tx_firmware_statistics->frabortduecol = + in_be32(&p_tx_fw_statistics_pram->frabortduecol); + tx_firmware_statistics->frlostinmactxer = + in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); + tx_firmware_statistics->carriersenseertx = + in_be32(&p_tx_fw_statistics_pram->carriersenseertx); + tx_firmware_statistics->frtxok = + in_be32(&p_tx_fw_statistics_pram->frtxok); + tx_firmware_statistics->txfrexcessivedefer = + in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); + tx_firmware_statistics->txpkts256 = + in_be32(&p_tx_fw_statistics_pram->txpkts256); + tx_firmware_statistics->txpkts512 = + in_be32(&p_tx_fw_statistics_pram->txpkts512); + tx_firmware_statistics->txpkts1024 = + in_be32(&p_tx_fw_statistics_pram->txpkts1024); + tx_firmware_statistics->txpktsjumbo = + in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); + } + + /* Rx firmware only if user handed pointer and driver actually + * gathers Rx firmware statistics */ + if (rx_firmware_statistics && p_rx_fw_statistics_pram) { + int i; + rx_firmware_statistics->frrxfcser = + in_be32(&p_rx_fw_statistics_pram->frrxfcser); + rx_firmware_statistics->fraligner = + in_be32(&p_rx_fw_statistics_pram->fraligner); + rx_firmware_statistics->inrangelenrxer = + in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); + rx_firmware_statistics->outrangelenrxer = + in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); + rx_firmware_statistics->frtoolong = + in_be32(&p_rx_fw_statistics_pram->frtoolong); + rx_firmware_statistics->runt = + in_be32(&p_rx_fw_statistics_pram->runt); + rx_firmware_statistics->verylongevent = + in_be32(&p_rx_fw_statistics_pram->verylongevent); + rx_firmware_statistics->symbolerror = + in_be32(&p_rx_fw_statistics_pram->symbolerror); + rx_firmware_statistics->dropbsy = + in_be32(&p_rx_fw_statistics_pram->dropbsy); + for (i = 0; i < 0x8; i++) + rx_firmware_statistics->res0[i] = + p_rx_fw_statistics_pram->res0[i]; + rx_firmware_statistics->mismatchdrop = + in_be32(&p_rx_fw_statistics_pram->mismatchdrop); + rx_firmware_statistics->underpkts = + in_be32(&p_rx_fw_statistics_pram->underpkts); + rx_firmware_statistics->pkts256 = + in_be32(&p_rx_fw_statistics_pram->pkts256); + rx_firmware_statistics->pkts512 = + in_be32(&p_rx_fw_statistics_pram->pkts512); + rx_firmware_statistics->pkts1024 = + in_be32(&p_rx_fw_statistics_pram->pkts1024); + rx_firmware_statistics->pktsjumbo = + in_be32(&p_rx_fw_statistics_pram->pktsjumbo); + 
rx_firmware_statistics->frlossinmacer = + in_be32(&p_rx_fw_statistics_pram->frlossinmacer); + rx_firmware_statistics->pausefr = + in_be32(&p_rx_fw_statistics_pram->pausefr); + for (i = 0; i < 0x4; i++) + rx_firmware_statistics->res1[i] = + p_rx_fw_statistics_pram->res1[i]; + rx_firmware_statistics->removevlan = + in_be32(&p_rx_fw_statistics_pram->removevlan); + rx_firmware_statistics->replacevlan = + in_be32(&p_rx_fw_statistics_pram->replacevlan); + rx_firmware_statistics->insertvlan = + in_be32(&p_rx_fw_statistics_pram->insertvlan); + } + + /* Hardware only if user handed pointer and driver actually + gathers hardware statistics */ + if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) { + hardware_statistics->tx64 = in_be32(&ug_regs->tx64); + hardware_statistics->tx127 = in_be32(&ug_regs->tx127); + hardware_statistics->tx255 = in_be32(&ug_regs->tx255); + hardware_statistics->rx64 = in_be32(&ug_regs->rx64); + hardware_statistics->rx127 = in_be32(&ug_regs->rx127); + hardware_statistics->rx255 = in_be32(&ug_regs->rx255); + hardware_statistics->txok = in_be32(&ug_regs->txok); + hardware_statistics->txcf = in_be16(&ug_regs->txcf); + hardware_statistics->tmca = in_be32(&ug_regs->tmca); + hardware_statistics->tbca = in_be32(&ug_regs->tbca); + hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); + hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); + hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); + hardware_statistics->rmca = in_be32(&ug_regs->rmca); + hardware_statistics->rbca = in_be32(&ug_regs->rbca); + } +} + +static void dump_bds(ucc_geth_private_t *ugeth) +{ + int i; + int length; + + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + if (ugeth->p_tx_bd_ring[i]) { + length = + (ugeth->ug_info->bdRingLenTx[i] * + UCC_GETH_SIZE_OF_BD); + ugeth_info("TX BDs[%d]", i); + mem_disp(ugeth->p_tx_bd_ring[i], length); + } + } + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + if (ugeth->p_rx_bd_ring[i]) { + length = + (ugeth->ug_info->bdRingLenRx[i] * + UCC_GETH_SIZE_OF_BD); + ugeth_info("RX BDs[%d]", i); + mem_disp(ugeth->p_rx_bd_ring[i], length); + } + } +} + +static void dump_regs(ucc_geth_private_t *ugeth) +{ + int i; + + ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num); + ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); + + ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->maccfg1, + in_be32(&ugeth->ug_regs->maccfg1)); + ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->maccfg2, + in_be32(&ugeth->ug_regs->maccfg2)); + ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->ipgifg, + in_be32(&ugeth->ug_regs->ipgifg)); + ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->hafdup, + in_be32(&ugeth->ug_regs->hafdup)); + ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimcfg, + in_be32(&ugeth->ug_regs->miimng.miimcfg)); + ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimcom, + in_be32(&ugeth->ug_regs->miimng.miimcom)); + ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimadd, + in_be32(&ugeth->ug_regs->miimng.miimadd)); + ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimcon, + in_be32(&ugeth->ug_regs->miimng.miimcon)); + ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimstat, + in_be32(&ugeth->ug_regs->miimng.miimstat)); + ugeth_info("miimmind : 
addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->miimng.miimind, + in_be32(&ugeth->ug_regs->miimng.miimind)); + ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->ifctl, + in_be32(&ugeth->ug_regs->ifctl)); + ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->ifstat, + in_be32(&ugeth->ug_regs->ifstat)); + ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->macstnaddr1, + in_be32(&ugeth->ug_regs->macstnaddr1)); + ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->macstnaddr2, + in_be32(&ugeth->ug_regs->macstnaddr2)); + ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->uempr, + in_be32(&ugeth->ug_regs->uempr)); + ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->utbipar, + in_be32(&ugeth->ug_regs->utbipar)); + ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->ug_regs->uescr, + in_be16(&ugeth->ug_regs->uescr)); + ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tx64, + in_be32(&ugeth->ug_regs->tx64)); + ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tx127, + in_be32(&ugeth->ug_regs->tx127)); + ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tx255, + in_be32(&ugeth->ug_regs->tx255)); + ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rx64, + in_be32(&ugeth->ug_regs->rx64)); + ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rx127, + in_be32(&ugeth->ug_regs->rx127)); + ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rx255, + in_be32(&ugeth->ug_regs->rx255)); + ugeth_info("txok : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->txok, + in_be32(&ugeth->ug_regs->txok)); + ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->ug_regs->txcf, + in_be16(&ugeth->ug_regs->txcf)); + ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tmca, + in_be32(&ugeth->ug_regs->tmca)); + ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->tbca, + in_be32(&ugeth->ug_regs->tbca)); + ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rxfok, + in_be32(&ugeth->ug_regs->rxfok)); + ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rxbok, + in_be32(&ugeth->ug_regs->rxbok)); + ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rbyt, + in_be32(&ugeth->ug_regs->rbyt)); + ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rmca, + in_be32(&ugeth->ug_regs->rmca)); + ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->rbca, + in_be32(&ugeth->ug_regs->rbca)); + ugeth_info("scar : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->scar, + in_be32(&ugeth->ug_regs->scar)); + ugeth_info("scam : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->ug_regs->scam, + in_be32(&ugeth->ug_regs->scam)); + + if (ugeth->p_thread_data_tx) { + int numThreadsTxNumerical; + switch (ugeth->ug_info->numThreadsTx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsTxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsTxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsTxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsTxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsTxNumerical = 8; + break; + default: + numThreadsTxNumerical = 0; + break; + } + + 
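+		/* dump each allocated Tx thread data structure */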
ugeth_info("Thread data TXs:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_thread_data_tx); + for (i = 0; i < numThreadsTxNumerical; i++) { + ugeth_info("Thread data TX[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_thread_data_tx[i]); + mem_disp((u8 *) & ugeth->p_thread_data_tx[i], + sizeof(ucc_geth_thread_data_tx_t)); + } + } + if (ugeth->p_thread_data_rx) { + int numThreadsRxNumerical; + switch (ugeth->ug_info->numThreadsRx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsRxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsRxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsRxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsRxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsRxNumerical = 8; + break; + default: + numThreadsRxNumerical = 0; + break; + } + + ugeth_info("Thread data RX:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_thread_data_rx); + for (i = 0; i < numThreadsRxNumerical; i++) { + ugeth_info("Thread data RX[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_thread_data_rx[i]); + mem_disp((u8 *) & ugeth->p_thread_data_rx[i], + sizeof(ucc_geth_thread_data_rx_t)); + } + } + if (ugeth->p_exf_glbl_param) { + ugeth_info("EXF global param:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_exf_glbl_param); + mem_disp((u8 *) ugeth->p_exf_glbl_param, + sizeof(*ugeth->p_exf_glbl_param)); + } + if (ugeth->p_tx_glbl_pram) { + ugeth_info("TX global param:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); + ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_tx_glbl_pram->temoder, + in_be16(&ugeth->p_tx_glbl_pram->temoder)); + ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->sqptr, + in_be32(&ugeth->p_tx_glbl_pram->sqptr)); + ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, + in_be32(&ugeth->p_tx_glbl_pram-> + schedulerbasepointer)); + ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, + in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); + ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->tstate, + in_be32(&ugeth->p_tx_glbl_pram->tstate)); + ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], + ugeth->p_tx_glbl_pram->iphoffset[0]); + ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], + ugeth->p_tx_glbl_pram->iphoffset[1]); + ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], + ugeth->p_tx_glbl_pram->iphoffset[2]); + ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], + ugeth->p_tx_glbl_pram->iphoffset[3]); + ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], + ugeth->p_tx_glbl_pram->iphoffset[4]); + ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], + ugeth->p_tx_glbl_pram->iphoffset[5]); + ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], + ugeth->p_tx_glbl_pram->iphoffset[6]); + ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], + ugeth->p_tx_glbl_pram->iphoffset[7]); + ugeth_info("vtagtable[0] : addr - 
0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); + ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); + ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); + ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); + ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); + ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); + ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); + ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); + ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_tx_glbl_pram->tqptr, + in_be32(&ugeth->p_tx_glbl_pram->tqptr)); + } + if (ugeth->p_rx_glbl_pram) { + ugeth_info("RX global param:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); + ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->remoder, + in_be32(&ugeth->p_rx_glbl_pram->remoder)); + ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->rqptr, + in_be32(&ugeth->p_rx_glbl_pram->rqptr)); + ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->typeorlen, + in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); + ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_rx_glbl_pram->rxgstpack, + ugeth->p_rx_glbl_pram->rxgstpack); + ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, + in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); + ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, + in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); + ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", + (u32) & ugeth->p_rx_glbl_pram->rstate, + ugeth->p_rx_glbl_pram->rstate); + ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->mrblr, + in_be16(&ugeth->p_rx_glbl_pram->mrblr)); + ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->rbdqptr, + in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); + ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->mflr, + in_be16(&ugeth->p_rx_glbl_pram->mflr)); + ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->minflr, + in_be16(&ugeth->p_rx_glbl_pram->minflr)); + ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->maxd1, + in_be16(&ugeth->p_rx_glbl_pram->maxd1)); + ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->maxd2, + in_be16(&ugeth->p_rx_glbl_pram->maxd2)); + ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->ecamptr, + in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); + ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l2qt, + 
in_be32(&ugeth->p_rx_glbl_pram->l2qt)); + ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[0], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); + ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[1], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); + ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[2], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); + ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[3], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); + ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[4], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); + ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[5], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); + ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[6], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); + ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->l3qt[7], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); + ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->vlantype, + in_be16(&ugeth->p_rx_glbl_pram->vlantype)); + ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", + (u32) & ugeth->p_rx_glbl_pram->vlantci, + in_be16(&ugeth->p_rx_glbl_pram->vlantci)); + for (i = 0; i < 64; i++) + ugeth_info + ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", + i, + (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], + ugeth->p_rx_glbl_pram->addressfiltering[i]); + ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, + in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); + } + if (ugeth->p_send_q_mem_reg) { + ugeth_info("Send Q memory registers:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_send_q_mem_reg); + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + ugeth_info("SQQD[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); + mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], + sizeof(ucc_geth_send_queue_qd_t)); + } + } + if (ugeth->p_scheduler) { + ugeth_info("Scheduler:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); + mem_disp((u8 *) ugeth->p_scheduler, + sizeof(*ugeth->p_scheduler)); + } + if (ugeth->p_tx_fw_statistics_pram) { + ugeth_info("TX FW statistics pram:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_tx_fw_statistics_pram); + mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, + sizeof(*ugeth->p_tx_fw_statistics_pram)); + } + if (ugeth->p_rx_fw_statistics_pram) { + ugeth_info("RX FW statistics pram:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_rx_fw_statistics_pram); + mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, + sizeof(*ugeth->p_rx_fw_statistics_pram)); + } + if (ugeth->p_rx_irq_coalescing_tbl) { + ugeth_info("RX IRQ coalescing tables:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_rx_irq_coalescing_tbl); + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + ugeth_info("RX IRQ coalescing table entry[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]); + ugeth_info + ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i].interruptcoalescingmaxvalue, + in_be32(&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]. 
+ interruptcoalescingmaxvalue)); + ugeth_info + ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i].interruptcoalescingcounter, + in_be32(&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]. + interruptcoalescingcounter)); + } + } + if (ugeth->p_rx_bd_qs_tbl) { + ugeth_info("RX BD QS tables:"); + ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + ugeth_info("RX BD QS table[%d]:", i); + ugeth_info("Base address: 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i]); + ugeth_info + ("bdbaseptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); + ugeth_info + ("bdptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); + ugeth_info + ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i]. + externalbdbaseptr)); + ugeth_info + ("externalbdptr : addr - 0x%08x, val - 0x%08x", + (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); + ugeth_info("ucode RX Prefetched BDs:"); + ugeth_info("Base address: 0x%08x", + (u32) + qe_muram_addr(in_be32 + (&ugeth->p_rx_bd_qs_tbl[i]. + bdbaseptr))); + mem_disp((u8 *) + qe_muram_addr(in_be32 + (&ugeth->p_rx_bd_qs_tbl[i]. + bdbaseptr)), + sizeof(ucc_geth_rx_prefetched_bds_t)); + } + } + if (ugeth->p_init_enet_param_shadow) { + int size; + ugeth_info("Init enet param shadow:"); + ugeth_info("Base address: 0x%08x", + (u32) ugeth->p_init_enet_param_shadow); + mem_disp((u8 *) ugeth->p_init_enet_param_shadow, + sizeof(*ugeth->p_init_enet_param_shadow)); + + size = sizeof(ucc_geth_thread_rx_pram_t); + if (ugeth->ug_info->rxExtendedFiltering) { + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; + if (ugeth->ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; + if (ugeth->ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; + } + + dump_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_TX, + sizeof(ucc_geth_thread_tx_pram_t), + ugeth->ug_info->riscTx, 0); + dump_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + rxthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_RX, size, + ugeth->ug_info->riscRx, 1); + } +} +#endif /* DEBUG */ + +static void init_default_reg_vals(volatile u32 *upsmr_register, + volatile u32 *maccfg1_register, + volatile u32 *maccfg2_register) +{ + out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); + out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); + out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); +} + +static int init_half_duplex_params(int alt_beb, + int back_pressure_no_backoff, + int no_backoff, + int excess_defer, + u8 alt_beb_truncation, + u8 max_retransmissions, + u8 collision_window, + volatile u32 *hafdup_register) +{ + u32 value = 0; + + if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || + (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || + (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) + return -EINVAL; + + value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); + + if (alt_beb) + value |= HALFDUP_ALT_BEB; + if (back_pressure_no_backoff) + 
value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; + if (no_backoff) + value |= HALFDUP_NO_BACKOFF; + if (excess_defer) + value |= HALFDUP_EXCESSIVE_DEFER; + + value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); + + value |= collision_window; + + out_be32(hafdup_register, value); + return 0; +} + +static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, + u8 non_btb_ipg, + u8 min_ifg, + u8 btb_ipg, + volatile u32 *ipgifg_register) +{ + u32 value = 0; + + /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back + IPG part 2 */ + if (non_btb_cs_ipg > non_btb_ipg) + return -EINVAL; + + if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || + (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || + /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ + (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) + return -EINVAL; + + value |= + ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & + IPGIFG_NBTB_CS_IPG_MASK); + value |= + ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & + IPGIFG_NBTB_IPG_MASK); + value |= + ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & + IPGIFG_MIN_IFG_MASK); + value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); + + out_be32(ipgifg_register, value); + return 0; +} + +static int init_flow_control_params(u32 automatic_flow_control_mode, + int rx_flow_control_enable, + int tx_flow_control_enable, + u16 pause_period, + u16 extension_field, + volatile u32 *upsmr_register, + volatile u32 *uempr_register, + volatile u32 *maccfg1_register) +{ + u32 value = 0; + + /* Set UEMPR register */ + value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; + value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; + out_be32(uempr_register, value); + + /* Set UPSMR register */ + value = in_be32(upsmr_register); + value |= automatic_flow_control_mode; + out_be32(upsmr_register, value); + + value = in_be32(maccfg1_register); + if (rx_flow_control_enable) + value |= MACCFG1_FLOW_RX; + if (tx_flow_control_enable) + value |= MACCFG1_FLOW_TX; + out_be32(maccfg1_register, value); + + return 0; +} + +static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, + int auto_zero_hardware_statistics, + volatile u32 *upsmr_register, + volatile u16 *uescr_register) +{ + u32 upsmr_value = 0; + u16 uescr_value = 0; + /* Enable hardware statistics gathering if requested */ + if (enable_hardware_statistics) { + upsmr_value = in_be32(upsmr_register); + upsmr_value |= UPSMR_HSE; + out_be32(upsmr_register, upsmr_value); + } + + /* Clear hardware statistics counters */ + uescr_value = in_be16(uescr_register); + uescr_value |= UESCR_CLRCNT; + /* Automatically zero hardware statistics counters on read, + if requested */ + if (auto_zero_hardware_statistics) + uescr_value |= UESCR_AUTOZ; + out_be16(uescr_register, uescr_value); + + return 0; +} + +static int init_firmware_statistics_gathering_mode(int + enable_tx_firmware_statistics, + int enable_rx_firmware_statistics, + volatile u32 *tx_rmon_base_ptr, + u32 tx_firmware_statistics_structure_address, + volatile u32 *rx_rmon_base_ptr, + u32 rx_firmware_statistics_structure_address, + volatile u16 *temoder_register, + volatile u32 *remoder_register) +{ + /* Note: this function does not check if */ + /* the parameters it receives are NULL */ + u16 temoder_value; + u32 remoder_value; + + if (enable_tx_firmware_statistics) { + out_be32(tx_rmon_base_ptr, + tx_firmware_statistics_structure_address); + temoder_value = in_be16(temoder_register); + temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE; + 
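+		/* write TEMODER back with the Tx RMON statistics enable bit set */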
out_be16(temoder_register, temoder_value); + } + + if (enable_rx_firmware_statistics) { + out_be32(rx_rmon_base_ptr, + rx_firmware_statistics_structure_address); + remoder_value = in_be32(remoder_register); + remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE; + out_be32(remoder_register, remoder_value); + } + + return 0; +} + +static int init_mac_station_addr_regs(u8 address_byte_0, + u8 address_byte_1, + u8 address_byte_2, + u8 address_byte_3, + u8 address_byte_4, + u8 address_byte_5, + volatile u32 *macstnaddr1_register, + volatile u32 *macstnaddr2_register) +{ + u32 value = 0; + + /* Example: for a station address of 0x12345678ABCD, */ + /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ + + /* MACSTNADDR1 Register: */ + + /* 0 7 8 15 */ + /* station address byte 5 station address byte 4 */ + /* 16 23 24 31 */ + /* station address byte 3 station address byte 2 */ + value |= (u32) ((address_byte_2 << 0) & 0x000000FF); + value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); + value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); + value |= (u32) ((address_byte_5 << 24) & 0xFF000000); + + out_be32(macstnaddr1_register, value); + + /* MACSTNADDR2 Register: */ + + /* 0 7 8 15 */ + /* station address byte 1 station address byte 0 */ + /* 16 23 24 31 */ + /* reserved reserved */ + value = 0; + value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); + value |= (u32) ((address_byte_1 << 24) & 0xFF000000); + + out_be32(macstnaddr2_register, value); + + return 0; +} + +static int init_mac_duplex_mode(int full_duplex, + int limited_to_full_duplex, + volatile u32 *maccfg2_register) +{ + u32 value = 0; + + /* some interfaces must work in full duplex mode */ + if ((full_duplex == 0) && (limited_to_full_duplex == 1)) + return -EINVAL; + + value = in_be32(maccfg2_register); + + if (full_duplex) + value |= MACCFG2_FDX; + else + value &= ~MACCFG2_FDX; + + out_be32(maccfg2_register, value); + return 0; +} + +static int init_check_frame_length_mode(int length_check, + volatile u32 *maccfg2_register) +{ + u32 value = 0; + + value = in_be32(maccfg2_register); + + if (length_check) + value |= MACCFG2_LC; + else + value &= ~MACCFG2_LC; + + out_be32(maccfg2_register, value); + return 0; +} + +static int init_preamble_length(u8 preamble_length, + volatile u32 *maccfg2_register) +{ + u32 value = 0; + + if ((preamble_length < 3) || (preamble_length > 7)) + return -EINVAL; + + value = in_be32(maccfg2_register); + value &= ~MACCFG2_PREL_MASK; + value |= (preamble_length << MACCFG2_PREL_SHIFT); + out_be32(maccfg2_register, value); + return 0; +} + +static int init_mii_management_configuration(int reset_mgmt, + int preamble_supress, + volatile u32 *miimcfg_register, + volatile u32 *miimind_register) +{ + unsigned int timeout = PHY_INIT_TIMEOUT; + u32 value = 0; + + value = in_be32(miimcfg_register); + if (reset_mgmt) { + value |= MIIMCFG_RESET_MANAGEMENT; + out_be32(miimcfg_register, value); + } + + value = 0; + + if (preamble_supress) + value |= MIIMCFG_NO_PREAMBLE; + + value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT; + out_be32(miimcfg_register, value); + + /* Wait until the bus is free */ + while ((in_be32(miimind_register) & MIIMIND_BUSY) && timeout--) + cpu_relax(); + + if (timeout <= 0) { + ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__); + return -ETIMEDOUT; + } + + return 0; +} + +static int init_rx_parameters(int reject_broadcast, + int receive_short_frames, + int promiscuous, volatile u32 *upsmr_register) +{ + u32 value = 0; + + value = in_be32(upsmr_register); + + if (reject_broadcast) + 
value |= UPSMR_BRO; + else + value &= ~UPSMR_BRO; + + if (receive_short_frames) + value |= UPSMR_RSH; + else + value &= ~UPSMR_RSH; + + if (promiscuous) + value |= UPSMR_PRO; + else + value &= ~UPSMR_PRO; + + out_be32(upsmr_register, value); + + return 0; +} + +static int init_max_rx_buff_len(u16 max_rx_buf_len, + volatile u16 *mrblr_register) +{ + /* max_rx_buf_len value must be a multiple of 128 */ + if ((max_rx_buf_len == 0) + || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) + return -EINVAL; + + out_be16(mrblr_register, max_rx_buf_len); + return 0; +} + +static int init_min_frame_len(u16 min_frame_length, + volatile u16 *minflr_register, + volatile u16 *mrblr_register) +{ + u16 mrblr_value = 0; + + mrblr_value = in_be16(mrblr_register); + if (min_frame_length >= (mrblr_value - 4)) + return -EINVAL; + + out_be16(minflr_register, min_frame_length); + return 0; +} + +static int adjust_enet_interface(ucc_geth_private_t *ugeth) +{ + ucc_geth_info_t *ug_info; + ucc_geth_t *ug_regs; + ucc_fast_t *uf_regs; + enet_speed_e speed; + int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm = + 0, limited_to_full_duplex = 0; + u32 upsmr, maccfg2, utbipar, tbiBaseAddress; + u16 value; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ug_info = ugeth->ug_info; + ug_regs = ugeth->ug_regs; + uf_regs = ugeth->uccf->uf_regs; + + /* Analyze enet_interface according to Interface Mode Configuration + table */ + ret_val = + get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm, + &rpm, &tbi, &limited_to_full_duplex); + if (ret_val != 0) { + ugeth_err + ("%s: half duplex not supported in requested configuration.", + __FUNCTION__); + return ret_val; + } + + /* Set MACCFG2 */ + maccfg2 = in_be32(&ug_regs->maccfg2); + maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; + if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT)) + maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; + else if (speed == ENET_SPEED_1000BT) + maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; + maccfg2 |= ug_info->padAndCrc; + out_be32(&ug_regs->maccfg2, maccfg2); + + /* Set UPSMR */ + upsmr = in_be32(&uf_regs->upsmr); + upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM); + if (rpm) + upsmr |= UPSMR_RPM; + if (r10m) + upsmr |= UPSMR_R10M; + if (tbi) + upsmr |= UPSMR_TBIM; + if (rmm) + upsmr |= UPSMR_RMM; + out_be32(&uf_regs->upsmr, upsmr); + + /* Set UTBIPAR */ + utbipar = in_be32(&ug_regs->utbipar); + utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK; + if (tbi) + utbipar |= + (ug_info->phy_address + + ugeth->ug_info->uf_info. + ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT; + else + utbipar |= + (0x10 + + ugeth->ug_info->uf_info. + ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT; + out_be32(&ug_regs->utbipar, utbipar); + + /* Disable autonegotiation in tbi mode, because by default it + comes up in autonegotiation mode. */ + /* Note that this depends on proper setting in utbipar register. 
*/ + if (tbi) { + tbiBaseAddress = in_be32(&ug_regs->utbipar); + tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; + tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; + value = + ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress, + ENET_TBI_MII_CR); + value &= ~0x1000; /* Turn off autonegotiation */ + ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress, + ENET_TBI_MII_CR, value); + } + + ret_val = init_mac_duplex_mode(1, + limited_to_full_duplex, + &ug_regs->maccfg2); + if (ret_val != 0) { + ugeth_err + ("%s: half duplex not supported in requested configuration.", + __FUNCTION__); + return ret_val; + } + + init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); + + ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); + if (ret_val != 0) { + ugeth_err + ("%s: Preamble length must be between 3 and 7 inclusive.", + __FUNCTION__); + return ret_val; + } + + return 0; +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the ugeth structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. + */ +static void adjust_link(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_geth_t *ug_regs; + u32 tempval; + struct ugeth_mii_info *mii_info = ugeth->mii_info; + + ug_regs = ugeth->ug_regs; + + if (mii_info->link) { + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (mii_info->duplex != ugeth->oldduplex) { + if (!(mii_info->duplex)) { + tempval = in_be32(&ug_regs->maccfg2); + tempval &= ~(MACCFG2_FDX); + out_be32(&ug_regs->maccfg2, tempval); + + ugeth_info("%s: Half Duplex", dev->name); + } else { + tempval = in_be32(&ug_regs->maccfg2); + tempval |= MACCFG2_FDX; + out_be32(&ug_regs->maccfg2, tempval); + + ugeth_info("%s: Full Duplex", dev->name); + } + + ugeth->oldduplex = mii_info->duplex; + } + + if (mii_info->speed != ugeth->oldspeed) { + switch (mii_info->speed) { + case 1000: +#ifdef CONFIG_MPC836x +/* FIXME: This code is for 100Mbs BUG fixing, +remove this when it is fixed!!! */ + if (ugeth->ug_info->enet_interface == + ENET_1000_GMII) + /* Run the commands which initialize the PHY */ + { + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, 0x1b); + tempval |= 0x000f; + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, 0x1b, + (u16) tempval); + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, + MII_BMCR); + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, MII_BMCR, + (u16) (tempval | BMCR_RESET)); + } else if (ugeth->ug_info->enet_interface == + ENET_1000_RGMII) + /* Run the commands which initialize the PHY */ + { + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, 0x1b); + tempval = (tempval & ~0x000f) | 0x000b; + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, 0x1b, + (u16) tempval); + tempval = + (u32) mii_info->mdio_read(ugeth-> + dev, mii_info->mii_id, + MII_BMCR); + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, MII_BMCR, + (u16) (tempval | BMCR_RESET)); + } + msleep(4000); +#endif /* CONFIG_MPC8360 */ + adjust_enet_interface(ugeth); + break; + case 100: + case 10: +#ifdef CONFIG_MPC836x +/* FIXME: This code is for 100Mbs BUG fixing, +remove this lines when it will be fixed!!! 
*/ + ugeth->ug_info->enet_interface = ENET_100_RGMII; + tempval = + (u32) mii_info->mdio_read(ugeth->dev, + mii_info->mii_id, + 0x1b); + tempval = (tempval & ~0x000f) | 0x000b; + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, 0x1b, + (u16) tempval); + tempval = + (u32) mii_info->mdio_read(ugeth->dev, + mii_info->mii_id, + MII_BMCR); + mii_info->mdio_write(ugeth->dev, + mii_info->mii_id, MII_BMCR, + (u16) (tempval | + BMCR_RESET)); + msleep(4000); +#endif /* CONFIG_MPC8360 */ + adjust_enet_interface(ugeth); + break; + default: + ugeth_warn + ("%s: Ack! Speed (%d) is not 10/100/1000!", + dev->name, mii_info->speed); + break; + } + + ugeth_info("%s: Speed %dBT", dev->name, + mii_info->speed); + + ugeth->oldspeed = mii_info->speed; + } + + if (!ugeth->oldlink) { + ugeth_info("%s: Link is up", dev->name); + ugeth->oldlink = 1; + netif_carrier_on(dev); + netif_schedule(dev); + } + } else { + if (ugeth->oldlink) { + ugeth_info("%s: Link is down", dev->name); + ugeth->oldlink = 0; + ugeth->oldspeed = 0; + ugeth->oldduplex = -1; + netif_carrier_off(dev); + } + } +} + +/* Configure the PHY for dev. + * returns 0 if success. -1 if failure + */ +static int init_phy(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + struct phy_info *curphy; + ucc_mii_mng_t *mii_regs; + struct ugeth_mii_info *mii_info; + int err; + + mii_regs = &ugeth->ug_regs->miimng; + + ugeth->oldlink = 0; + ugeth->oldspeed = 0; + ugeth->oldduplex = -1; + + mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL); + + if (NULL == mii_info) { + ugeth_err("%s: Could not allocate mii_info", dev->name); + return -ENOMEM; + } + + mii_info->mii_regs = mii_regs; + mii_info->speed = SPEED_1000; + mii_info->duplex = DUPLEX_FULL; + mii_info->pause = 0; + mii_info->link = 0; + + mii_info->advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + mii_info->autoneg = 1; + + mii_info->mii_id = ugeth->ug_info->phy_address; + + mii_info->dev = dev; + + mii_info->mdio_read = &read_phy_reg; + mii_info->mdio_write = &write_phy_reg; + + ugeth->mii_info = mii_info; + + spin_lock_irq(&ugeth->lock); + + /* Set this UCC to be the master of the MII managment */ + ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num); + + if (init_mii_management_configuration(1, + ugeth->ug_info-> + miiPreambleSupress, + &mii_regs->miimcfg, + &mii_regs->miimind)) { + ugeth_err("%s: The MII Bus is stuck!", dev->name); + err = -1; + goto bus_fail; + } + + spin_unlock_irq(&ugeth->lock); + + /* get info for this PHY */ + curphy = get_phy_info(ugeth->mii_info); + + if (curphy == NULL) { + ugeth_err("%s: No PHY found", dev->name); + err = -1; + goto no_phy; + } + + mii_info->phyinfo = curphy; + + /* Run the commands which initialize the PHY */ + if (curphy->init) { + err = curphy->init(ugeth->mii_info); + if (err) + goto phy_init_fail; + } + + return 0; + + phy_init_fail: + no_phy: + bus_fail: + kfree(mii_info); + + return err; +} + +#ifdef CONFIG_UGETH_TX_ON_DEMOND +static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth) +{ + ucc_fast_transmit_on_demand(ugeth->uccf); + + return 0; +} +#endif + +static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + u32 temp; + + uccf = ugeth->uccf; + + /* Mask GRACEFUL STOP TX interrupt bit and clear it */ + temp = in_be32(uccf->p_uccm); + temp &= ~UCCE_GRA; + out_be32(uccf->p_uccm, temp); + out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 
1 */ + + /* Issue host command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, + (u8) QE_CR_PROTOCOL_ETHERNET, 0); + + /* Wait for command to complete */ + do { + temp = in_be32(uccf->p_ucce); + } while (!(temp & UCCE_GRA)); + + uccf->stopped_tx = 1; + + return 0; +} + +static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + u8 temp; + + uccf = ugeth->uccf; + + /* Clear acknowledge bit */ + temp = ugeth->p_rx_glbl_pram->rxgstpack; + temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; + ugeth->p_rx_glbl_pram->rxgstpack = temp; + + /* Keep issuing command and checking acknowledge bit until + it is asserted, according to spec */ + do { + /* Issue host command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. + ucc_num); + qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, + (u8) QE_CR_PROTOCOL_ETHERNET, 0); + + temp = ugeth->p_rx_glbl_pram->rxgstpack; + } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); + + uccf->stopped_rx = 1; + + return 0; +} + +static int ugeth_restart_tx(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + + uccf = ugeth->uccf; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, + 0); + uccf->stopped_tx = 0; + + return 0; +} + +static int ugeth_restart_rx(ucc_geth_private_t *ugeth) +{ + ucc_fast_private_t *uccf; + u32 cecr_subblock; + + uccf = ugeth->uccf; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, + 0); + uccf->stopped_rx = 0; + + return 0; +} + +static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode) +{ + ucc_fast_private_t *uccf; + int enabled_tx, enabled_rx; + + uccf = ugeth->uccf; + + /* check if the UCC number is in range. */ + if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { + ugeth_err("%s: ucc_num out of range.", __FUNCTION__); + return -EINVAL; + } + + enabled_tx = uccf->enabled_tx; + enabled_rx = uccf->enabled_rx; + + /* Get Tx and Rx going again, in case this channel was actively + disabled. */ + if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) + ugeth_restart_tx(ugeth); + if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) + ugeth_restart_rx(ugeth); + + ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ + + return 0; + +} + +static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode) +{ + ucc_fast_private_t *uccf; + + uccf = ugeth->uccf; + + /* check if the UCC number is in range. 
*/ + if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { + ugeth_err("%s: ucc_num out of range.", __FUNCTION__); + return -EINVAL; + } + + /* Stop any transmissions */ + if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) + ugeth_graceful_stop_tx(ugeth); + + /* Stop any receptions */ + if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) + ugeth_graceful_stop_rx(ugeth); + + ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ + + return 0; +} + +static void ugeth_dump_regs(ucc_geth_private_t *ugeth) +{ +#ifdef DEBUG + ucc_fast_dump_regs(ugeth->uccf); + dump_regs(ugeth); + dump_bds(ugeth); +#endif +} + +#ifdef CONFIG_UGETH_FILTERING +static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t * + p_UccGethTadParams, + qe_fltr_tad_t *qe_fltr_tad) +{ + u16 temp; + + /* Zero serialized TAD */ + memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE); + + qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */ + if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode || + (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) + || (p_UccGethTadParams->vnontag_op != + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP) + ) + qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF; + if (p_UccGethTadParams->reject_frame) + qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ; + temp = + (u16) (((u16) p_UccGethTadParams-> + vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT); + qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */ + + qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */ + if (p_UccGethTadParams->vnontag_op == + UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT) + qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP; + qe_fltr_tad->serialized[1] |= + p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT; + + qe_fltr_tad->serialized[2] |= + p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT; + /* upper bits */ + qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8); + /* lower bits */ + qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff); + + return 0; +} + +static enet_addr_container_t + *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + enet_addr_container_t *enet_addr_cont; + struct list_head *p_lh; + u16 i, num; + int32_t j; + u8 *p_counter; + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { + p_lh = &ugeth->group_hash_q; + p_counter = &(ugeth->numGroupAddrInHash); + } else { + p_lh = &ugeth->ind_hash_q; + p_counter = &(ugeth->numIndAddrInHash); + } + + if (!p_lh) + return NULL; + + num = *p_counter; + + for (i = 0; i < num; i++) { + enet_addr_cont = + (enet_addr_container_t *) + ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); + for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) { + if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j]) + break; + if (j == 0) + return enet_addr_cont; /* Found */ + } + enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ + } + return NULL; +} + +static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + ucc_geth_enet_address_recognition_location_e location; + enet_addr_container_t *enet_addr_cont; + struct list_head *p_lh; + u8 i; + u32 limit; + u8 *p_counter; + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { + p_lh = &ugeth->group_hash_q; + limit = ugeth->ug_info->maxGroupAddrInHash; + location = + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH; + p_counter = &(ugeth->numGroupAddrInHash); + } else { + p_lh = &ugeth->ind_hash_q; + limit = 
ugeth->ug_info->maxIndAddrInHash; + location = + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH; + p_counter = &(ugeth->numIndAddrInHash); + } + + if ((enet_addr_cont = + ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) { + list_add(p_lh, &enet_addr_cont->node); /* Put it back */ + return 0; + } + if ((!p_lh) || (!(*p_counter < limit))) + return -EBUSY; + if (!(enet_addr_cont = get_enet_addr_container())) + return -ENOMEM; + for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) + (enet_addr_cont->address)[i] = (*p_enet_addr)[i]; + enet_addr_cont->location = location; + enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ + ++(*p_counter); + + hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address)); + + return 0; +} + +static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + enet_addr_container_t *enet_addr_cont; + ucc_fast_private_t *uccf; + comm_dir_e comm_dir; + u16 i, num; + struct list_head *p_lh; + u32 *addr_h, *addr_l; + u8 *p_counter; + + uccf = ugeth->uccf; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + if (! + (enet_addr_cont = + ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) + return -ENOENT; + + /* It's been found and removed from the CQ. */ + /* Now destroy its container */ + put_enet_addr_container(enet_addr_cont); + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { + addr_h = &(p_82xx_addr_filt->gaddr_h); + addr_l = &(p_82xx_addr_filt->gaddr_l); + p_lh = &ugeth->group_hash_q; + p_counter = &(ugeth->numGroupAddrInHash); + } else { + addr_h = &(p_82xx_addr_filt->iaddr_h); + addr_l = &(p_82xx_addr_filt->iaddr_l); + p_lh = &ugeth->ind_hash_q; + p_counter = &(ugeth->numIndAddrInHash); + } + + comm_dir = 0; + if (uccf->enabled_tx) + comm_dir |= COMM_DIR_TX; + if (uccf->enabled_rx) + comm_dir |= COMM_DIR_RX; + if (comm_dir) + ugeth_disable(ugeth, comm_dir); + + /* Clear the hash table. 
*/ + out_be32(addr_h, 0x00000000); + out_be32(addr_l, 0x00000000); + + /* Add all remaining CQ elements back into hash */ + num = --(*p_counter); + for (i = 0; i < num; i++) { + enet_addr_cont = + (enet_addr_container_t *) + ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); + hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address)); + enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ + } + + if (comm_dir) + ugeth_enable(ugeth, comm_dir); + + return 0; +} +#endif /* CONFIG_UGETH_FILTERING */ + +static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t * + ugeth, + enet_addr_type_e + enet_addr_type) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + ucc_fast_private_t *uccf; + comm_dir_e comm_dir; + struct list_head *p_lh; + u16 i, num; + u32 *addr_h, *addr_l; + u8 *p_counter; + + uccf = ugeth->uccf; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { + addr_h = &(p_82xx_addr_filt->gaddr_h); + addr_l = &(p_82xx_addr_filt->gaddr_l); + p_lh = &ugeth->group_hash_q; + p_counter = &(ugeth->numGroupAddrInHash); + } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { + addr_h = &(p_82xx_addr_filt->iaddr_h); + addr_l = &(p_82xx_addr_filt->iaddr_l); + p_lh = &ugeth->ind_hash_q; + p_counter = &(ugeth->numIndAddrInHash); + } else + return -EINVAL; + + comm_dir = 0; + if (uccf->enabled_tx) + comm_dir |= COMM_DIR_TX; + if (uccf->enabled_rx) + comm_dir |= COMM_DIR_RX; + if (comm_dir) + ugeth_disable(ugeth, comm_dir); + + /* Clear the hash table. */ + out_be32(addr_h, 0x00000000); + out_be32(addr_l, 0x00000000); + + if (!p_lh) + return 0; + + num = *p_counter; + + /* Delete all remaining CQ elements */ + for (i = 0; i < num; i++) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); + + *p_counter = 0; + + if (comm_dir) + ugeth_enable(ugeth, comm_dir); + + return 0; +} + +#ifdef CONFIG_UGETH_FILTERING +static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth, + enet_addr_t *p_enet_addr, + u8 paddr_num) +{ + int i; + + if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) + ugeth_warn + ("%s: multicast address added to paddr will have no " + "effect - is this what you wanted?", + __FUNCTION__); + + ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */ + /* store address in our database */ + for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) + ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i]; + /* put in hardware */ + return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num); +} +#endif /* CONFIG_UGETH_FILTERING */ + +static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth, + u8 paddr_num) +{ + ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ + return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ +} + +static void ucc_geth_memclean(ucc_geth_private_t *ugeth) +{ + u16 i, j; + u8 *bd; + + if (!ugeth) + return; + + if (ugeth->uccf) + ucc_fast_free(ugeth->uccf); + + if (ugeth->p_thread_data_tx) { + qe_muram_free(ugeth->thread_dat_tx_offset); + ugeth->p_thread_data_tx = NULL; + } + if (ugeth->p_thread_data_rx) { + qe_muram_free(ugeth->thread_dat_rx_offset); + ugeth->p_thread_data_rx = NULL; + } + if (ugeth->p_exf_glbl_param) { + qe_muram_free(ugeth->exf_glbl_param_offset); + ugeth->p_exf_glbl_param = NULL; + } + if (ugeth->p_rx_glbl_pram) { + qe_muram_free(ugeth->rx_glbl_pram_offset); + ugeth->p_rx_glbl_pram = NULL; + } + if (ugeth->p_tx_glbl_pram) { + 
qe_muram_free(ugeth->tx_glbl_pram_offset); + ugeth->p_tx_glbl_pram = NULL; + } + if (ugeth->p_send_q_mem_reg) { + qe_muram_free(ugeth->send_q_mem_reg_offset); + ugeth->p_send_q_mem_reg = NULL; + } + if (ugeth->p_scheduler) { + qe_muram_free(ugeth->scheduler_offset); + ugeth->p_scheduler = NULL; + } + if (ugeth->p_tx_fw_statistics_pram) { + qe_muram_free(ugeth->tx_fw_statistics_pram_offset); + ugeth->p_tx_fw_statistics_pram = NULL; + } + if (ugeth->p_rx_fw_statistics_pram) { + qe_muram_free(ugeth->rx_fw_statistics_pram_offset); + ugeth->p_rx_fw_statistics_pram = NULL; + } + if (ugeth->p_rx_irq_coalescing_tbl) { + qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); + ugeth->p_rx_irq_coalescing_tbl = NULL; + } + if (ugeth->p_rx_bd_qs_tbl) { + qe_muram_free(ugeth->rx_bd_qs_tbl_offset); + ugeth->p_rx_bd_qs_tbl = NULL; + } + if (ugeth->p_init_enet_param_shadow) { + return_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + rxthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_RX, + ugeth->ug_info->riscRx, 1); + return_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_TX, + ugeth->ug_info->riscTx, 0); + kfree(ugeth->p_init_enet_param_shadow); + ugeth->p_init_enet_param_shadow = NULL; + } + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + bd = ugeth->p_tx_bd_ring[i]; + for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { + if (ugeth->tx_skbuff[i][j]) { + dma_unmap_single(NULL, + BD_BUFFER_ARG(bd), + (BD_STATUS_AND_LENGTH(bd) & + BD_LENGTH_MASK), + DMA_TO_DEVICE); + dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); + ugeth->tx_skbuff[i][j] = NULL; + } + } + + kfree(ugeth->tx_skbuff[i]); + + if (ugeth->p_tx_bd_ring[i]) { + if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_SYSTEM) + kfree((void *)ugeth->tx_bd_ring_offset[i]); + else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) + qe_muram_free(ugeth->tx_bd_ring_offset[i]); + ugeth->p_tx_bd_ring[i] = NULL; + } + } + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + if (ugeth->p_rx_bd_ring[i]) { + /* Return existing data buffers in ring */ + bd = ugeth->p_rx_bd_ring[i]; + for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { + if (ugeth->rx_skbuff[i][j]) { + dma_unmap_single(NULL, BD_BUFFER(bd), + ugeth->ug_info-> + uf_info. + max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT, + DMA_FROM_DEVICE); + + dev_kfree_skb_any(ugeth-> + rx_skbuff[i][j]); + ugeth->rx_skbuff[i][j] = NULL; + } + bd += UCC_GETH_SIZE_OF_BD; + } + + kfree(ugeth->rx_skbuff[i]); + + if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_SYSTEM) + kfree((void *)ugeth->rx_bd_ring_offset[i]); + else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) + qe_muram_free(ugeth->rx_bd_ring_offset[i]); + ugeth->p_rx_bd_ring[i] = NULL; + } + } + while (!list_empty(&ugeth->group_hash_q)) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY + (dequeue(&ugeth->group_hash_q))); + while (!list_empty(&ugeth->ind_hash_q)) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY + (dequeue(&ugeth->ind_hash_q))); + +} + +static void ucc_geth_set_multi(struct net_device *dev) +{ + ucc_geth_private_t *ugeth; + struct dev_mc_list *dmi; + ucc_fast_t *uf_regs; + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + enet_addr_t tempaddr; + u8 *mcptr, *tdptr; + int i, j; + + ugeth = netdev_priv(dev); + + uf_regs = ugeth->uccf->uf_regs; + + if (dev->flags & IFF_PROMISC) { + + /* Log any net taps. 
*/ + printk("%s: Promiscuous mode enabled.\n", dev->name); + uf_regs->upsmr |= UPSMR_PRO; + + } else { + + uf_regs->upsmr &= ~UPSMR_PRO; + + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth-> + p_rx_glbl_pram->addressfiltering; + + if (dev->flags & IFF_ALLMULTI) { + /* Catch all multicast addresses, so set the + * filter to all 1's. + */ + out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); + out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); + } else { + /* Clear filter and add the addresses in the list. + */ + out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); + out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); + + dmi = dev->mc_list; + + for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) { + + /* Only support group multicast for now. + */ + if (!(dmi->dmi_addr[0] & 1)) + continue; + + /* The address in dmi_addr is LSB first, + * and taddr is MSB first. We have to + * copy bytes MSB first from dmi_addr. + */ + mcptr = (u8 *) dmi->dmi_addr + 5; + tdptr = (u8 *) & tempaddr; + for (j = 0; j < 6; j++) + *tdptr++ = *mcptr--; + + /* Ask CPM to run CRC and set bit in + * filter mask. + */ + hw_add_addr_in_hash(ugeth, &tempaddr); + + } + } + } +} + +static void ucc_geth_stop(ucc_geth_private_t *ugeth) +{ + ucc_geth_t *ug_regs = ugeth->ug_regs; + u32 tempval; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + /* Disable the controller */ + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); + + /* Tell the kernel the link is down */ + ugeth->mii_info->link = 0; + adjust_link(ugeth->dev); + + /* Mask all interrupts */ + out_be32(ugeth->uccf->p_ucce, 0x00000000); + + /* Clear all interrupts */ + out_be32(ugeth->uccf->p_ucce, 0xffffffff); + + /* Disable Rx and Tx */ + tempval = in_be32(&ug_regs->maccfg1); + tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); + out_be32(&ug_regs->maccfg1, tempval); + + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { + /* Clear any pending interrupts */ + mii_clear_phy_interrupt(ugeth->mii_info); + + /* Disable PHY Interrupts */ + mii_configure_phy_interrupt(ugeth->mii_info, + MII_INTERRUPT_DISABLED); + } + + free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); + + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { + free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev); + } else { + del_timer_sync(&ugeth->phy_info_timer); + } + + ucc_geth_memclean(ugeth); +} + +static int ucc_geth_startup(ucc_geth_private_t *ugeth) +{ + ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; + ucc_geth_init_pram_t *p_init_enet_pram; + ucc_fast_private_t *uccf; + ucc_geth_info_t *ug_info; + ucc_fast_info_t *uf_info; + ucc_fast_t *uf_regs; + ucc_geth_t *ug_regs; + int ret_val = -EINVAL; + u32 remoder = UCC_GETH_REMODER_INIT; + u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; + u32 ifstat, i, j, size, l2qt, l3qt, length; + u16 temoder = UCC_GETH_TEMODER_INIT; + u16 test; + u8 function_code = 0; + u8 *bd, *endOfRing; + u8 numThreadsRxNumerical, numThreadsTxNumerical; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || + (uf_info->bd_mem_part == MEM_PART_MURAM))) { + ugeth_err("%s: Bad memory partition value.", __FUNCTION__); + return -EINVAL; + } + + /* Rx BD lengths */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || + (ug_info->bdRingLenRx[i] % + UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { + ugeth_err + ("%s: Rx BD ring length must be multiple of 4," + " no smaller than 8.", __FUNCTION__); + return 
-EINVAL; + } + } + + /* Tx BD lengths */ + for (i = 0; i < ug_info->numQueuesTx; i++) { + if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { + ugeth_err + ("%s: Tx BD ring length must be no smaller than 2.", + __FUNCTION__); + return -EINVAL; + } + } + + /* mrblr */ + if ((uf_info->max_rx_buf_length == 0) || + (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { + ugeth_err + ("%s: max_rx_buf_length must be non-zero multiple of 128.", + __FUNCTION__); + return -EINVAL; + } + + /* num Tx queues */ + if (ug_info->numQueuesTx > NUM_TX_QUEUES) { + ugeth_err("%s: number of tx queues too large.", __FUNCTION__); + return -EINVAL; + } + + /* num Rx queues */ + if (ug_info->numQueuesRx > NUM_RX_QUEUES) { + ugeth_err("%s: number of rx queues too large.", __FUNCTION__); + return -EINVAL; + } + + /* l2qt */ + for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { + if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { + ugeth_err + ("%s: VLAN priority table entry must not be" + " larger than number of Rx queues.", + __FUNCTION__); + return -EINVAL; + } + } + + /* l3qt */ + for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { + if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { + ugeth_err + ("%s: IP priority table entry must not be" + " larger than number of Rx queues.", + __FUNCTION__); + return -EINVAL; + } + } + + if (ug_info->cam && !ug_info->ecamptr) { + ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", + __FUNCTION__); + return -EINVAL; + } + + if ((ug_info->numStationAddresses != + UCC_GETH_NUM_OF_STATION_ADDRESSES_1) + && ug_info->rxExtendedFiltering) { + ugeth_err("%s: Number of station addresses greater than 1 " + "not allowed in extended parsing mode.", + __FUNCTION__); + return -EINVAL; + } + + /* Generate uccm_mask for receive */ + uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ + for (i = 0; i < ug_info->numQueuesRx; i++) + uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i); + + for (i = 0; i < ug_info->numQueuesTx; i++) + uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i); + /* Initialize the general fast UCC block. 
*/ + if (ucc_fast_init(uf_info, &uccf)) { + ugeth_err("%s: Failed to init uccf.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->uccf = uccf; + + switch (ug_info->numThreadsRx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsRxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsRxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsRxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsRxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsRxNumerical = 8; + break; + default: + ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + break; + } + + switch (ug_info->numThreadsTx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsTxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsTxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsTxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsTxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsTxNumerical = 8; + break; + default: + ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + break; + } + + /* Calculate rx_extended_features */ + ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || + ug_info->ipAddressAlignment || + (ug_info->numStationAddresses != + UCC_GETH_NUM_OF_STATION_ADDRESSES_1); + + ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || + (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) + || (ug_info->vlanOperationNonTagged != + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); + + uf_regs = uccf->uf_regs; + ug_regs = (ucc_geth_t *) (uccf->uf_regs); + ugeth->ug_regs = ug_regs; + + init_default_reg_vals(&uf_regs->upsmr, + &ug_regs->maccfg1, &ug_regs->maccfg2); + + /* Set UPSMR */ + /* For more details see the hardware spec. */ + init_rx_parameters(ug_info->bro, + ug_info->rsh, ug_info->pro, &uf_regs->upsmr); + + /* We're going to ignore other registers for now, */ + /* except as needed to get up and running */ + + /* Set MACCFG1 */ + /* For more details see the hardware spec. */ + init_flow_control_params(ug_info->aufc, + ug_info->receiveFlowControl, + 1, + ug_info->pausePeriod, + ug_info->extensionField, + &uf_regs->upsmr, + &ug_regs->uempr, &ug_regs->maccfg1); + + maccfg1 = in_be32(&ug_regs->maccfg1); + maccfg1 |= MACCFG1_ENABLE_RX; + maccfg1 |= MACCFG1_ENABLE_TX; + out_be32(&ug_regs->maccfg1, maccfg1); + + /* Set IPGIFG */ + /* For more details see the hardware spec. */ + ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, + ug_info->nonBackToBackIfgPart2, + ug_info-> + miminumInterFrameGapEnforcement, + ug_info->backToBackInterFrameGap, + &ug_regs->ipgifg); + if (ret_val != 0) { + ugeth_err("%s: IPGIFG initialization parameter too large.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + /* Set HAFDUP */ + /* For more details see the hardware spec. */ + ret_val = init_half_duplex_params(ug_info->altBeb, + ug_info->backPressureNoBackoff, + ug_info->noBackoff, + ug_info->excessDefer, + ug_info->altBebTruncation, + ug_info->maxRetransmission, + ug_info->collisionWindow, + &ug_regs->hafdup); + if (ret_val != 0) { + ugeth_err("%s: Half Duplex initialization parameter too large.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + /* Set IFSTAT */ + /* For more details see the hardware spec. 
*/ + /* Read only - resets upon read */ + ifstat = in_be32(&ug_regs->ifstat); + + /* Clear UEMPR */ + /* For more details see the hardware spec. */ + out_be32(&ug_regs->uempr, 0); + + /* Set UESCR */ + /* For more details see the hardware spec. */ + init_hw_statistics_gathering_mode((ug_info->statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), + 0, &uf_regs->upsmr, &ug_regs->uescr); + + /* Allocate Tx bds */ + for (j = 0; j < ug_info->numQueuesTx; j++) { + /* Allocate in multiple of + UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, + according to spec */ + length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) + / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) + * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; + if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) % + UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) + length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; + if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { + u32 align = 4; + if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) + align = UCC_GETH_TX_BD_RING_ALIGNMENT; + ugeth->tx_bd_ring_offset[j] = + (u32) (kmalloc((u32) (length + align), + GFP_KERNEL)); + if (ugeth->tx_bd_ring_offset[j] != 0) + ugeth->p_tx_bd_ring[j] = + (void*)((ugeth->tx_bd_ring_offset[j] + + align) & ~(align - 1)); + } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { + ugeth->tx_bd_ring_offset[j] = + qe_muram_alloc(length, + UCC_GETH_TX_BD_RING_ALIGNMENT); + if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j])) + ugeth->p_tx_bd_ring[j] = + (u8 *) qe_muram_addr(ugeth-> + tx_bd_ring_offset[j]); + } + if (!ugeth->p_tx_bd_ring[j]) { + ugeth_err + ("%s: Can not allocate memory for Tx bd rings.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + /* Zero unused end of bd ring, according to spec */ + memset(ugeth->p_tx_bd_ring[j] + + ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0, + length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD); + } + + /* Allocate Rx bds */ + for (j = 0; j < ug_info->numQueuesRx; j++) { + length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD; + if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { + u32 align = 4; + if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) + align = UCC_GETH_RX_BD_RING_ALIGNMENT; + ugeth->rx_bd_ring_offset[j] = + (u32) (kmalloc((u32) (length + align), GFP_KERNEL)); + if (ugeth->rx_bd_ring_offset[j] != 0) + ugeth->p_rx_bd_ring[j] = + (void*)((ugeth->rx_bd_ring_offset[j] + + align) & ~(align - 1)); + } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { + ugeth->rx_bd_ring_offset[j] = + qe_muram_alloc(length, + UCC_GETH_RX_BD_RING_ALIGNMENT); + if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j])) + ugeth->p_rx_bd_ring[j] = + (u8 *) qe_muram_addr(ugeth-> + rx_bd_ring_offset[j]); + } + if (!ugeth->p_rx_bd_ring[j]) { + ugeth_err + ("%s: Can not allocate memory for Rx bd rings.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + } + + /* Init Tx bds */ + for (j = 0; j < ug_info->numQueuesTx; j++) { + /* Setup the skbuff rings */ + ugeth->tx_skbuff[j] = + (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * + ugeth->ug_info->bdRingLenTx[j], + GFP_KERNEL); + + if (ugeth->tx_skbuff[j] == NULL) { + ugeth_err("%s: Could not allocate tx_skbuff", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) + ugeth->tx_skbuff[j][i] = NULL; + + ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; + bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; + for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { + BD_BUFFER_CLEAR(bd); + BD_STATUS_AND_LENGTH_SET(bd, 0); + bd 
+= UCC_GETH_SIZE_OF_BD; + } + bd -= UCC_GETH_SIZE_OF_BD; + BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */ + } + + /* Init Rx bds */ + for (j = 0; j < ug_info->numQueuesRx; j++) { + /* Setup the skbuff rings */ + ugeth->rx_skbuff[j] = + (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * + ugeth->ug_info->bdRingLenRx[j], + GFP_KERNEL); + + if (ugeth->rx_skbuff[j] == NULL) { + ugeth_err("%s: Could not allocate rx_skbuff", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) + ugeth->rx_skbuff[j][i] = NULL; + + ugeth->skb_currx[j] = 0; + bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; + for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { + BD_STATUS_AND_LENGTH_SET(bd, R_I); + BD_BUFFER_CLEAR(bd); + bd += UCC_GETH_SIZE_OF_BD; + } + bd -= UCC_GETH_SIZE_OF_BD; + BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */ + } + + /* + * Global PRAM + */ + /* Tx global PRAM */ + /* Allocate global tx parameter RAM page */ + ugeth->tx_glbl_pram_offset = + qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t), + UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_tx_glbl_pram = + (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth-> + tx_glbl_pram_offset); + /* Zero out p_tx_glbl_pram */ + memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t)); + + /* Fill global PRAM */ + + /* TQPTR */ + /* Size varies with number of Tx threads */ + ugeth->thread_dat_tx_offset = + qe_muram_alloc(numThreadsTxNumerical * + sizeof(ucc_geth_thread_data_tx_t) + + 32 * (numThreadsTxNumerical == 1), + UCC_GETH_THREAD_DATA_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_thread_data_tx = + (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth-> + thread_dat_tx_offset); + out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); + + /* vtagtable */ + for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) + out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], + ug_info->vtagtable[i]); + + /* iphoffset */ + for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) + ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i]; + + /* SQPTR */ + /* Size varies with number of Tx queues */ + ugeth->send_q_mem_reg_offset = + qe_muram_alloc(ug_info->numQueuesTx * + sizeof(ucc_geth_send_queue_qd_t), + UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_send_q_mem_reg = + (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth-> + send_q_mem_reg_offset); + out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); + + /* Setup the table */ + /* Assume BD rings are already established */ + for (i = 0; i < ug_info->numQueuesTx; i++) { + endOfRing = + ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - + 1) * UCC_GETH_SIZE_OF_BD; + if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, + (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 
+ last_bd_completed_address, + (u32) virt_to_phys(endOfRing)); + } else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) { + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, + (u32) immrbar_virt_to_phys(ugeth-> + p_tx_bd_ring[i])); + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. + last_bd_completed_address, + (u32) immrbar_virt_to_phys(endOfRing)); + } + } + + /* schedulerbasepointer */ + + if (ug_info->numQueuesTx > 1) { + /* scheduler exists only if more than 1 tx queue */ + ugeth->scheduler_offset = + qe_muram_alloc(sizeof(ucc_geth_scheduler_t), + UCC_GETH_SCHEDULER_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->scheduler_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_scheduler.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_scheduler = + (ucc_geth_scheduler_t *) qe_muram_addr(ugeth-> + scheduler_offset); + out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, + ugeth->scheduler_offset); + /* Zero out p_scheduler */ + memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t)); + + /* Set values in scheduler */ + out_be32(&ugeth->p_scheduler->mblinterval, + ug_info->mblinterval); + out_be16(&ugeth->p_scheduler->nortsrbytetime, + ug_info->nortsrbytetime); + ugeth->p_scheduler->fracsiz = ug_info->fracsiz; + ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq; + ugeth->p_scheduler->txasap = ug_info->txasap; + ugeth->p_scheduler->extrabw = ug_info->extrabw; + for (i = 0; i < NUM_TX_QUEUES; i++) + ugeth->p_scheduler->weightfactor[i] = + ug_info->weightfactor[i]; + + /* Set pointers to cpucount registers in scheduler */ + ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); + ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); + ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); + ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); + ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); + ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); + ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); + ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); + } + + /* schedulerbasepointer */ + /* TxRMON_PTR (statistics) */ + if (ug_info-> + statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { + ugeth->tx_fw_statistics_pram_offset = + qe_muram_alloc(sizeof + (ucc_geth_tx_firmware_statistics_pram_t), + UCC_GETH_TX_STATISTICS_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_tx_fw_statistics_pram.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_tx_fw_statistics_pram = + (ucc_geth_tx_firmware_statistics_pram_t *) + qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); + /* Zero out p_tx_fw_statistics_pram */ + memset(ugeth->p_tx_fw_statistics_pram, + 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t)); + } + + /* temoder */ + /* Already has speed set */ + + if (ug_info->numQueuesTx > 1) + temoder |= TEMODER_SCHEDULER_ENABLE; + if (ug_info->ipCheckSumGenerate) + temoder |= TEMODER_IP_CHECKSUM_GENERATE; + temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); + out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); + + test = in_be16(&ugeth->p_tx_glbl_pram->temoder); + + /* Function code register value to be used later */ + function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL; + /* Required for QE */ + + /* function code register */ + out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); + + /* Rx global PRAM */ + /* Allocate 
global rx parameter RAM page */ + ugeth->rx_glbl_pram_offset = + qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t), + UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_rx_glbl_pram = + (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth-> + rx_glbl_pram_offset); + /* Zero out p_rx_glbl_pram */ + memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t)); + + /* Fill global PRAM */ + + /* RQPTR */ + /* Size varies with number of Rx threads */ + ugeth->thread_dat_rx_offset = + qe_muram_alloc(numThreadsRxNumerical * + sizeof(ucc_geth_thread_data_rx_t), + UCC_GETH_THREAD_DATA_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_thread_data_rx = + (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth-> + thread_dat_rx_offset); + out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); + + /* typeorlen */ + out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); + + /* rxrmonbaseptr (statistics) */ + if (ug_info-> + statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { + ugeth->rx_fw_statistics_pram_offset = + qe_muram_alloc(sizeof + (ucc_geth_rx_firmware_statistics_pram_t), + UCC_GETH_RX_STATISTICS_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_rx_fw_statistics_pram.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + ugeth->p_rx_fw_statistics_pram = + (ucc_geth_rx_firmware_statistics_pram_t *) + qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); + /* Zero out p_rx_fw_statistics_pram */ + memset(ugeth->p_rx_fw_statistics_pram, 0, + sizeof(ucc_geth_rx_firmware_statistics_pram_t)); + } + + /* intCoalescingPtr */ + + /* Size varies with number of Rx queues */ + ugeth->rx_irq_coalescing_tbl_offset = + qe_muram_alloc(ug_info->numQueuesRx * + sizeof(ucc_geth_rx_interrupt_coalescing_entry_t), + UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_rx_irq_coalescing_tbl.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_rx_irq_coalescing_tbl = + (ucc_geth_rx_interrupt_coalescing_table_t *) + qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); + out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, + ugeth->rx_irq_coalescing_tbl_offset); + + /* Fill interrupt coalescing table */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. + interruptcoalescingmaxvalue, + ug_info->interruptcoalescingmaxvalue[i]); + out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 
+ interruptcoalescingcounter, + ug_info->interruptcoalescingmaxvalue[i]); + } + + /* MRBLR */ + init_max_rx_buff_len(uf_info->max_rx_buf_length, + &ugeth->p_rx_glbl_pram->mrblr); + /* MFLR */ + out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); + /* MINFLR */ + init_min_frame_len(ug_info->minFrameLength, + &ugeth->p_rx_glbl_pram->minflr, + &ugeth->p_rx_glbl_pram->mrblr); + /* MAXD1 */ + out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); + /* MAXD2 */ + out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); + + /* l2qt */ + l2qt = 0; + for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) + l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); + out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); + + /* l3qt */ + for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { + l3qt = 0; + for (i = 0; i < 8; i++) + l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); + out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt); + } + + /* vlantype */ + out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); + + /* vlantci */ + out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); + + /* ecamptr */ + out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); + + /* RBDQPTR */ + /* Size varies with number of Rx queues */ + ugeth->rx_bd_qs_tbl_offset = + qe_muram_alloc(ug_info->numQueuesRx * + (sizeof(ucc_geth_rx_bd_queues_entry_t) + + sizeof(ucc_geth_rx_prefetched_bds_t)), + UCC_GETH_RX_BD_QUEUES_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_rx_bd_qs_tbl = + (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth-> + rx_bd_qs_tbl_offset); + out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); + /* Zero out p_rx_bd_qs_tbl */ + memset(ugeth->p_rx_bd_qs_tbl, + 0, + ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) + + sizeof(ucc_geth_rx_prefetched_bds_t))); + + /* Setup the table */ + /* Assume BD rings are already established */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { + out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); + } else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) { + out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + (u32) immrbar_virt_to_phys(ugeth-> + p_rx_bd_ring[i])); + } + /* rest of fields handled by QE */ + } + + /* remoder */ + /* Already has speed set */ + + if (ugeth->rx_extended_features) + remoder |= REMODER_RX_EXTENDED_FEATURES; + if (ug_info->rxExtendedFiltering) + remoder |= REMODER_RX_EXTENDED_FILTERING; + if (ug_info->dynamicMaxFrameLength) + remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; + if (ug_info->dynamicMinFrameLength) + remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; + remoder |= + ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; + remoder |= + ug_info-> + vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; + remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; + remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); + if (ug_info->ipCheckSumCheck) + remoder |= REMODER_IP_CHECKSUM_CHECK; + if (ug_info->ipAddressAlignment) + remoder |= REMODER_IP_ADDRESS_ALIGNMENT; + out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); + + /* Note that this function must be called */ + /* ONLY AFTER p_tx_fw_statistics_pram */ + /* andp_UccGethRxFirmwareStatisticsPram are allocated ! 
*/ + init_firmware_statistics_gathering_mode((ug_info-> + statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), + (ug_info->statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), + &ugeth->p_tx_glbl_pram->txrmonbaseptr, + ugeth->tx_fw_statistics_pram_offset, + &ugeth->p_rx_glbl_pram->rxrmonbaseptr, + ugeth->rx_fw_statistics_pram_offset, + &ugeth->p_tx_glbl_pram->temoder, + &ugeth->p_rx_glbl_pram->remoder); + + /* function code register */ + ugeth->p_rx_glbl_pram->rstate = function_code; + + /* initialize extended filtering */ + if (ug_info->rxExtendedFiltering) { + if (!ug_info->extendedFilteringChainPointer) { + ugeth_err("%s: Null Extended Filtering Chain Pointer.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + } + + /* Allocate memory for extended filtering Mode Global + Parameters */ + ugeth->exf_glbl_param_offset = + qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t), + UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); + if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_exf_glbl_param.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + + ugeth->p_exf_glbl_param = + (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth-> + exf_glbl_param_offset); + out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, + ugeth->exf_glbl_param_offset); + out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, + (u32) ug_info->extendedFilteringChainPointer); + + } else { /* initialize 82xx style address filtering */ + + /* Init individual address recognition registers to disabled */ + + for (j = 0; j < NUM_OF_PADDRS; j++) + ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); + + /* Create CQs for hash tables */ + if (ug_info->maxGroupAddrInHash > 0) { + INIT_LIST_HEAD(&ugeth->group_hash_q); + } + if (ug_info->maxIndAddrInHash > 0) { + INIT_LIST_HEAD(&ugeth->ind_hash_q); + } + p_82xx_addr_filt = + (ucc_geth_82xx_address_filtering_pram_t *) ugeth-> + p_rx_glbl_pram->addressfiltering; + + ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, + ENET_ADDR_TYPE_GROUP); + ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, + ENET_ADDR_TYPE_INDIVIDUAL); + } + + /* + * Initialize UCC at QE level + */ + + command = QE_INIT_TX_RX; + + /* Allocate shadow InitEnet command parameter structure. + * This is needed because after the InitEnet command is executed, + * the structure in DPRAM is released, because DPRAM is a premium + * resource. + * This shadow structure keeps a copy of what was done so that the + * allocated resources can be released when the channel is freed. 
+ */ + if (!(ugeth->p_init_enet_param_shadow = + (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t), + GFP_KERNEL))) { + ugeth_err + ("%s: Can not allocate memory for" + " p_UccInitEnetParamShadows.", __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + /* Zero out *p_init_enet_param_shadow */ + memset((char *)ugeth->p_init_enet_param_shadow, + 0, sizeof(ucc_geth_init_pram_t)); + + /* Fill shadow InitEnet command parameter structure */ + + ugeth->p_init_enet_param_shadow->resinit1 = + ENET_INIT_PARAM_MAGIC_RES_INIT1; + ugeth->p_init_enet_param_shadow->resinit2 = + ENET_INIT_PARAM_MAGIC_RES_INIT2; + ugeth->p_init_enet_param_shadow->resinit3 = + ENET_INIT_PARAM_MAGIC_RES_INIT3; + ugeth->p_init_enet_param_shadow->resinit4 = + ENET_INIT_PARAM_MAGIC_RES_INIT4; + ugeth->p_init_enet_param_shadow->resinit5 = + ENET_INIT_PARAM_MAGIC_RES_INIT5; + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; + + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ugeth->rx_glbl_pram_offset | ug_info->riscRx; + if ((ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) + && (ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + && (ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { + ugeth_err("%s: Invalid largest External Lookup Key Size.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return -EINVAL; + } + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = + ug_info->largestexternallookupkeysize; + size = sizeof(ucc_geth_thread_rx_pram_t); + if (ug_info->rxExtendedFiltering) { + size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; + if (ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; + if (ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; + } + + if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> + p_init_enet_param_shadow->rxthread[0]), + (u8) (numThreadsRxNumerical + 1) + /* Rx needs one extra for terminator */ + , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, + ug_info->riscRx, 1)) != 0) { + ugeth_err("%s: Can not fill p_init_enet_param_shadow.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + ugeth->p_init_enet_param_shadow->txglobal = + ugeth->tx_glbl_pram_offset | ug_info->riscTx; + if ((ret_val = + fill_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), numThreadsTxNumerical, + sizeof(ucc_geth_thread_tx_pram_t), + UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, + ug_info->riscTx, 0)) != 0) { + ugeth_err("%s: Can not fill p_init_enet_param_shadow.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + + /* Load Rx bds with buffers */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { + ugeth_err("%s: Can not fill Rx bds with buffers.", + __FUNCTION__); + ucc_geth_memclean(ugeth); + return ret_val; + } + } + + /* Allocate InitEnet command parameter structure */ + init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4); + if (IS_MURAM_ERR(init_enet_pram_offset)) { + ugeth_err + ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 
+ __FUNCTION__); + ucc_geth_memclean(ugeth); + return -ENOMEM; + } + p_init_enet_pram = + (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset); + + /* Copy shadow InitEnet command parameter structure into PRAM */ + p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1; + p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2; + p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3; + p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4; + out_be16(&p_init_enet_pram->resinit5, + ugeth->p_init_enet_param_shadow->resinit5); + p_init_enet_pram->largestexternallookupkeysize = + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize; + out_be32(&p_init_enet_pram->rgftgfrxglobal, + ugeth->p_init_enet_param_shadow->rgftgfrxglobal); + for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) + out_be32(&p_init_enet_pram->rxthread[i], + ugeth->p_init_enet_param_shadow->rxthread[i]); + out_be32(&p_init_enet_pram->txglobal, + ugeth->p_init_enet_param_shadow->txglobal); + for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) + out_be32(&p_init_enet_pram->txthread[i], + ugeth->p_init_enet_param_shadow->txthread[i]); + + /* Issue QE command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, + init_enet_pram_offset); + + /* Free InitEnet command parameter */ + qe_muram_free(init_enet_pram_offset); + + return 0; +} + +/* returns a net_device_stats structure pointer */ +static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + + return &(ugeth->stats); +} + +/* ucc_geth_timeout gets called when a packet has not been + * transmitted after a set amount of time. + * For now, assume that clearing out all the structures, and + * starting over will fix the problem. */ +static void ucc_geth_timeout(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ugeth->stats.tx_errors++; + + ugeth_dump_regs(ugeth); + + if (dev->flags & IFF_UP) { + ucc_geth_stop(ugeth); + ucc_geth_startup(ugeth); + } + + netif_schedule(dev); +} + +/* This is called by the kernel when a frame is ready for transmission. 
*/ +/* It is pointed to by the dev->hard_start_xmit function pointer */ +static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + u8 *bd; /* BD pointer */ + u32 bd_status; + u8 txQ = 0; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irq(&ugeth->lock); + + ugeth->stats.tx_bytes += skb->len; + + /* Start from the next BD that should be filled */ + bd = ugeth->txBd[txQ]; + bd_status = BD_STATUS_AND_LENGTH(bd); + /* Save the skb pointer so we can free it later */ + ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; + + /* Update the current skb pointer (wrapping if this was the last) */ + ugeth->skb_curtx[txQ] = + (ugeth->skb_curtx[txQ] + + 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); + + /* set up the buffer descriptor */ + BD_BUFFER_SET(bd, + dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); + + //printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); + + bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; + + BD_STATUS_AND_LENGTH_SET(bd, bd_status); + + dev->trans_start = jiffies; + + /* Move to next BD in the ring */ + if (!(bd_status & T_W)) + ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD; + else + ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ]; + + /* If the next BD still needs to be cleaned up, then the bds + are full. We need to tell the kernel to stop sending us stuff. */ + if (bd == ugeth->confBd[txQ]) { + if (!netif_queue_stopped(dev)) + netif_stop_queue(dev); + } + + if (ugeth->p_scheduler) { + ugeth->cpucount[txQ]++; + /* Indicate to QE that there are more Tx bds ready for + transmission */ + /* This is done by writing a running counter of the bd + count to the scheduler PRAM. */ + out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); + } + + spin_unlock_irq(&ugeth->lock); + + return 0; +} + +static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit) +{ + struct sk_buff *skb; + u8 *bd; + u16 length, howmany = 0; + u32 bd_status; + u8 *bdBuffer; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + spin_lock(&ugeth->lock); + /* collect received buffers */ + bd = ugeth->rxBd[rxQ]; + + bd_status = BD_STATUS_AND_LENGTH(bd); + + /* while there are received buffers and BD is full (~R_E) */ + while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { + bdBuffer = (u8 *) BD_BUFFER(bd); + length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); + skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; + + /* determine whether buffer is first, last, first and last + (single buffer frame) or middle (not first and not last) */ + if (!skb || + (!(bd_status & (R_F | R_L))) || + (bd_status & R_ERRORS_FATAL)) { + ugeth_vdbg("%s, %d: ERROR!!! 
skb - 0x%08x", + __FUNCTION__, __LINE__, (u32) skb); + if (skb) + dev_kfree_skb_any(skb); + + ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; + ugeth->stats.rx_dropped++; + } else { + ugeth->stats.rx_packets++; + howmany++; + + /* Prep the skb for the packet */ + skb_put(skb, length); + + /* Tell the skb what kind of packet this is */ + skb->protocol = eth_type_trans(skb, ugeth->dev); + + ugeth->stats.rx_bytes += length; + /* Send the packet up the stack */ +#ifdef CONFIG_UGETH_NAPI + netif_receive_skb(skb); +#else + netif_rx(skb); +#endif /* CONFIG_UGETH_NAPI */ + } + + ugeth->dev->last_rx = jiffies; + + skb = get_new_skb(ugeth, bd); + if (!skb) { + ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); + spin_unlock(&ugeth->lock); + ugeth->stats.rx_dropped++; + break; + } + + ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; + + /* update to point at the next skb */ + ugeth->skb_currx[rxQ] = + (ugeth->skb_currx[rxQ] + + 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); + + if (bd_status & R_W) + bd = ugeth->p_rx_bd_ring[rxQ]; + else + bd += UCC_GETH_SIZE_OF_BD; + + bd_status = BD_STATUS_AND_LENGTH(bd); + } + + ugeth->rxBd[rxQ] = bd; + spin_unlock(&ugeth->lock); + return howmany; +} + +static int ucc_geth_tx(struct net_device *dev, u8 txQ) +{ + /* Start from the next BD that should be filled */ + ucc_geth_private_t *ugeth = netdev_priv(dev); + u8 *bd; /* BD pointer */ + u32 bd_status; + + bd = ugeth->confBd[txQ]; + bd_status = BD_STATUS_AND_LENGTH(bd); + + /* Normal processing. */ + while ((bd_status & T_R) == 0) { + /* BD contains already transmitted buffer. */ + /* Handle the transmitted buffer and release */ + /* the BD to be used with the current frame */ + + if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) + break; + + ugeth->stats.tx_packets++; + + /* Free the sk buffer associated with this TxBD */ + dev_kfree_skb_irq(ugeth-> + tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]); + ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; + ugeth->skb_dirtytx[txQ] = + (ugeth->skb_dirtytx[txQ] + + 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); + + /* We freed a buffer, so now we can restart transmission */ + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + /* Advance the confirmation BD pointer */ + if (!(bd_status & T_W)) + ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD; + else + ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ]; + } + return 0; +} + +#ifdef CONFIG_UGETH_NAPI +static int ucc_geth_poll(struct net_device *dev, int *budget) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + int howmany; + int rx_work_limit = *budget; + u8 rxQ = 0; + + if (rx_work_limit > dev->quota) + rx_work_limit = dev->quota; + + howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit); + + dev->quota -= howmany; + rx_work_limit -= howmany; + *budget -= howmany; + + if (rx_work_limit >= 0) + netif_rx_complete(dev); + + return (rx_work_limit < 0) ? 
1 : 0; +} +#endif /* CONFIG_UGETH_NAPI */ + +static irqreturn_t ucc_geth_irq_handler(int irq, void *info, + struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *)info; + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_fast_private_t *uccf; + ucc_geth_info_t *ug_info; + register u32 ucce = 0; + register u32 bit_mask = UCCE_RXBF_SINGLE_MASK; + register u32 tx_mask = UCCE_TXBF_SINGLE_MASK; + register u8 i; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + if (!ugeth) + return IRQ_NONE; + + uccf = ugeth->uccf; + ug_info = ugeth->ug_info; + + do { + ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm)); + + /* clear event bits for next time */ + /* Side effect here is to mask ucce variable + for future processing below. */ + out_be32(uccf->p_ucce, ucce); /* Clear with ones, + but only bits in UCCM */ + + /* We ignore Tx interrupts because Tx confirmation is + done inside Tx routine */ + + for (i = 0; i < ug_info->numQueuesRx; i++) { + if (ucce & bit_mask) + ucc_geth_rx(ugeth, i, + (int)ugeth->ug_info-> + bdRingLenRx[i]); + ucce &= ~bit_mask; + bit_mask <<= 1; + } + + for (i = 0; i < ug_info->numQueuesTx; i++) { + if (ucce & tx_mask) + ucc_geth_tx(dev, i); + ucce &= ~tx_mask; + tx_mask <<= 1; + } + + /* Exceptions */ + if (ucce & UCCE_BSY) { + ugeth_vdbg("Got BUSY irq!!!!"); + ugeth->stats.rx_errors++; + ucce &= ~UCCE_BSY; + } + if (ucce & UCCE_OTHER) { + ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!", + ucce); + ugeth->stats.rx_errors++; + ucce &= ~ucce; + } + } + while (ucce); + + return IRQ_HANDLED; +} + +static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *)dev_id; + ucc_geth_private_t *ugeth = netdev_priv(dev); + + ugeth_vdbg("%s: IN", __FUNCTION__); + + /* Clear the interrupt */ + mii_clear_phy_interrupt(ugeth->mii_info); + + /* Disable PHY interrupts */ + mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED); + + /* Schedule the phy change */ + schedule_work(&ugeth->tq); + + return IRQ_HANDLED; +} + +/* Scheduled by the phy_interrupt/timer to handle PHY changes */ +static void ugeth_phy_change(void *data) +{ + struct net_device *dev = (struct net_device *)data; + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_geth_t *ug_regs; + int result = 0; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ug_regs = ugeth->ug_regs; + + /* Delay to give the PHY a chance to change the + * register state */ + msleep(1); + + /* Update the link, speed, duplex */ + result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info); + + /* Adjust the known status as long as the link + * isn't still coming up */ + if ((0 == result) || (ugeth->mii_info->link == 0)) + adjust_link(dev); + + /* Reenable interrupts, if needed */ + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) + mii_configure_phy_interrupt(ugeth->mii_info, + MII_INTERRUPT_ENABLED); +} + +/* Called every so often on systems that don't interrupt + * the core for PHY changes */ +static void ugeth_phy_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + ucc_geth_private_t *ugeth = netdev_priv(dev); + + schedule_work(&ugeth->tq); + + mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ); +} + +/* Keep trying aneg for some time + * If, after GFAR_AN_TIMEOUT seconds, it has not + * finished, we switch to forced. 
+ * Either way, once the process has completed, we either + * request the interrupt, or switch the timer over to + * using ugeth_phy_timer to check status */ +static void ugeth_phy_startup_timer(unsigned long data) +{ + struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data; + ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev); + static int secondary = UGETH_AN_TIMEOUT; + int result; + + /* Configure the Auto-negotiation */ + result = mii_info->phyinfo->config_aneg(mii_info); + + /* If autonegotiation failed to start, and + * we haven't timed out, reset the timer, and return */ + if (result && secondary--) { + mod_timer(&ugeth->phy_info_timer, jiffies + HZ); + return; + } else if (result) { + /* Couldn't start autonegotiation. + * Try switching to forced */ + mii_info->autoneg = 0; + result = mii_info->phyinfo->config_aneg(mii_info); + + /* Forcing failed! Give up */ + if (result) { + ugeth_err("%s: Forcing failed!", mii_info->dev->name); + return; + } + } + + /* Kill the timer so it can be restarted */ + del_timer_sync(&ugeth->phy_info_timer); + + /* Grab the PHY interrupt, if necessary/possible */ + if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { + if (request_irq(ugeth->ug_info->phy_interrupt, + phy_interrupt, + SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) { + ugeth_err("%s: Can't get IRQ %d (PHY)", + mii_info->dev->name, + ugeth->ug_info->phy_interrupt); + } else { + mii_configure_phy_interrupt(ugeth->mii_info, + MII_INTERRUPT_ENABLED); + return; + } + } + + /* Start the timer again, this time in order to + * handle a change in status */ + init_timer(&ugeth->phy_info_timer); + ugeth->phy_info_timer.function = &ugeth_phy_timer; + ugeth->phy_info_timer.data = (unsigned long)mii_info->dev; + mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ); +} + +/* Called when something needs to use the ethernet device */ +/* Returns 0 for success. */ +static int ucc_geth_open(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + int err; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + /* Test station address */ + if (dev->dev_addr[0] & ENET_GROUP_ADDR) { + ugeth_err("%s: Multicast address used for station address" + " - is this what you wanted?", __FUNCTION__); + return -EINVAL; + } + + err = ucc_geth_startup(ugeth); + if (err) { + ugeth_err("%s: Cannot configure net device, aborting.", + dev->name); + return err; + } + + err = adjust_enet_interface(ugeth); + if (err) { + ugeth_err("%s: Cannot configure net device, aborting.", + dev->name); + return err; + } + + /* Set MACSTNADDR1, MACSTNADDR2 */ + /* For more details see the hardware spec. 
*/ + init_mac_station_addr_regs(dev->dev_addr[0], + dev->dev_addr[1], + dev->dev_addr[2], + dev->dev_addr[3], + dev->dev_addr[4], + dev->dev_addr[5], + &ugeth->ug_regs->macstnaddr1, + &ugeth->ug_regs->macstnaddr2); + + err = init_phy(dev); + if (err) { + ugeth_err("%s: Cannot initialzie PHY, aborting.", dev->name); + return err; + } +#ifndef CONFIG_UGETH_NAPI + err = + request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0, + "UCC Geth", dev); + if (err) { + ugeth_err("%s: Cannot get IRQ for net device, aborting.", + dev->name); + ucc_geth_stop(ugeth); + return err; + } +#endif /* CONFIG_UGETH_NAPI */ + + /* Set up the PHY change work queue */ + INIT_WORK(&ugeth->tq, ugeth_phy_change, dev); + + init_timer(&ugeth->phy_info_timer); + ugeth->phy_info_timer.function = &ugeth_phy_startup_timer; + ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info; + mod_timer(&ugeth->phy_info_timer, jiffies + HZ); + + err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); + if (err) { + ugeth_err("%s: Cannot enable net device, aborting.", dev->name); + ucc_geth_stop(ugeth); + return err; + } + + netif_start_queue(dev); + + return err; +} + +/* Stops the kernel queue, and halts the controller */ +static int ucc_geth_close(struct net_device *dev) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ucc_geth_stop(ugeth); + + /* Shutdown the PHY */ + if (ugeth->mii_info->phyinfo->close) + ugeth->mii_info->phyinfo->close(ugeth->mii_info); + + kfree(ugeth->mii_info); + + netif_stop_queue(dev); + + return 0; +} + +struct ethtool_ops ucc_geth_ethtool_ops = { + .get_settings = NULL, + .get_drvinfo = NULL, + .get_regs_len = NULL, + .get_regs = NULL, + .get_link = NULL, + .get_coalesce = NULL, + .set_coalesce = NULL, + .get_ringparam = NULL, + .set_ringparam = NULL, + .get_strings = NULL, + .get_stats_count = NULL, + .get_ethtool_stats = NULL, +}; + +static int ucc_geth_probe(struct device *device) +{ + struct platform_device *pdev = to_platform_device(device); + struct ucc_geth_platform_data *ugeth_pdata; + struct net_device *dev = NULL; + struct ucc_geth_private *ugeth = NULL; + struct ucc_geth_info *ug_info; + int err; + static int mii_mng_configured = 0; + + ugeth_vdbg("%s: IN", __FUNCTION__); + + ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data; + + ug_info = &ugeth_info[pdev->id]; + ug_info->uf_info.ucc_num = pdev->id; + ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock; + ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock; + ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr; + ug_info->uf_info.irq = platform_get_irq(pdev, 0); + ug_info->phy_address = ugeth_pdata->phy_id; + ug_info->enet_interface = ugeth_pdata->phy_interface; + ug_info->board_flags = ugeth_pdata->board_flags; + ug_info->phy_interrupt = ugeth_pdata->phy_interrupt; + + printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", + ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, + ug_info->uf_info.irq); + + if (ug_info == NULL) { + ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__, + pdev->id); + return -ENODEV; + } + + if (!mii_mng_configured) { + ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num); + mii_mng_configured = 1; + } + + /* Create an ethernet device instance */ + dev = alloc_etherdev(sizeof(*ugeth)); + + if (dev == NULL) + return -ENOMEM; + + ugeth = netdev_priv(dev); + spin_lock_init(&ugeth->lock); + + dev_set_drvdata(device, dev); + + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long)(ug_info->uf_info.regs); + + 
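/*
 * Illustrative sketch, not part of the original patch: ucc_geth_open()
 * above rejects any station address whose first octet has ENET_GROUP_ADDR
 * (0x01) set.  In Ethernet addressing the least significant bit of the
 * first address octet is the individual/group (I/G) bit, so an address
 * with that bit set is a multicast/group address and cannot serve as a
 * unicast station address.  The helper and the sample addresses below are
 * hypothetical and exist only to demonstrate the same test in a
 * standalone user-space program.
 */
#include <stdio.h>

#define ENET_GROUP_ADDR 0x01 /* I/G bit in the first address octet */

/* Return nonzero if the MAC address is a group (multicast) address. */
static int is_group_address(const unsigned char *mac)
{
	return mac[0] & ENET_GROUP_ADDR;
}

int main(void)
{
	unsigned char unicast[6] = { 0x00, 0x04, 0x9f, 0x12, 0x34, 0x56 };
	unsigned char multicast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("unicast first octet 0x%02x -> group bit %d\n",
	       unicast[0], is_group_address(unicast));
	printf("multicast first octet 0x%02x -> group bit %d\n",
	       multicast[0], is_group_address(multicast));
	return 0;
}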
SET_MODULE_OWNER(dev); + SET_NETDEV_DEV(dev, device); + + /* Fill in the dev structure */ + dev->open = ucc_geth_open; + dev->hard_start_xmit = ucc_geth_start_xmit; + dev->tx_timeout = ucc_geth_timeout; + dev->watchdog_timeo = TX_TIMEOUT; +#ifdef CONFIG_UGETH_NAPI + dev->poll = ucc_geth_poll; + dev->weight = UCC_GETH_DEV_WEIGHT; +#endif /* CONFIG_UGETH_NAPI */ + dev->stop = ucc_geth_close; + dev->get_stats = ucc_geth_get_stats; +// dev->change_mtu = ucc_geth_change_mtu; + dev->mtu = 1500; + dev->set_multicast_list = ucc_geth_set_multi; + dev->ethtool_ops = &ucc_geth_ethtool_ops; + + err = register_netdev(dev); + if (err) { + ugeth_err("%s: Cannot register net device, aborting.", + dev->name); + free_netdev(dev); + return err; + } + + ugeth->ug_info = ug_info; + ugeth->dev = dev; + memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6); + + return 0; +} + +static int ucc_geth_remove(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct ucc_geth_private *ugeth = netdev_priv(dev); + + dev_set_drvdata(device, NULL); + ucc_geth_memclean(ugeth); + free_netdev(dev); + + return 0; +} + +/* Structure for a device driver */ +static struct device_driver ucc_geth_driver = { + .name = DRV_NAME, + .bus = &platform_bus_type, + .probe = ucc_geth_probe, + .remove = ucc_geth_remove, +}; + +static int __init ucc_geth_init(void) +{ + int i; + printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); + for (i = 0; i < 8; i++) + memcpy(&(ugeth_info[i]), &ugeth_primary_info, + sizeof(ugeth_primary_info)); + + return driver_register(&ucc_geth_driver); +} + +static void __exit ucc_geth_exit(void) +{ + driver_unregister(&ucc_geth_driver); +} + +module_init(ucc_geth_init); +module_exit(ucc_geth_exit); + +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h new file mode 100644 index 00000000000..005965f5dd9 --- /dev/null +++ b/drivers/net/ucc_geth.h @@ -0,0 +1,1339 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish <gridish@freescale.com> + * + * Description: + * Internal header file for UCC Gigabit Ethernet unit routines. + * + * Changelog: + * Jun 28, 2006 Li Yang <LeoLi@freescale.com> + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __UCC_GETH_H__ +#define __UCC_GETH_H__ + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/fsl_devices.h> + +#include <asm/immap_qe.h> +#include <asm/qe.h> + +#include <asm/ucc.h> +#include <asm/ucc_fast.h> + +#define NUM_TX_QUEUES 8 +#define NUM_RX_QUEUES 8 +#define NUM_BDS_IN_PREFETCHED_BDS 4 +#define TX_IP_OFFSET_ENTRY_MAX 8 +#define NUM_OF_PADDRS 4 +#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9 +#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8 + +typedef struct ucc_mii_mng { + u32 miimcfg; /* MII management configuration reg */ + u32 miimcom; /* MII management command reg */ + u32 miimadd; /* MII management address reg */ + u32 miimcon; /* MII management control reg */ + u32 miimstat; /* MII management status reg */ + u32 miimind; /* MII management indication reg */ +} __attribute__ ((packed)) ucc_mii_mng_t; + +typedef struct ucc_geth { + ucc_fast_t uccf; + + u32 maccfg1; /* mac configuration reg. 
1 */
+ u32 maccfg2; /* mac configuration reg. 2 */
+ u32 ipgifg; /* interframe gap reg. */
+ u32 hafdup; /* half-duplex reg. */
+ u8 res1[0x10];
+ ucc_mii_mng_t miimng; /* MII management structure */
+ u32 ifctl; /* interface control reg */
+ u32 ifstat; /* interface status reg */
+ u32 macstnaddr1; /* mac station address part 1 reg */
+ u32 macstnaddr2; /* mac station address part 2 reg */
+ u8 res2[0x8];
+ u32 uempr; /* UCC Ethernet Mac parameter reg */
+ u32 utbipar; /* UCC tbi address reg */
+ u16 uescr; /* UCC Ethernet statistics control reg */
+ u8 res3[0x180 - 0x15A];
+ u32 tx64; /* Total number of frames (including bad
+ frames) transmitted that were exactly of the
+ minimal length (64 for untagged, 68 for
+ tagged, or with length exactly equal to the
+ parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 rx64; /* Total number of frames received including
+ bad frames that were exactly of the minimal
+ length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+ that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u8 res4[0x2];
+ u32 tmca; /* Total number of frames that were transmitted
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+ successfully that had destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. 
Must be implemented in + HW because it includes octets in frames that + never even reach the UCC */ + u32 rmca; /* Total number of frames that were received + succesfully with the group address bit set + that are not broadcast frames */ + u32 rbca; /* Total number of frames received succesfully + that had destination address equal to the + broadcast address */ + u32 scar; /* Statistics carry register */ + u32 scam; /* Statistics caryy mask register */ + u8 res5[0x200 - 0x1c4]; +} __attribute__ ((packed)) ucc_geth_t; + +/* UCC GETH TEMODR Register */ +#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics + */ +#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */ +#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4 + checksums */ +#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance + optimization + enhancement (mode1) */ +#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics + */ +#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues << + shift */ + +/* UCC GETH TEMODR Register */ +#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx + statistics */ +#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable + extended + features */ +#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 ) /* vlan operation + tagged << shift */ +#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non + tagged << shift */ +#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift + */ +#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx + statistics */ +#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended + filtering + vs. + mpc82xx-like + filtering */ +#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues << + shift */ +#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable + dynamic max + frame length + */ +#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable + dynamic min + frame length + */ +#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4 + checksums */ +#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip + address to + 4-byte + boundary */ + +/* UCC GETH Event Register */ +#define UCCE_MPD 0x80000000 /* Magic packet + detection */ +#define UCCE_SCAR 0x40000000 +#define UCCE_GRA 0x20000000 /* Tx graceful + stop + complete */ +#define UCCE_CBPR 0x10000000 +#define UCCE_BSY 0x08000000 +#define UCCE_RXC 0x04000000 +#define UCCE_TXC 0x02000000 +#define UCCE_TXE 0x01000000 +#define UCCE_TXB7 0x00800000 +#define UCCE_TXB6 0x00400000 +#define UCCE_TXB5 0x00200000 +#define UCCE_TXB4 0x00100000 +#define UCCE_TXB3 0x00080000 +#define UCCE_TXB2 0x00040000 +#define UCCE_TXB1 0x00020000 +#define UCCE_TXB0 0x00010000 +#define UCCE_RXB7 0x00008000 +#define UCCE_RXB6 0x00004000 +#define UCCE_RXB5 0x00002000 +#define UCCE_RXB4 0x00001000 +#define UCCE_RXB3 0x00000800 +#define UCCE_RXB2 0x00000400 +#define UCCE_RXB1 0x00000200 +#define UCCE_RXB0 0x00000100 +#define UCCE_RXF7 0x00000080 +#define UCCE_RXF6 0x00000040 +#define UCCE_RXF5 0x00000020 +#define UCCE_RXF4 0x00000010 +#define UCCE_RXF3 0x00000008 +#define UCCE_RXF2 0x00000004 +#define UCCE_RXF1 0x00000002 +#define UCCE_RXF0 0x00000001 + +#define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0) +#define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0) + +#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\ + UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0) +#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\ + UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0) +#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | 
UCCE_RXF5 | UCCE_RXF4 |\ + UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0) +#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\ + UCCE_RXC | UCCE_TXC | UCCE_TXE) + +/* UCC GETH UPSMR (Protocol Specific Mode Register) */ +#define UPSMR_ECM 0x04000000 /* Enable CAM + Miss or + Enable + Filtering + Miss */ +#define UPSMR_HSE 0x02000000 /* Hardware + Statistics + Enable */ +#define UPSMR_PRO 0x00400000 /* Promiscuous*/ +#define UPSMR_CAP 0x00200000 /* CAM polarity + */ +#define UPSMR_RSH 0x00100000 /* Receive + Short Frames + */ +#define UPSMR_RPM 0x00080000 /* Reduced Pin + Mode + interfaces */ +#define UPSMR_R10M 0x00040000 /* RGMII/RMII + 10 Mode */ +#define UPSMR_RLPB 0x00020000 /* RMII + Loopback + Mode */ +#define UPSMR_TBIM 0x00010000 /* Ten-bit + Interface + Mode */ +#define UPSMR_RMM 0x00001000 /* RMII/RGMII + Mode */ +#define UPSMR_CAM 0x00000400 /* CAM Address + Matching */ +#define UPSMR_BRO 0x00000200 /* Broadcast + Address */ +#define UPSMR_RES1 0x00002000 /* Reserved + feild - must + be 1 */ + +/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ +#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control + Rx */ +#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control + Tx */ +#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable + synchronized + to Rx stream + */ +#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */ +#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable + synchronized + to Tx stream + */ +#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */ + +/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */ +#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble + Length << + shift */ +#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble + Length mask */ +#define MACCFG2_SRP 0x00000080 /* Soft Receive + Preamble */ +#define MACCFG2_STP 0x00000040 /* Soft + Transmit + Preamble */ +#define MACCFG2_RESERVED_1 0x00000020 /* Reserved - + must be set + to 1 */ +#define MACCFG2_LC 0x00000010 /* Length Check + */ +#define MACCFG2_MPE 0x00000008 /* Magic packet + detect */ +#define MACCFG2_FDX 0x00000001 /* Full Duplex */ +#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex + mask */ +#define MACCFG2_PAD_CRC 0x00000004 +#define MACCFG2_CRC_EN 0x00000002 +#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither + Padding + short frames + nor CRC */ +#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC + only */ +#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004 +#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode + (MII/RMII/RGMII + 10/100bps) */ +#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode + (GMII/TBI/RTB/RGMII + 1000bps ) */ +#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask + covering all + relevant + bits */ + +/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non + back-to-back + inter frame + gap part 1. + << shift */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non + back-to-back + inter frame + gap part 2. + << shift */ +#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Mimimum IFG + Enforcement + << shift */ +#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back + inter frame + gap << shift + */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back + inter frame gap part + 1. max val */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back + inter frame gap part + 2. 
max val */ +#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Mimimum IFG + Enforcement max val */ +#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter + frame gap max val */ +#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000 +#define IPGIFG_NBTB_IPG_MASK 0x007F0000 +#define IPGIFG_MIN_IFG_MASK 0x0000FF00 +#define IPGIFG_BTB_IPG_MASK 0x0000007F + +/* UCC GETH HAFDUP (Half Duplex Register) */ +#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate + Binary + Exponential + Backoff + Truncation + << shift */ +#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary + Exponential Backoff + Truncation max val */ +#define HALFDUP_ALT_BEB 0x00080000 /* Alternate + Binary + Exponential + Backoff */ +#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back + pressure no + backoff */ +#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */ +#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive + Defer */ +#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum + Retransmission + << shift */ +#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum + Retransmission max + val */ +#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision + Window << + shift */ +#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max + val */ +#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000 +#define HALFDUP_RETRANS_MASK 0x0000F000 +#define HALFDUP_COL_WINDOW_MASK 0x0000003F + +/* UCC GETH UCCS (Ethernet Status Register) */ +#define UCCS_BPR 0x02 /* Back pressure (in + half duplex mode) */ +#define UCCS_PAU 0x02 /* Pause state (in full + duplex mode) */ +#define UCCS_MPD 0x01 /* Magic Packet + Detected */ + +/* UCC GETH MIIMCFG (MII Management Configuration Register) */ +#define MIIMCFG_RESET_MANAGEMENT 0x80000000 /* Reset + management */ +#define MIIMCFG_NO_PREAMBLE 0x00000010 /* Preamble + suppress */ +#define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31) /* clock divide + << shift */ +#define MIIMCFG_CLOCK_DIVIDE_MAX 0xf /* clock divide max val + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_2 0x00000000 /* divide by 2 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001 /* divide by 4 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002 /* divide by 6 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003 /* divide by 8 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004 /* divide by 10 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005 /* divide by 14 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_16 0x00000008 /* divide by 16 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006 /* divide by 20 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007 /* divide by 28 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_32 0x00000009 /* divide by 32 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_48 0x0000000a /* divide by 48 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_64 0x0000000b /* divide by 64 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_80 0x0000000c /* divide by 80 + */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112 0x0000000d /* divide by + 112 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_160 0x0000000e /* divide by + 160 */ +#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_224 0x0000000f /* divide by + 224 */ + +/* UCC GETH MIIMCOM (MII Management Command Register) */ +#define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */ +#define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */ + +/* UCC GETH MIIMADD (MII Management Address Register) */ +#define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23) /* PHY Address + << shift */ +#define 
MIIMADD_PHY_REGISTER_SHIFT (31 - 31) /* PHY Register + << shift */ + +/* UCC GETH MIIMCON (MII Management Control Register) */ +#define MIIMCON_PHY_CONTROL_SHIFT (31 - 31) /* PHY Control + << shift */ +#define MIIMCON_PHY_STATUS_SHIFT (31 - 31) /* PHY Status + << shift */ + +/* UCC GETH MIIMIND (MII Management Indicator Register) */ +#define MIIMIND_NOT_VALID 0x00000004 /* Not valid */ +#define MIIMIND_SCAN 0x00000002 /* Scan in + progress */ +#define MIIMIND_BUSY 0x00000001 + +/* UCC GETH IFSTAT (Interface Status Register) */ +#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive + transmission + defer */ + +/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */ +#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station + address 6th + octet << + shift */ +#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station + address 5th + octet << + shift */ +#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station + address 4th + octet << + shift */ +#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station + address 3rd + octet << + shift */ + +/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */ +#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station + address 2nd + octet << + shift */ +#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station + address 1st + octet << + shift */ + +/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */ +#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time + value << + shift */ +#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended + pause time + value << + shift */ + +/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */ +#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address + << shift */ +#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address + mask */ + +/* UCC GETH UESCR (Ethernet Statistics Control Register) */ +#define UESCR_AUTOZ 0x8000 /* Automatically zero + addressed + statistical counter + values */ +#define UESCR_CLRCNT 0x4000 /* Clear all statistics + counters */ +#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max + Coalescing + Value << + shift */ +#define UESCR_SCOV_SHIFT (15 - 15) /* Status + Coalescing + Value << + shift */ + +/* UCC GETH UDSR (Data Synchronization Register) */ +#define UDSR_MAGIC 0x067E + +typedef struct ucc_geth_thread_data_tx { + u8 res0[104]; +} __attribute__ ((packed)) ucc_geth_thread_data_tx_t; + +typedef struct ucc_geth_thread_data_rx { + u8 res0[40]; +} __attribute__ ((packed)) ucc_geth_thread_data_rx_t; + +/* Send Queue Queue-Descriptor */ +typedef struct ucc_geth_send_queue_qd { + u32 bd_ring_base; /* pointer to BD ring base address */ + u8 res0[0x8]; + u32 last_bd_completed_address;/* initialize to last entry in BD ring */ + u8 res1[0x30]; +} __attribute__ ((packed)) ucc_geth_send_queue_qd_t; + +typedef struct ucc_geth_send_queue_mem_region { + ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES]; +} __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t; + +typedef struct ucc_geth_thread_tx_pram { + u8 res0[64]; +} __attribute__ ((packed)) ucc_geth_thread_tx_pram_t; + +typedef struct ucc_geth_thread_rx_pram { + u8 res0[128]; +} __attribute__ ((packed)) ucc_geth_thread_rx_pram_t; + +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96 + +typedef struct ucc_geth_scheduler { + u16 cpucount0; /* CPU packet counter */ + u16 cpucount1; /* CPU packet counter */ + u16 cecount0; /* QE packet counter */ + u16 cecount1; /* QE packet counter */ + u16 cpucount2; /* CPU 
packet counter */ + u16 cpucount3; /* CPU packet counter */ + u16 cecount2; /* QE packet counter */ + u16 cecount3; /* QE packet counter */ + u16 cpucount4; /* CPU packet counter */ + u16 cpucount5; /* CPU packet counter */ + u16 cecount4; /* QE packet counter */ + u16 cecount5; /* QE packet counter */ + u16 cpucount6; /* CPU packet counter */ + u16 cpucount7; /* CPU packet counter */ + u16 cecount6; /* QE packet counter */ + u16 cecount7; /* QE packet counter */ + u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */ + u32 rtsrshadow; /* temporary variable handled by QE */ + u32 time; /* temporary variable handled by QE */ + u32 ttl; /* temporary variable handled by QE */ + u32 mblinterval; /* max burst length interval */ + u16 nortsrbytetime; /* normalized value of byte time in tsr units */ + u8 fracsiz; /* radix 2 log value of denom. of + NorTSRByteTime */ + u8 res0[1]; + u8 strictpriorityq; /* Strict Priority Mask register */ + u8 txasap; /* Transmit ASAP register */ + u8 extrabw; /* Extra BandWidth register */ + u8 oldwfqmask; /* temporary variable handled by QE */ + u8 weightfactor[NUM_TX_QUEUES]; + /**< weight factor for queues */ + u32 minw; /* temporary variable handled by QE */ + u8 res1[0x70 - 0x64]; +} __attribute__ ((packed)) ucc_geth_scheduler_t; + +typedef struct ucc_geth_tx_firmware_statistics_pram { + u32 sicoltx; /* single collision */ + u32 mulcoltx; /* multiple collision */ + u32 latecoltxfr; /* late collision */ + u32 frabortduecol; /* frames aborted due to transmit collision */ + u32 frlostinmactxer; /* frames lost due to internal MAC error + transmission that are not counted on any + other counter */ + u32 carriersenseertx; /* carrier sense error */ + u32 frtxok; /* frames transmitted OK */ + u32 txfrexcessivedefer; /* frames with defferal time greater than + specified threshold */ + u32 txpkts256; /* total packets (including bad) between 256 + and 511 octets */ + u32 txpkts512; /* total packets (including bad) between 512 + and 1023 octets */ + u32 txpkts1024; /* total packets (including bad) between 1024 + and 1518 octets */ + u32 txpktsjumbo; /* total packets (including bad) between 1024 + and MAXLength octets */ +} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t; + +typedef struct ucc_geth_rx_firmware_statistics_pram { + u32 frrxfcser; /* frames with crc error */ + u32 fraligner; /* frames with alignment error */ + u32 inrangelenrxer; /* in range length error */ + u32 outrangelenrxer; /* out of range length error */ + u32 frtoolong; /* frame too long */ + u32 runt; /* runt */ + u32 verylongevent; /* very long event */ + u32 symbolerror; /* symbol error */ + u32 dropbsy; /* drop because of BD not ready */ + u8 res0[0x8]; + u32 mismatchdrop; /* drop because of MAC filtering (e.g. 
address + or type mismatch) */ + u32 underpkts; /* total frames less than 64 octets */ + u32 pkts256; /* total frames (including bad) between 256 and + 511 octets */ + u32 pkts512; /* total frames (including bad) between 512 and + 1023 octets */ + u32 pkts1024; /* total frames (including bad) between 1024 + and 1518 octets */ + u32 pktsjumbo; /* total frames (including bad) between 1024 + and MAXLength octets */ + u32 frlossinmacer; /* frames lost because of internal MAC error + that is not counted in any other counter */ + u32 pausefr; /* pause frames */ + u8 res1[0x4]; + u32 removevlan; /* total frames that had their VLAN tag removed + */ + u32 replacevlan; /* total frames that had their VLAN tag + replaced */ + u32 insertvlan; /* total frames that had their VLAN tag + inserted */ +} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t; + +typedef struct ucc_geth_rx_interrupt_coalescing_entry { + u32 interruptcoalescingmaxvalue; /* interrupt coalescing max + value */ + u32 interruptcoalescingcounter; /* interrupt coalescing counter, + initialize to + interruptcoalescingmaxvalue */ +} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t; + +typedef struct ucc_geth_rx_interrupt_coalescing_table { + ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES]; + /**< interrupt coalescing entry */ +} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t; + +typedef struct ucc_geth_rx_prefetched_bds { + qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ +} __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t; + +typedef struct ucc_geth_rx_bd_queues_entry { + u32 bdbaseptr; /* BD base pointer */ + u32 bdptr; /* BD pointer */ + u32 externalbdbaseptr; /* external BD base pointer */ + u32 externalbdptr; /* external BD pointer */ +} __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t; + +typedef struct ucc_geth_tx_global_pram { + u16 temoder; + u8 res0[0x38 - 0x02]; + u32 sqptr; /* a base pointer to send queue memory region */ + u32 schedulerbasepointer; /* a base pointer to scheduler memory + region */ + u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */ + u32 tstate; /* tx internal state. High byte contains + function code */ + u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; + u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */ + u32 tqptr; /* a base pointer to the Tx Queues Memory + Region */ + u8 res2[0x80 - 0x74]; +} __attribute__ ((packed)) ucc_geth_tx_global_pram_t; + +/* structure representing Extended Filtering Global Parameters in PRAM */ +typedef struct ucc_geth_exf_global_pram { + u32 l2pcdptr; /* individual address filter, high */ + u8 res0[0x10 - 0x04]; +} __attribute__ ((packed)) ucc_geth_exf_global_pram_t; + +typedef struct ucc_geth_rx_global_pram { + u32 remoder; /* ethernet mode reg. */ + u32 rqptr; /* base pointer to the Rx Queues Memory Region*/ + u32 res0[0x1]; + u8 res1[0x20 - 0xC]; + u16 typeorlen; /* cutoff point less than which, type/len field + is considered length */ + u8 res2[0x1]; + u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/ + u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */ + u8 res3[0x30 - 0x28]; + u32 intcoalescingptr; /* Interrupt coalescing table pointer */ + u8 res4[0x36 - 0x34]; + u8 rstate; /* rx internal state. High byte contains + function code */ + u8 res5[0x46 - 0x37]; + u16 mrblr; /* max receive buffer length reg. */ + u32 rbdqptr; /* base pointer to RxBD parameter table + description */ + u16 mflr; /* max frame length reg. 
*/ + u16 minflr; /* min frame length reg. */ + u16 maxd1; /* max dma1 length reg. */ + u16 maxd2; /* max dma2 length reg. */ + u32 ecamptr; /* external CAM address */ + u32 l2qt; /* VLAN priority mapping table. */ + u32 l3qt[0x8]; /* IP priority mapping table. */ + u16 vlantype; /* vlan type */ + u16 vlantci; /* default vlan tci */ + u8 addressfiltering[64]; /* address filtering data structure */ + u32 exfGlobalParam; /* base address for extended filtering global + parameters */ + u8 res6[0x100 - 0xC4]; /* Initialize to zero */ +} __attribute__ ((packed)) ucc_geth_rx_global_pram_t; + +#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 + +/* structure representing InitEnet command */ +typedef struct ucc_geth_init_pram { + u8 resinit1; + u8 resinit2; + u8 resinit3; + u8 resinit4; + u16 resinit5; + u8 res1[0x1]; + u8 largestexternallookupkeysize; + u32 rgftgfrxglobal; + u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */ + u8 res2[0x38 - 0x30]; + u32 txglobal; /* tx global */ + u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ + u8 res3[0x1]; +} __attribute__ ((packed)) ucc_geth_init_pram_t; + +#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) +#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) + +#define ENET_INIT_PARAM_RISC_MASK 0x0000003f +#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0 +#define ENET_INIT_PARAM_SNUM_MASK 0xff000000 +#define ENET_INIT_PARAM_SNUM_SHIFT 24 + +#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06 +#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30 +#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff +#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00 +#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400 + +/* structure representing 82xx Address Filtering Enet Address in PRAM */ +typedef struct ucc_geth_82xx_enet_address { + u8 res1[0x2]; + u16 h; /* address (MSB) */ + u16 m; /* address */ + u16 l; /* address (LSB) */ +} __attribute__ ((packed)) ucc_geth_82xx_enet_address_t; + +/* structure representing 82xx Address Filtering PRAM */ +typedef struct ucc_geth_82xx_address_filtering_pram { + u32 iaddr_h; /* individual address filter, high */ + u32 iaddr_l; /* individual address filter, low */ + u32 gaddr_h; /* group address filter, high */ + u32 gaddr_l; /* group address filter, low */ + ucc_geth_82xx_enet_address_t taddr; + ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS]; + u8 res0[0x40 - 0x38]; +} __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t; + +/* GETH Tx firmware statistics structure, used when calling + UCC_GETH_GetStatistics. */ +typedef struct ucc_geth_tx_firmware_statistics { + u32 sicoltx; /* single collision */ + u32 mulcoltx; /* multiple collision */ + u32 latecoltxfr; /* late collision */ + u32 frabortduecol; /* frames aborted due to transmit collision */ + u32 frlostinmactxer; /* frames lost due to internal MAC error + transmission that are not counted on any + other counter */ + u32 carriersenseertx; /* carrier sense error */ + u32 frtxok; /* frames transmitted OK */ + u32 txfrexcessivedefer; /* frames with defferal time greater than + specified threshold */ + u32 txpkts256; /* total packets (including bad) between 256 + and 511 octets */ + u32 txpkts512; /* total packets (including bad) between 512 + and 1023 octets */ + u32 txpkts1024; /* total packets (including bad) between 1024 + and 1518 octets */ + u32 txpktsjumbo; /* total packets (including bad) between 1024 + and MAXLength octets */ +} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t; + +/* GETH Rx firmware statistics structure, used when calling + UCC_GETH_GetStatistics. 
*/ +typedef struct ucc_geth_rx_firmware_statistics { + u32 frrxfcser; /* frames with crc error */ + u32 fraligner; /* frames with alignment error */ + u32 inrangelenrxer; /* in range length error */ + u32 outrangelenrxer; /* out of range length error */ + u32 frtoolong; /* frame too long */ + u32 runt; /* runt */ + u32 verylongevent; /* very long event */ + u32 symbolerror; /* symbol error */ + u32 dropbsy; /* drop because of BD not ready */ + u8 res0[0x8]; + u32 mismatchdrop; /* drop because of MAC filtering (e.g. address + or type mismatch) */ + u32 underpkts; /* total frames less than 64 octets */ + u32 pkts256; /* total frames (including bad) between 256 and + 511 octets */ + u32 pkts512; /* total frames (including bad) between 512 and + 1023 octets */ + u32 pkts1024; /* total frames (including bad) between 1024 + and 1518 octets */ + u32 pktsjumbo; /* total frames (including bad) between 1024 + and MAXLength octets */ + u32 frlossinmacer; /* frames lost because of internal MAC error + that is not counted in any other counter */ + u32 pausefr; /* pause frames */ + u8 res1[0x4]; + u32 removevlan; /* total frames that had their VLAN tag removed + */ + u32 replacevlan; /* total frames that had their VLAN tag + replaced */ + u32 insertvlan; /* total frames that had their VLAN tag + inserted */ +} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t; + +/* GETH hardware statistics structure, used when calling + UCC_GETH_GetStatistics. */ +typedef struct ucc_geth_hardware_statistics { + u32 tx64; /* Total number of frames (including bad + frames) transmitted that were exactly of the + minimal length (64 for un tagged, 68 for + tagged, or with length exactly equal to the + parameter MINLength */ + u32 tx127; /* Total number of frames (including bad + frames) transmitted that were between + MINLength (Including FCS length==4) and 127 + octets */ + u32 tx255; /* Total number of frames (including bad + frames) transmitted that were between 128 + (Including FCS length==4) and 255 octets */ + u32 rx64; /* Total number of frames received including + bad frames that were exactly of the mninimal + length (64 bytes) */ + u32 rx127; /* Total number of frames (including bad + frames) received that were between MINLength + (Including FCS length==4) and 127 octets */ + u32 rx255; /* Total number of frames (including bad + frames) received that were between 128 + (Including FCS length==4) and 255 octets */ + u32 txok; /* Total number of octets residing in frames + that where involved in succesfull + transmission */ + u16 txcf; /* Total number of PAUSE control frames + transmitted by this MAC */ + u32 tmca; /* Total number of frames that were transmitted + succesfully with the group address bit set + that are not broadcast frames */ + u32 tbca; /* Total number of frames transmitted + succesfully that had destination address + field equal to the broadcast address */ + u32 rxfok; /* Total number of frames received OK */ + u32 rxbok; /* Total number of octets received OK */ + u32 rbyt; /* Total number of octets received including + octets in bad frames. 
Must be implemented in + HW because it includes octets in frames that + never even reach the UCC */ + u32 rmca; /* Total number of frames that were received + succesfully with the group address bit set + that are not broadcast frames */ + u32 rbca; /* Total number of frames received succesfully + that had destination address equal to the + broadcast address */ +} __attribute__ ((packed)) ucc_geth_hardware_statistics_t; + +/* UCC GETH Tx errors returned via TxConf callback */ +#define TX_ERRORS_DEF 0x0200 +#define TX_ERRORS_EXDEF 0x0100 +#define TX_ERRORS_LC 0x0080 +#define TX_ERRORS_RL 0x0040 +#define TX_ERRORS_RC_MASK 0x003C +#define TX_ERRORS_RC_SHIFT 2 +#define TX_ERRORS_UN 0x0002 +#define TX_ERRORS_CSL 0x0001 + +/* UCC GETH Rx errors returned via RxStore callback */ +#define RX_ERRORS_CMR 0x0200 +#define RX_ERRORS_M 0x0100 +#define RX_ERRORS_BC 0x0080 +#define RX_ERRORS_MC 0x0040 + +/* Transmit BD. These are in addition to values defined in uccf. */ +#define T_VID 0x003c0000 /* insert VLAN id index mask. */ +#define T_DEF (((u32) TX_ERRORS_DEF ) << 16) +#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16) +#define T_LC (((u32) TX_ERRORS_LC ) << 16) +#define T_RL (((u32) TX_ERRORS_RL ) << 16) +#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16) +#define T_UN (((u32) TX_ERRORS_UN ) << 16) +#define T_CSL (((u32) TX_ERRORS_CSL ) << 16) +#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \ + | T_UN | T_CSL) /* transmit errors to report */ + +/* Receive BD. These are in addition to values defined in uccf. */ +#define R_LG 0x00200000 /* Frame length violation. */ +#define R_NO 0x00100000 /* Non-octet aligned frame. */ +#define R_SH 0x00080000 /* Short frame. */ +#define R_CR 0x00040000 /* CRC error. */ +#define R_OV 0x00020000 /* Overrun. */ +#define R_IPCH 0x00010000 /* IP checksum check failed. 
*/ +#define R_CMR (((u32) RX_ERRORS_CMR ) << 16) +#define R_M (((u32) RX_ERRORS_M ) << 16) +#define R_BC (((u32) RX_ERRORS_BC ) << 16) +#define R_MC (((u32) RX_ERRORS_MC ) << 16) +#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to + report */ +#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \ + R_OV | R_IPCH) /* receive errors to discard */ + +/* Alignments */ +#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256 +#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128 +#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128 +#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64 +#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values + based on num of + threads, but always + using the maximum is + easier */ +#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32 +#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 4 /* This is a + guess */ +#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */ +#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */ +#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This + is a + guess + */ +#define UCC_GETH_RX_BD_RING_ALIGNMENT 32 +#define UCC_GETH_TX_BD_RING_ALIGNMENT 32 +#define UCC_GETH_MRBLR_ALIGNMENT 128 +#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4 +#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32 +#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64 + +#define UCC_GETH_TAD_EF 0x80 +#define UCC_GETH_TAD_V 0x40 +#define UCC_GETH_TAD_REJ 0x20 +#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2 +#define UCC_GETH_TAD_VTAG_OP_SHIFT 6 +#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20 +#define UCC_GETH_TAD_RQOS_SHIFT 0 +#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5 +#define UCC_GETH_TAD_CFI 0x10 + +#define UCC_GETH_VLAN_PRIORITY_MAX 8 +#define UCC_GETH_IP_PRIORITY_MAX 64 +#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8 +#define UCC_GETH_RX_BD_RING_SIZE_MIN 8 +#define UCC_GETH_TX_BD_RING_SIZE_MIN 2 + +#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD + +/* Driver definitions */ +#define TX_BD_RING_LEN 0x10 +#define RX_BD_RING_LEN 0x10 +#define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN + +#define TX_RING_MOD_MASK(size) (size-1) +#define RX_RING_MOD_MASK(size) (size-1) + +#define ENET_NUM_OCTETS_PER_ADDRESS 6 +#define ENET_GROUP_ADDR 0x01 /* Group address mask + for ethernet + addresses */ + +#define TX_TIMEOUT (1*HZ) +#define SKB_ALLOC_TIMEOUT 100000 +#define PHY_INIT_TIMEOUT 100000 +#define PHY_CHANGE_TIME 2 + +/* Fast Ethernet (10/100 Mbps) */ +#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size + */ +#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */ +#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */ +#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size + */ +#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ +#define UCC_GETH_UTFTT_INIT 128 +/* Gigabit Ethernet (1000 Mbps) */ +#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual + FIFO size */ +#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */ +#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */ +#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual + FIFO size */ +#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */ +#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */ + +#define UCC_GETH_REMODER_INIT 0 /* bits that must be + set */ +#define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must */ +#define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value + for this + register */ +#define 
UCC_GETH_MACCFG1_INIT 0 +#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) +#define UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT \ + (MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112) + +/* Ethernet speed */ +typedef enum enet_speed { + ENET_SPEED_10BT, /* 10 Base T */ + ENET_SPEED_100BT, /* 100 Base T */ + ENET_SPEED_1000BT /* 1000 Base T */ +} enet_speed_e; + +/* Ethernet Address Type. */ +typedef enum enet_addr_type { + ENET_ADDR_TYPE_INDIVIDUAL, + ENET_ADDR_TYPE_GROUP, + ENET_ADDR_TYPE_BROADCAST +} enet_addr_type_e; + +/* TBI / MII Set Register */ +typedef enum enet_tbi_mii_reg { + ENET_TBI_MII_CR = 0x00, /* Control (CR ) */ + ENET_TBI_MII_SR = 0x01, /* Status (SR ) */ + ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */ + ENET_TBI_MII_ANLPBPA = 0x05, /* AN link partner base page ability + (ANLPBPA) */ + ENET_TBI_MII_ANEX = 0x06, /* AN expansion (ANEX ) */ + ENET_TBI_MII_ANNPT = 0x07, /* AN next page transmit (ANNPT ) */ + ENET_TBI_MII_ANLPANP = 0x08, /* AN link partner ability next page + (ANLPANP) */ + ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */ + ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */ + ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */ +} enet_tbi_mii_reg_e; + +/* UCC GETH 82xx Ethernet Address Recognition Location */ +typedef enum ucc_geth_enet_address_recognition_location { + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station + address */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional + station + address + paddr1 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional + station + address + paddr2 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional + station + address + paddr3 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional + station + address + paddr4 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual + hash */ +} ucc_geth_enet_address_recognition_location_e; + +/* UCC GETH vlan operation tagged */ +typedef enum ucc_geth_vlan_operation_tagged { + UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */ + UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG + = 0x1, /* Tagged - replace vid portion of q tag */ + UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE + = 0x2, /* Tagged - if vid0 replace vid with default value */ + UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME + = 0x3 /* Tagged - extract q tag from frame */ +} ucc_geth_vlan_operation_tagged_e; + +/* UCC GETH vlan operation non-tagged */ +typedef enum ucc_geth_vlan_operation_non_tagged { + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */ + UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged - + q tag insert + */ +} ucc_geth_vlan_operation_non_tagged_e; + +/* UCC GETH Rx Quality of Service Mode */ +typedef enum ucc_geth_qos_mode { + UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */ + UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue + determined + by L2 + criteria */ + UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue + determined + by L3 + criteria */ +} ucc_geth_qos_mode_e; + +/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together + for combined functionality */ +typedef enum ucc_geth_statistics_gathering_mode { + UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No + statistics + gathering */ + UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable + hardware + 
statistics + gathering + */ + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable + firmware + tx + statistics + gathering + */ + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable + firmware + rx + statistics + gathering + */ +} ucc_geth_statistics_gathering_mode_e; + +/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */ +typedef enum ucc_geth_maccfg2_pad_and_crc_mode { + UCC_GETH_PAD_AND_CRC_MODE_NONE + = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding + short frames + nor CRC */ + UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY + = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append + CRC only */ + UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC = + MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC +} ucc_geth_maccfg2_pad_and_crc_mode_e; + +/* UCC GETH upsmr Flow Control Mode */ +typedef enum ucc_geth_flow_control_mode { + UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic + flow control + */ + UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY + = 0x00004000 /* Send pause frame when RxFIFO reaches its + emergency threshold */ +} ucc_geth_flow_control_mode_e; + +/* UCC GETH number of threads */ +typedef enum ucc_geth_num_of_threads { + UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */ + UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */ + UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */ + UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */ + UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */ +} ucc_geth_num_of_threads_e; + +/* UCC GETH number of station addresses */ +typedef enum ucc_geth_num_of_station_addresses { + UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */ + UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */ +} ucc_geth_num_of_station_addresses_e; + +typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS]; + +/* UCC GETH 82xx Ethernet Address Container */ +typedef struct enet_addr_container { + enet_addr_t address; /* ethernet address */ + ucc_geth_enet_address_recognition_location_e location; /* location in + 82xx address + recognition + hardware */ + struct list_head node; +} enet_addr_container_t; + +#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node) + +/* UCC GETH Termination Action Descriptor (TAD) structure. 
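enet_addr_container_t entries are chained through their embedded list_head, and ENET_ADDR_CONT_ENTRY recovers the container from a list node. A rough sketch of walking such a list follows; the function name and the idea of counting group-hash entries are illustrative only, the driver's own list handling is not shown in this hunk:

	/* illustrative only: count containers programmed into the group hash */
	static int count_group_hash_entries(struct list_head *head)
	{
		struct list_head *pos;
		int n = 0;

		list_for_each(pos, head) {
			enet_addr_container_t *cont = ENET_ADDR_CONT_ENTRY(pos);

			/* cont->address holds the 6-octet MAC, cont->location
			   records which recognition resource holds it */
			if (cont->location ==
			    UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH)
				n++;
		}
		return n;
	}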
*/ +typedef struct ucc_geth_tad_params { + int rx_non_dynamic_extended_features_mode; + int reject_frame; + ucc_geth_vlan_operation_tagged_e vtag_op; + ucc_geth_vlan_operation_non_tagged_e vnontag_op; + ucc_geth_qos_mode_e rqos; + u8 vpri; + u16 vid; +} ucc_geth_tad_params_t; + +/* GETH protocol initialization structure */ +typedef struct ucc_geth_info { + ucc_fast_info_t uf_info; + u8 numQueuesTx; + u8 numQueuesRx; + int ipCheckSumCheck; + int ipCheckSumGenerate; + int rxExtendedFiltering; + u32 extendedFilteringChainPointer; + u16 typeorlen; + int dynamicMaxFrameLength; + int dynamicMinFrameLength; + u8 nonBackToBackIfgPart1; + u8 nonBackToBackIfgPart2; + u8 miminumInterFrameGapEnforcement; + u8 backToBackInterFrameGap; + int ipAddressAlignment; + int lengthCheckRx; + u32 mblinterval; + u16 nortsrbytetime; + u8 fracsiz; + u8 strictpriorityq; + u8 txasap; + u8 extrabw; + int miiPreambleSupress; + u8 altBebTruncation; + int altBeb; + int backPressureNoBackoff; + int noBackoff; + int excessDefer; + u8 maxRetransmission; + u8 collisionWindow; + int pro; + int cap; + int rsh; + int rlpb; + int cam; + int bro; + int ecm; + int receiveFlowControl; + u8 maxGroupAddrInHash; + u8 maxIndAddrInHash; + u8 prel; + u16 maxFrameLength; + u16 minFrameLength; + u16 maxD1Length; + u16 maxD2Length; + u16 vlantype; + u16 vlantci; + u32 ecamptr; + u32 eventRegMask; + u16 pausePeriod; + u16 extensionField; + u8 phy_address; + u32 board_flags; + u32 phy_interrupt; + u8 weightfactor[NUM_TX_QUEUES]; + u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; + u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; + u8 l3qt[UCC_GETH_IP_PRIORITY_MAX]; + u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX]; + u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; + u16 bdRingLenTx[NUM_TX_QUEUES]; + u16 bdRingLenRx[NUM_RX_QUEUES]; + enet_interface_e enet_interface; + ucc_geth_num_of_station_addresses_e numStationAddresses; + qe_fltr_largest_external_tbl_lookup_key_size_e + largestexternallookupkeysize; + ucc_geth_statistics_gathering_mode_e statisticsMode; + ucc_geth_vlan_operation_tagged_e vlanOperationTagged; + ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged; + ucc_geth_qos_mode_e rxQoSMode; + ucc_geth_flow_control_mode_e aufc; + ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc; + ucc_geth_num_of_threads_e numThreadsTx; + ucc_geth_num_of_threads_e numThreadsRx; + qe_risc_allocation_e riscTx; + qe_risc_allocation_e riscRx; +} ucc_geth_info_t; + +/* structure representing UCC GETH */ +typedef struct ucc_geth_private { + ucc_geth_info_t *ug_info; + ucc_fast_private_t *uccf; + struct net_device *dev; + struct net_device_stats stats; /* linux network statistics */ + ucc_geth_t *ug_regs; + ucc_geth_init_pram_t *p_init_enet_param_shadow; + ucc_geth_exf_global_pram_t *p_exf_glbl_param; + u32 exf_glbl_param_offset; + ucc_geth_rx_global_pram_t *p_rx_glbl_pram; + u32 rx_glbl_pram_offset; + ucc_geth_tx_global_pram_t *p_tx_glbl_pram; + u32 tx_glbl_pram_offset; + ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg; + u32 send_q_mem_reg_offset; + ucc_geth_thread_data_tx_t *p_thread_data_tx; + u32 thread_dat_tx_offset; + ucc_geth_thread_data_rx_t *p_thread_data_rx; + u32 thread_dat_rx_offset; + ucc_geth_scheduler_t *p_scheduler; + u32 scheduler_offset; + ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram; + u32 tx_fw_statistics_pram_offset; + ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram; + u32 rx_fw_statistics_pram_offset; + ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl; + u32 rx_irq_coalescing_tbl_offset; + 
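	/* Note: each p_* pointer in this structure is paired with a *_offset
	 * member; the pointer is the CPU-visible mapping of the corresponding
	 * parameter-RAM block, while the offset is presumably its location in
	 * the QE multi-user RAM, kept so the block can be released again when
	 * the interface is torn down. */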
ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl; + u32 rx_bd_qs_tbl_offset; + u8 *p_tx_bd_ring[NUM_TX_QUEUES]; + u32 tx_bd_ring_offset[NUM_TX_QUEUES]; + u8 *p_rx_bd_ring[NUM_RX_QUEUES]; + u32 rx_bd_ring_offset[NUM_RX_QUEUES]; + u8 *confBd[NUM_TX_QUEUES]; + u8 *txBd[NUM_TX_QUEUES]; + u8 *rxBd[NUM_RX_QUEUES]; + int badFrame[NUM_RX_QUEUES]; + u16 cpucount[NUM_TX_QUEUES]; + volatile u16 *p_cpucount[NUM_TX_QUEUES]; + int indAddrRegUsed[NUM_OF_PADDRS]; + enet_addr_t paddr[NUM_OF_PADDRS]; + u8 numGroupAddrInHash; + u8 numIndAddrInHash; + u8 numIndAddrInReg; + int rx_extended_features; + int rx_non_dynamic_extended_features; + struct list_head conf_skbs; + struct list_head group_hash_q; + struct list_head ind_hash_q; + u32 saved_uccm; + spinlock_t lock; + /* pointers to arrays of skbuffs for tx and rx */ + struct sk_buff **tx_skbuff[NUM_TX_QUEUES]; + struct sk_buff **rx_skbuff[NUM_RX_QUEUES]; + /* indices pointing to the next free sbk in skb arrays */ + u16 skb_curtx[NUM_TX_QUEUES]; + u16 skb_currx[NUM_RX_QUEUES]; + /* index of the first skb which hasn't been transmitted yet. */ + u16 skb_dirtytx[NUM_TX_QUEUES]; + + struct work_struct tq; + struct timer_list phy_info_timer; + struct ugeth_mii_info *mii_info; + int oldspeed; + int oldduplex; + int oldlink; +} ucc_geth_private_t; + +#endif /* __UCC_GETH_H__ */ diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c new file mode 100644 index 00000000000..f91028c5386 --- /dev/null +++ b/drivers/net/ucc_geth_phy.c @@ -0,0 +1,801 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish <gridish@freescale.com> + * + * Description: + * UCC GETH Driver -- PHY handling + * + * Changelog: + * Jun 28, 2006 Li Yang <LeoLi@freescale.com> + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/ethtool.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/uaccess.h> + +#include "ucc_geth.h" +#include "ucc_geth_phy.h" +#include <platforms/83xx/mpc8360e_pb.h> + +#define ugphy_printk(level, format, arg...) \ + printk(level format "\n", ## arg) + +#define ugphy_dbg(format, arg...) \ + ugphy_printk(KERN_DEBUG, format , ## arg) +#define ugphy_err(format, arg...) \ + ugphy_printk(KERN_ERR, format , ## arg) +#define ugphy_info(format, arg...) \ + ugphy_printk(KERN_INFO, format , ## arg) +#define ugphy_warn(format, arg...) \ + ugphy_printk(KERN_WARNING, format , ## arg) + +#ifdef UGETH_VERBOSE_DEBUG +#define ugphy_vdbg ugphy_dbg +#else +#define ugphy_vdbg(fmt, args...) 
do { } while (0) +#endif /* UGETH_VERBOSE_DEBUG */ + +static void config_genmii_advert(struct ugeth_mii_info *mii_info); +static void genmii_setup_forced(struct ugeth_mii_info *mii_info); +static void genmii_restart_aneg(struct ugeth_mii_info *mii_info); +static int gbit_config_aneg(struct ugeth_mii_info *mii_info); +static int genmii_config_aneg(struct ugeth_mii_info *mii_info); +static int genmii_update_link(struct ugeth_mii_info *mii_info); +static int genmii_read_status(struct ugeth_mii_info *mii_info); +u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum); +void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val); + +static u8 *bcsr_regs = NULL; + +/* Write value to the PHY for this device to the register at regnum, */ +/* waiting until the write is done before it returns. All PHY */ +/* configuration has to be done through the TSEC1 MIIM regs */ +void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_mii_mng_t *mii_regs; + enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum; + u32 tmp_reg; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irq(&ugeth->lock); + + mii_regs = ugeth->mii_info->mii_regs; + + /* Set this UCC to be the master of the MII managment */ + ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num); + + /* Stop the MII management read cycle */ + out_be32(&mii_regs->miimcom, 0); + /* Setting up the MII Mangement Address Register */ + tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg; + out_be32(&mii_regs->miimadd, tmp_reg); + + /* Setting up the MII Mangement Control Register with the value */ + out_be32(&mii_regs->miimcon, (u32) value); + + /* Wait till MII management write is complete */ + while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY) + cpu_relax(); + + spin_unlock_irq(&ugeth->lock); + + udelay(10000); +} + +/* Reads from register regnum in the PHY for device dev, */ +/* returning the value. Clears miimcom first. 
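Both accessors drive the MII management block directly and busy-wait for completion; callers only pass the net device, the PHY's MII address and a register number. A throwaway usage sketch, where example_phy_poke and phy_addr are hypothetical:

	/* illustrative only: read the PHY identifier and issue a soft reset */
	static void example_phy_poke(struct net_device *dev, int phy_addr)
	{
		int id1 = read_phy_reg(dev, phy_addr, MII_PHYSID1);
		int id2 = read_phy_reg(dev, phy_addr, MII_PHYSID2);

		printk(KERN_DEBUG "PHY id %04x:%04x\n", id1, id2);
		write_phy_reg(dev, phy_addr, MII_BMCR, BMCR_RESET);
	}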
All PHY */ +/* configuration has to be done through the TSEC1 MIIM regs */ +int read_phy_reg(struct net_device *dev, int mii_id, int regnum) +{ + ucc_geth_private_t *ugeth = netdev_priv(dev); + ucc_mii_mng_t *mii_regs; + enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum; + u32 tmp_reg; + u16 value; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irq(&ugeth->lock); + + mii_regs = ugeth->mii_info->mii_regs; + + /* Setting up the MII Mangement Address Register */ + tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg; + out_be32(&mii_regs->miimadd, tmp_reg); + + /* Perform an MII management read cycle */ + out_be32(&mii_regs->miimcom, MIIMCOM_READ_CYCLE); + + /* Wait till MII management write is complete */ + while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY) + cpu_relax(); + + udelay(10000); + + /* Read MII management status */ + value = (u16) in_be32(&mii_regs->miimstat); + out_be32(&mii_regs->miimcom, 0); + if (value == 0xffff) + ugphy_warn("read wrong value : mii_id %d,mii_reg %d, base %08x", + mii_id, mii_reg, (u32) & (mii_regs->miimcfg)); + + spin_unlock_irq(&ugeth->lock); + + return (value); +} + +void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->phyinfo->ack_interrupt) + mii_info->phyinfo->ack_interrupt(mii_info); +} + +void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info, + u32 interrupts) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + mii_info->interrupts = interrupts; + if (mii_info->phyinfo->config_intr) + mii_info->phyinfo->config_intr(mii_info); +} + +/* Writes MII_ADVERTISE with the appropriate values, after + * sanitizing advertise to make sure only supported features + * are advertised + */ +static void config_genmii_advert(struct ugeth_mii_info *mii_info) +{ + u32 advertise; + u16 adv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Only allow advertising what this PHY supports */ + mii_info->advertising &= mii_info->phyinfo->features; + advertise = mii_info->advertising; + + /* Setup standard advertisement */ + adv = phy_read(mii_info, MII_ADVERTISE); + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (advertise & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + phy_write(mii_info, MII_ADVERTISE, adv); +} + +static void genmii_setup_forced(struct ugeth_mii_info *mii_info) +{ + u16 ctrl; + u32 features = mii_info->phyinfo->features; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + ctrl = phy_read(mii_info, MII_BMCR); + + ctrl &= + ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + ctrl |= BMCR_RESET; + + switch (mii_info->speed) { + case SPEED_1000: + if (features & (SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full)) { + ctrl |= BMCR_SPEED1000; + break; + } + mii_info->speed = SPEED_100; + case SPEED_100: + if (features & (SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full)) { + ctrl |= BMCR_SPEED100; + break; + } + mii_info->speed = SPEED_10; + case SPEED_10: + if (features & (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full)) + break; + default: /* Unsupported speed! 
*/ + ugphy_err("%s: Bad speed!", mii_info->dev->name); + break; + } + + phy_write(mii_info, MII_BMCR, ctrl); +} + +/* Enable and Restart Autonegotiation */ +static void genmii_restart_aneg(struct ugeth_mii_info *mii_info) +{ + u16 ctl; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + ctl = phy_read(mii_info, MII_BMCR); + ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); + phy_write(mii_info, MII_BMCR, ctl); +} + +static int gbit_config_aneg(struct ugeth_mii_info *mii_info) +{ + u16 adv; + u32 advertise; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->autoneg) { + /* Configure the ADVERTISE register */ + config_genmii_advert(mii_info); + advertise = mii_info->advertising; + + adv = phy_read(mii_info, MII_1000BASETCONTROL); + adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | + MII_1000BASETCONTROL_HALFDUPLEXCAP); + if (advertise & SUPPORTED_1000baseT_Half) + adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; + if (advertise & SUPPORTED_1000baseT_Full) + adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; + phy_write(mii_info, MII_1000BASETCONTROL, adv); + + /* Start/Restart aneg */ + genmii_restart_aneg(mii_info); + } else + genmii_setup_forced(mii_info); + + return 0; +} + +static int genmii_config_aneg(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->autoneg) { + config_genmii_advert(mii_info); + genmii_restart_aneg(mii_info); + } else + genmii_setup_forced(mii_info); + + return 0; +} + +static int genmii_update_link(struct ugeth_mii_info *mii_info) +{ + u16 status; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Do a fake read */ + phy_read(mii_info, MII_BMSR); + + /* Read link and autonegotiation status */ + status = phy_read(mii_info, MII_BMSR); + if ((status & BMSR_LSTATUS) == 0) + mii_info->link = 0; + else + mii_info->link = 1; + + /* If we are autonegotiating, and not done, + * return an error */ + if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE)) + return -EAGAIN; + + return 0; +} + +static int genmii_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + if (mii_info->autoneg) { + status = phy_read(mii_info, MII_LPA); + + if (status & (LPA_10FULL | LPA_100FULL)) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + if (status & (LPA_100FULL | LPA_100HALF)) + mii_info->speed = SPEED_100; + else + mii_info->speed = SPEED_10; + mii_info->pause = 0; + } + /* On non-aneg, we assume what we put in BMCR is the speed, + * though magic-aneg shouldn't prevent this case from occurring + */ + + return 0; +} + +static int marvell_init(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + phy_write(mii_info, 0x14, 0x0cd2); + phy_write(mii_info, MII_BMCR, + phy_read(mii_info, MII_BMCR) | BMCR_RESET); + msleep(4000); + + return 0; +} + +static int marvell_config_aneg(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* The Marvell PHY has an errata which requires + * that certain registers get written in order + * to restart autonegotiation */ + phy_write(mii_info, MII_BMCR, BMCR_RESET); + + phy_write(mii_info, 0x1d, 0x1f); + phy_write(mii_info, 0x1e, 0x200c); + phy_write(mii_info, 0x1d, 0x5); + phy_write(mii_info, 0x1e, 0); + phy_write(mii_info, 0x1e, 0x100); + + gbit_config_aneg(mii_info); + + return 0; +} + +static int marvell_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + 
ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + /* If the link is up, read the speed and duplex */ + /* If we aren't autonegotiating, assume speeds + * are as set */ + if (mii_info->autoneg && mii_info->link) { + int speed; + status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS); + + /* Get the duplexity */ + if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + + /* Get the speed */ + speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK; + switch (speed) { + case MII_M1011_PHY_SPEC_STATUS_1000: + mii_info->speed = SPEED_1000; + break; + case MII_M1011_PHY_SPEC_STATUS_100: + mii_info->speed = SPEED_100; + break; + default: + mii_info->speed = SPEED_10; + break; + } + mii_info->pause = 0; + } + + return 0; +} + +static int marvell_ack_interrupt(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Clear the interrupts by reading the reg */ + phy_read(mii_info, MII_M1011_IEVENT); + + return 0; +} + +static int marvell_config_intr(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->interrupts == MII_INTERRUPT_ENABLED) + phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT); + else + phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR); + + return 0; +} + +static int cis820x_init(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + phy_write(mii_info, MII_CIS8201_AUX_CONSTAT, + MII_CIS8201_AUXCONSTAT_INIT); + phy_write(mii_info, MII_CIS8201_EXT_CON1, MII_CIS8201_EXTCON1_INIT); + + return 0; +} + +static int cis820x_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + /* If the link is up, read the speed and duplex */ + /* If we aren't autonegotiating, assume speeds + * are as set */ + if (mii_info->autoneg && mii_info->link) { + int speed; + + status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT); + if (status & MII_CIS8201_AUXCONSTAT_DUPLEX) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + + speed = status & MII_CIS8201_AUXCONSTAT_SPEED; + + switch (speed) { + case MII_CIS8201_AUXCONSTAT_GBIT: + mii_info->speed = SPEED_1000; + break; + case MII_CIS8201_AUXCONSTAT_100: + mii_info->speed = SPEED_100; + break; + default: + mii_info->speed = SPEED_10; + break; + } + } + + return 0; +} + +static int cis820x_ack_interrupt(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + phy_read(mii_info, MII_CIS8201_ISTAT); + + return 0; +} + +static int cis820x_config_intr(struct ugeth_mii_info *mii_info) +{ + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->interrupts == MII_INTERRUPT_ENABLED) + phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK); + else + phy_write(mii_info, MII_CIS8201_IMASK, 0); + + return 0; +} + +#define DM9161_DELAY 10 + +static int dm9161_read_status(struct ugeth_mii_info *mii_info) +{ + u16 status; + int err; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Update the link, but return if there + * was an error */ + err = genmii_update_link(mii_info); + if (err) + return err; + + /* If the link is up, read the speed and duplex */ + /* If we aren't autonegotiating, assume speeds + * are as set */ + if (mii_info->autoneg && mii_info->link) { + 
status = phy_read(mii_info, MII_DM9161_SCSR); + if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H)) + mii_info->speed = SPEED_100; + else + mii_info->speed = SPEED_10; + + if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F)) + mii_info->duplex = DUPLEX_FULL; + else + mii_info->duplex = DUPLEX_HALF; + } + + return 0; +} + +static int dm9161_config_aneg(struct ugeth_mii_info *mii_info) +{ + struct dm9161_private *priv = mii_info->priv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (0 == priv->resetdone) + return -EAGAIN; + + return 0; +} + +static void dm9161_timer(unsigned long data) +{ + struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data; + struct dm9161_private *priv = mii_info->priv; + u16 status = phy_read(mii_info, MII_BMSR); + + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (status & BMSR_ANEGCOMPLETE) { + priv->resetdone = 1; + } else + mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ); +} + +static int dm9161_init(struct ugeth_mii_info *mii_info) +{ + struct dm9161_private *priv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Allocate the private data structure */ + priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL); + + if (NULL == priv) + return -ENOMEM; + + mii_info->priv = priv; + + /* Reset is not done yet */ + priv->resetdone = 0; + + phy_write(mii_info, MII_BMCR, + phy_read(mii_info, MII_BMCR) | BMCR_RESET); + + phy_write(mii_info, MII_BMCR, + phy_read(mii_info, MII_BMCR) & ~BMCR_ISOLATE); + + config_genmii_advert(mii_info); + /* Start/Restart aneg */ + genmii_config_aneg(mii_info); + + /* Start a timer for DM9161_DELAY seconds to wait + * for the PHY to be ready */ + init_timer(&priv->timer); + priv->timer.function = &dm9161_timer; + priv->timer.data = (unsigned long)mii_info; + mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ); + + return 0; +} + +static void dm9161_close(struct ugeth_mii_info *mii_info) +{ + struct dm9161_private *priv = mii_info->priv; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + del_timer_sync(&priv->timer); + kfree(priv); +} + +static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info) +{ +/* FIXME: This lines are for BUG fixing in the mpc8325. +Remove this from here when it's fixed */ + if (bcsr_regs == NULL) + bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE); + bcsr_regs[14] |= 0x40; + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Clear the interrupts by reading the reg */ + phy_read(mii_info, MII_DM9161_INTR); + + + return 0; +} + +static int dm9161_config_intr(struct ugeth_mii_info *mii_info) +{ +/* FIXME: This lines are for BUG fixing in the mpc8325. 
+Remove this from here when it's fixed */ + if (bcsr_regs == NULL) { + bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE); + bcsr_regs[14] &= ~0x40; + } + ugphy_vdbg("%s: IN", __FUNCTION__); + + if (mii_info->interrupts == MII_INTERRUPT_ENABLED) + phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT); + else + phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP); + + return 0; +} + +/* Cicada 820x */ +static struct phy_info phy_info_cis820x = { + .phy_id = 0x000fc440, + .name = "Cicada Cis8204", + .phy_id_mask = 0x000fffc0, + .features = MII_GBIT_FEATURES, + .init = &cis820x_init, + .config_aneg = &gbit_config_aneg, + .read_status = &cis820x_read_status, + .ack_interrupt = &cis820x_ack_interrupt, + .config_intr = &cis820x_config_intr, +}; + +static struct phy_info phy_info_dm9161 = { + .phy_id = 0x0181b880, + .phy_id_mask = 0x0ffffff0, + .name = "Davicom DM9161E", + .init = dm9161_init, + .config_aneg = dm9161_config_aneg, + .read_status = dm9161_read_status, + .close = dm9161_close, +}; + +static struct phy_info phy_info_dm9161a = { + .phy_id = 0x0181b8a0, + .phy_id_mask = 0x0ffffff0, + .name = "Davicom DM9161A", + .features = MII_BASIC_FEATURES, + .init = dm9161_init, + .config_aneg = dm9161_config_aneg, + .read_status = dm9161_read_status, + .ack_interrupt = dm9161_ack_interrupt, + .config_intr = dm9161_config_intr, + .close = dm9161_close, +}; + +static struct phy_info phy_info_marvell = { + .phy_id = 0x01410c00, + .phy_id_mask = 0xffffff00, + .name = "Marvell 88E11x1", + .features = MII_GBIT_FEATURES, + .init = &marvell_init, + .config_aneg = &marvell_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, +}; + +static struct phy_info phy_info_genmii = { + .phy_id = 0x00000000, + .phy_id_mask = 0x00000000, + .name = "Generic MII", + .features = MII_BASIC_FEATURES, + .config_aneg = genmii_config_aneg, + .read_status = genmii_read_status, +}; + +static struct phy_info *phy_info[] = { + &phy_info_cis820x, + &phy_info_marvell, + &phy_info_dm9161, + &phy_info_dm9161a, + &phy_info_genmii, + NULL +}; + +u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum) +{ + u16 retval; + unsigned long flags; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irqsave(&mii_info->mdio_lock, flags); + retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum); + spin_unlock_irqrestore(&mii_info->mdio_lock, flags); + + return retval; +} + +void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val) +{ + unsigned long flags; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + spin_lock_irqsave(&mii_info->mdio_lock, flags); + mii_info->mdio_write(mii_info->dev, mii_info->mii_id, regnum, val); + spin_unlock_irqrestore(&mii_info->mdio_lock, flags); +} + +/* Use the PHY ID registers to determine what type of PHY is attached + * to device dev. 
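In this scheme, supporting another 10/100 PHY amounts to one more table entry; the generic helpers can be reused when the device needs no special handling. The entry below is purely illustrative (the ID and name are invented) and only shows the shape that get_phy_info's mask-and-compare lookup expects:

	/* hypothetical example entry - not a real device */
	static struct phy_info phy_info_example = {
		.phy_id		= 0x00112230,
		.phy_id_mask	= 0x0ffffff0,	/* ignore the revision nibble */
		.name		= "Example 10/100 PHY",
		.features	= MII_BASIC_FEATURES,
		.config_aneg	= genmii_config_aneg,
		.read_status	= genmii_read_status,
	};

Such an entry would also have to be placed in phy_info[] ahead of phy_info_genmii, because the lookup returns the first match and the generic entry's all-zero mask matches every PHY.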
return a struct phy_info structure describing that PHY + */ +struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info) +{ + u16 phy_reg; + u32 phy_ID; + int i; + struct phy_info *theInfo = NULL; + struct net_device *dev = mii_info->dev; + + ugphy_vdbg("%s: IN", __FUNCTION__); + + /* Grab the bits from PHYIR1, and put them in the upper half */ + phy_reg = phy_read(mii_info, MII_PHYSID1); + phy_ID = (phy_reg & 0xffff) << 16; + + /* Grab the bits from PHYIR2, and put them in the lower half */ + phy_reg = phy_read(mii_info, MII_PHYSID2); + phy_ID |= (phy_reg & 0xffff); + + /* loop through all the known PHY types, and find one that */ + /* matches the ID we read from the PHY. */ + for (i = 0; phy_info[i]; i++) + if (phy_info[i]->phy_id == (phy_ID & phy_info[i]->phy_id_mask)){ + theInfo = phy_info[i]; + break; + } + + /* This shouldn't happen, as we have generic PHY support */ + if (theInfo == NULL) { + ugphy_info("%s: PHY id %x is not supported!", dev->name, + phy_ID); + return NULL; + } else { + ugphy_info("%s: PHY is %s (%x)", dev->name, theInfo->name, + phy_ID); + } + + return theInfo; +} diff --git a/drivers/net/ucc_geth_phy.h b/drivers/net/ucc_geth_phy.h new file mode 100644 index 00000000000..2f98b8f1bb0 --- /dev/null +++ b/drivers/net/ucc_geth_phy.h @@ -0,0 +1,217 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Shlomi Gridish <gridish@freescale.com> + * + * Description: + * UCC GETH Driver -- PHY handling + * + * Changelog: + * Jun 28, 2006 Li Yang <LeoLi@freescale.com> + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __UCC_GETH_PHY_H__ +#define __UCC_GETH_PHY_H__ + +#define MII_end ((u32)-2) +#define MII_read ((u32)-1) + +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_NOTVALID 0x00000004 + +#define UGETH_AN_TIMEOUT 2000 + +/* 1000BT control (Marvell & BCM54xx at least) */ +#define MII_1000BASETCONTROL 0x09 +#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200 +#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100 + +/* Cicada Extended Control Register 1 */ +#define MII_CIS8201_EXT_CON1 0x17 +#define MII_CIS8201_EXTCON1_INIT 0x0000 + +/* Cicada Interrupt Mask Register */ +#define MII_CIS8201_IMASK 0x19 +#define MII_CIS8201_IMASK_IEN 0x8000 +#define MII_CIS8201_IMASK_SPEED 0x4000 +#define MII_CIS8201_IMASK_LINK 0x2000 +#define MII_CIS8201_IMASK_DUPLEX 0x1000 +#define MII_CIS8201_IMASK_MASK 0xf000 + +/* Cicada Interrupt Status Register */ +#define MII_CIS8201_ISTAT 0x1a +#define MII_CIS8201_ISTAT_STATUS 0x8000 +#define MII_CIS8201_ISTAT_SPEED 0x4000 +#define MII_CIS8201_ISTAT_LINK 0x2000 +#define MII_CIS8201_ISTAT_DUPLEX 0x1000 + +/* Cicada Auxiliary Control/Status Register */ +#define MII_CIS8201_AUX_CONSTAT 0x1c +#define MII_CIS8201_AUXCONSTAT_INIT 0x0004 +#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020 +#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018 +#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010 +#define MII_CIS8201_AUXCONSTAT_100 0x0008 + +/* 88E1011 PHY Status Register */ +#define MII_M1011_PHY_SPEC_STATUS 0x11 +#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000 +#define MII_M1011_PHY_SPEC_STATUS_100 0x4000 +#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 +#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 +#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 +#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400 + +#define MII_M1011_IEVENT 0x13 +#define MII_M1011_IEVENT_CLEAR 0x0000 + +#define MII_M1011_IMASK 0x12 +#define MII_M1011_IMASK_INIT 0x6400 +#define MII_M1011_IMASK_CLEAR 0x0000 + +#define MII_DM9161_SCR 0x10 +#define MII_DM9161_SCR_INIT 0x0610 + +/* DM9161 Specified Configuration and Status Register */ +#define MII_DM9161_SCSR 0x11 +#define MII_DM9161_SCSR_100F 0x8000 +#define MII_DM9161_SCSR_100H 0x4000 +#define MII_DM9161_SCSR_10F 0x2000 +#define MII_DM9161_SCSR_10H 0x1000 + +/* DM9161 Interrupt Register */ +#define MII_DM9161_INTR 0x15 +#define MII_DM9161_INTR_PEND 0x8000 +#define MII_DM9161_INTR_DPLX_MASK 0x0800 +#define MII_DM9161_INTR_SPD_MASK 0x0400 +#define MII_DM9161_INTR_LINK_MASK 0x0200 +#define MII_DM9161_INTR_MASK 0x0100 +#define MII_DM9161_INTR_DPLX_CHANGE 0x0010 +#define MII_DM9161_INTR_SPD_CHANGE 0x0008 +#define MII_DM9161_INTR_LINK_CHANGE 0x0004 +#define MII_DM9161_INTR_INIT 0x0000 +#define MII_DM9161_INTR_STOP \ +(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \ + | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK) + +/* DM9161 10BT Configuration/Status */ +#define MII_DM9161_10BTCSR 0x12 +#define MII_DM9161_10BTCSR_INIT 0x7800 + +#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \ + SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | \ + SUPPORTED_100baseT_Full | \ + SUPPORTED_Autoneg | \ + SUPPORTED_TP | \ + SUPPORTED_MII) + +#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \ + SUPPORTED_1000baseT_Half | \ + SUPPORTED_1000baseT_Full) + +#define MII_READ_COMMAND 0x00000001 + +#define MII_INTERRUPT_DISABLED 0x0 +#define MII_INTERRUPT_ENABLED 0x1 +/* Taken from mii_if_info and sungem_phy.h */ +struct ugeth_mii_info { + /* Information about the PHY type */ + /* And management functions */ + struct phy_info *phyinfo; + + ucc_mii_mng_t *mii_regs; + + /* forced speed & 
duplex (no autoneg) + * partner speed & duplex & pause (autoneg) + */ + int speed; + int duplex; + int pause; + + /* The most recently read link state */ + int link; + + /* Enabled Interrupts */ + u32 interrupts; + + u32 advertising; + int autoneg; + int mii_id; + + /* private data pointer */ + /* For use by PHYs to maintain extra state */ + void *priv; + + /* Provided by host chip */ + struct net_device *dev; + + /* A lock to ensure that only one thing can read/write + * the MDIO bus at a time */ + spinlock_t mdio_lock; + + /* Provided by ethernet driver */ + int (*mdio_read) (struct net_device * dev, int mii_id, int reg); + void (*mdio_write) (struct net_device * dev, int mii_id, int reg, + int val); +}; + +/* struct phy_info: a structure which defines attributes for a PHY + * + * id will contain a number which represents the PHY. During + * startup, the driver will poll the PHY to find out what its + * UID--as defined by registers 2 and 3--is. The 32-bit result + * gotten from the PHY will be ANDed with phy_id_mask to + * discard any bits which may change based on revision numbers + * unimportant to functionality + * + * There are 6 commands which take a ugeth_mii_info structure. + * Each PHY must declare config_aneg, and read_status. + */ +struct phy_info { + u32 phy_id; + char *name; + unsigned int phy_id_mask; + u32 features; + + /* Called to initialize the PHY */ + int (*init) (struct ugeth_mii_info * mii_info); + + /* Called to suspend the PHY for power */ + int (*suspend) (struct ugeth_mii_info * mii_info); + + /* Reconfigures autonegotiation (or disables it) */ + int (*config_aneg) (struct ugeth_mii_info * mii_info); + + /* Determines the negotiated speed and duplex */ + int (*read_status) (struct ugeth_mii_info * mii_info); + + /* Clears any pending interrupts */ + int (*ack_interrupt) (struct ugeth_mii_info * mii_info); + + /* Enables or disables interrupts */ + int (*config_intr) (struct ugeth_mii_info * mii_info); + + /* Clears up any memory if needed */ + void (*close) (struct ugeth_mii_info * mii_info); +}; + +struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info); +void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value); +int read_phy_reg(struct net_device *dev, int mii_id, int regnum); +void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info); +void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info, + u32 interrupts); + +struct dm9161_private { + struct timer_list timer; + int resetdone; +}; + +#endif /* __UCC_GETH_PHY_H__ */ diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index d3d0ec97031..ae971080e2e 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -30,8 +30,8 @@ */ #define DRV_NAME "via-rhine" -#define DRV_VERSION "1.4.0" -#define DRV_RELDATE "June-27-2006" +#define DRV_VERSION "1.4.1" +#define DRV_RELDATE "July-24-2006" /* A few user-configurable values. @@ -44,6 +44,10 @@ static int max_interrupt_work = 20; Setting to > 1518 effectively disables this feature. */ static int rx_copybreak; +/* Work-around for broken BIOSes: they are unable to get the chip back out of + power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ +static int avoid_D3; + /* * In case you are looking for 'options[]' or 'full_duplex[]', they * are gone. Use ethtool(8) instead. @@ -63,7 +67,11 @@ static const int multicast_filter_limit = 32; There are no ill effects from too-large receive rings. */ #define TX_RING_SIZE 16 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. 
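Tying the pieces of the PHY layer above together, a probe path would fill in a ugeth_mii_info, point its mdio callbacks at the MIIM accessors, and then let get_phy_info pick the matching phy_info. The sketch below is greatly simplified and the function name is hypothetical; the real driver also has to make the structure reachable from netdev_priv(dev)->mii_info and set mii_regs to the UCC's MII management block, since read_phy_reg/write_phy_reg fetch it from there:

	/* illustrative wiring of the MII bookkeeping, under the assumptions above */
	static int example_attach_phy(struct net_device *dev, int phy_addr,
				      struct ugeth_mii_info *mii_info)
	{
		mii_info->dev = dev;
		mii_info->mii_id = phy_addr;
		mii_info->autoneg = 1;
		mii_info->mdio_read = read_phy_reg;
		mii_info->mdio_write = write_phy_reg;
		spin_lock_init(&mii_info->mdio_lock);

		mii_info->phyinfo = get_phy_info(mii_info);
		if (!mii_info->phyinfo)
			return -ENODEV;

		if (mii_info->phyinfo->init)
			return mii_info->phyinfo->init(mii_info);
		return 0;
	}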
*/ +#ifdef CONFIG_VIA_RHINE_NAPI +#define RX_RING_SIZE 64 +#else #define RX_RING_SIZE 16 +#endif /* Operational parameters that usually are not changed. */ @@ -116,9 +124,11 @@ MODULE_LICENSE("GPL"); module_param(max_interrupt_work, int, 0); module_param(debug, int, 0); module_param(rx_copybreak, int, 0); +module_param(avoid_D3, bool, 0); MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); /* Theory of Operation @@ -396,7 +406,7 @@ static void rhine_tx_timeout(struct net_device *dev); static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); static void rhine_tx(struct net_device *dev); -static void rhine_rx(struct net_device *dev); +static int rhine_rx(struct net_device *dev, int limit); static void rhine_error(struct net_device *dev, int intr_status); static void rhine_set_rx_mode(struct net_device *dev); static struct net_device_stats *rhine_get_stats(struct net_device *dev); @@ -564,6 +574,32 @@ static void rhine_poll(struct net_device *dev) } #endif +#ifdef CONFIG_VIA_RHINE_NAPI +static int rhine_napipoll(struct net_device *dev, int *budget) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + int done, limit = min(dev->quota, *budget); + + done = rhine_rx(dev, limit); + *budget -= done; + dev->quota -= done; + + if (done < limit) { + netif_rx_complete(dev); + + iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | + IntrRxDropped | IntrRxNoBuf | IntrTxAborted | + IntrTxDone | IntrTxError | IntrTxUnderrun | + IntrPCIErr | IntrStatsMax | IntrLinkChange, + ioaddr + IntrEnable); + return 0; + } + else + return 1; +} +#endif + static void rhine_hw_init(struct net_device *dev, long pioaddr) { struct rhine_private *rp = netdev_priv(dev); @@ -744,6 +780,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = rhine_poll; #endif +#ifdef CONFIG_VIA_RHINE_NAPI + dev->poll = rhine_napipoll; + dev->weight = 64; +#endif if (rp->quirks & rqRhineI) dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; @@ -789,6 +829,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, } } rp->mii_if.phy_id = phy_id; + if (debug > 1 && avoid_D3) + printk(KERN_INFO "%s: No D3 power state at shutdown.\n", + dev->name); return 0; @@ -1014,6 +1057,8 @@ static void init_registers(struct net_device *dev) rhine_set_rx_mode(dev); + netif_poll_enable(dev); + /* Enable interrupts by setting the interrupt mask. 
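The via-rhine NAPI conversion above follows the polling contract of this kernel generation: the interrupt handler masks the receive sources and calls netif_rx_schedule(), and the ->poll handler processes up to the smaller of dev->quota and *budget, returning 0 after netif_rx_complete() when the ring is drained or 1 to stay on the poll list. A generic skeleton of that contract, with my_rx_process standing in for the driver's receive loop, is sketched here for orientation only:

	/* generic shape of a ->poll handler under the old netdev polling API */
	static int my_poll(struct net_device *dev, int *budget)
	{
		int limit = min(dev->quota, *budget);
		int done = my_rx_process(dev, limit);	/* placeholder rx loop */

		*budget -= done;
		dev->quota -= done;

		if (done < limit) {
			netif_rx_complete(dev);
			/* re-enable the receive interrupts masked off in the ISR */
			return 0;	/* polling finished */
		}
		return 1;		/* more work pending, stay scheduled */
	}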
*/ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | @@ -1268,8 +1313,18 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs * dev->name, intr_status); if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | - IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) - rhine_rx(dev); + IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) { +#ifdef CONFIG_VIA_RHINE_NAPI + iowrite16(IntrTxAborted | + IntrTxDone | IntrTxError | IntrTxUnderrun | + IntrPCIErr | IntrStatsMax | IntrLinkChange, + ioaddr + IntrEnable); + + netif_rx_schedule(dev); +#else + rhine_rx(dev, RX_RING_SIZE); +#endif + } if (intr_status & (IntrTxErrSummary | IntrTxDone)) { if (intr_status & IntrTxErrSummary) { @@ -1367,13 +1422,12 @@ static void rhine_tx(struct net_device *dev) spin_unlock(&rp->lock); } -/* This routine is logically part of the interrupt handler, but isolated - for clarity and better register allocation. */ -static void rhine_rx(struct net_device *dev) +/* Process up to limit frames from receive ring */ +static int rhine_rx(struct net_device *dev, int limit) { struct rhine_private *rp = netdev_priv(dev); + int count; int entry = rp->cur_rx % RX_RING_SIZE; - int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx; if (debug > 4) { printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n", @@ -1382,16 +1436,18 @@ static void rhine_rx(struct net_device *dev) } /* If EOP is set on the next entry, it's a new packet. Send it up. */ - while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) { + for (count = 0; count < limit; ++count) { struct rx_desc *desc = rp->rx_head_desc; u32 desc_status = le32_to_cpu(desc->rx_status); int data_size = desc_status >> 16; + if (desc_status & DescOwn) + break; + if (debug > 4) printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n", desc_status); - if (--boguscnt < 0) - break; + if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { if ((desc_status & RxWholePkt) != RxWholePkt) { printk(KERN_WARNING "%s: Oversized Ethernet " @@ -1460,7 +1516,11 @@ static void rhine_rx(struct net_device *dev) PCI_DMA_FROMDEVICE); } skb->protocol = eth_type_trans(skb, dev); +#ifdef CONFIG_VIA_RHINE_NAPI + netif_receive_skb(skb); +#else netif_rx(skb); +#endif dev->last_rx = jiffies; rp->stats.rx_bytes += pkt_len; rp->stats.rx_packets++; @@ -1487,6 +1547,8 @@ static void rhine_rx(struct net_device *dev) } rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); } + + return count; } /* @@ -1776,6 +1838,7 @@ static int rhine_close(struct net_device *dev) spin_lock_irq(&rp->lock); netif_stop_queue(dev); + netif_poll_disable(dev); if (debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, " @@ -1857,7 +1920,8 @@ static void rhine_shutdown (struct pci_dev *pdev) } /* Hit power state D3 (sleep) */ - iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); + if (!avoid_D3) + iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); /* TODO: Check use of pci_enable_wake() */ diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index 435e91ec462..6b63b350cd5 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -118,7 +118,7 @@ static inline void openwin(card_t *card, u8 page) static inline void set_carrier(port_t *port) { - if (!sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD) + if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD)) netif_carrier_on(port_to_dev(port)); else netif_carrier_off(port_to_dev(port)); @@ -127,10 +127,10 @@ static inline void set_carrier(port_t *port) static void 
sca_msci_intr(port_t *port) { - u8 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI ST1 status */ + u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */ - /* Reset MSCI TX underrun status bit */ - sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, port); + /* Reset MSCI TX underrun and CDCD (ignored) status bit */ + sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port); if (stat & ST1_UDRN) { struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); @@ -138,6 +138,7 @@ static void sca_msci_intr(port_t *port) stats->tx_fifo_errors++; } + stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */ /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); diff --git a/drivers/net/wd.c b/drivers/net/wd.c index 7caa8dc88a5..b1ba1872f31 100644 --- a/drivers/net/wd.c +++ b/drivers/net/wd.c @@ -500,8 +500,8 @@ MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. */ -int -init_module(void) + +int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index dafaa5ff5aa..d500012fdc7 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -1042,6 +1042,9 @@ static int prism2_reset_port(struct net_device *dev) dev->name, local->fragm_threshold); } + /* Some firmwares lose antenna selection settings on reset */ + (void) hostap_set_antsel(local); + return res; } diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c index 7f78b7801fb..bcc7038130f 100644 --- a/drivers/net/wireless/spectrum_cs.c +++ b/drivers/net/wireless/spectrum_cs.c @@ -242,7 +242,7 @@ spectrum_reset(struct pcmcia_device *link, int idle) u_int save_cor; /* Doing it if hardware is gone is guaranteed crash */ - if (pcmcia_dev_present(link)) + if (!pcmcia_dev_present(link)) return -ENODEV; /* Save original COR value */ diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index fd31885c684..ccaf28e8db0 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c @@ -467,6 +467,7 @@ static int arp_query(unsigned char *haddr, u32 paddr, struct net_device *dev) { struct neighbour *neighbor_entry; + int ret = 0; neighbor_entry = neigh_lookup(&arp_tbl, &paddr, dev); @@ -474,10 +475,11 @@ static int arp_query(unsigned char *haddr, u32 paddr, neighbor_entry->used = jiffies; if (neighbor_entry->nud_state & NUD_VALID) { memcpy(haddr, neighbor_entry->ha, dev->addr_len); - return 1; + ret = 1; } + neigh_release(neighbor_entry); } - return 0; + return ret; } static void DumpData(char *msg, struct strip *strip_info, __u8 * ptr, diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index 3fae77ffb2f..8a60f391ffc 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -76,7 +76,7 @@ config HOTPLUG_PCI_IBM config HOTPLUG_PCI_ACPI tristate "ACPI PCI Hotplug driver" - depends on ACPI_DOCK && HOTPLUG_PCI + depends on (!ACPI_DOCK && ACPI && HOTPLUG_PCI) || (ACPI_DOCK && HOTPLUG_PCI) help Say Y here if you have a system that supports PCI Hotplug using ACPI. @@ -153,13 +153,6 @@ config HOTPLUG_PCI_SHPC_POLL_EVENT_MODE When in doubt, say N. 
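The strip.c change above is a reference-count fix: neigh_lookup() returns the neighbour with its refcount raised, so every exit path must drop it with neigh_release(), which the old early return skipped. The discipline, distilled into a hypothetical helper for illustration:

	/* illustrative lookup/use/release pattern; not part of the patch */
	static int copy_neigh_hwaddr(u32 paddr, struct net_device *dev, u8 *haddr)
	{
		struct neighbour *n = neigh_lookup(&arp_tbl, &paddr, dev);
		int ret = 0;

		if (!n)
			return 0;

		if (n->nud_state & NUD_VALID) {
			memcpy(haddr, n->ha, dev->addr_len);
			ret = 1;
		}
		neigh_release(n);	/* drop the reference taken by neigh_lookup */
		return ret;
	}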
-config HOTPLUG_PCI_SHPC_PHPRM_LEGACY - bool "For AMD SHPC only: Use $HRT for resource/configuration" - depends on HOTPLUG_PCI_SHPC && !ACPI - help - Say Y here for AMD SHPC. You have to select this option if you are - using this driver on platform with AMD SHPC. - config HOTPLUG_PCI_RPA tristate "RPA PCI Hotplug driver" depends on HOTPLUG_PCI && PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 02be74caa89..4afcaffd031 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -254,8 +254,8 @@ int cpci_led_off(struct slot* slot) int cpci_configure_slot(struct slot* slot) { - unsigned char busnr; - struct pci_bus *child; + struct pci_bus *parent; + int fn; dbg("%s - enter", __FUNCTION__); @@ -276,23 +276,53 @@ int cpci_configure_slot(struct slot* slot) */ n = pci_scan_slot(slot->bus, slot->devfn); dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n); - if (n > 0) - pci_bus_add_devices(slot->bus); slot->dev = pci_get_slot(slot->bus, slot->devfn); if (slot->dev == NULL) { err("Could not find PCI device for slot %02x", slot->number); - return 1; + return -ENODEV; } } - - if (slot->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { - pci_read_config_byte(slot->dev, PCI_SECONDARY_BUS, &busnr); - child = pci_add_new_bus(slot->dev->bus, slot->dev, busnr); - pci_do_scan_bus(child); - pci_bus_size_bridges(child); + parent = slot->dev->bus; + + for (fn = 0; fn < 8; fn++) { + struct pci_dev *dev; + + dev = pci_get_slot(parent, PCI_DEVFN(PCI_SLOT(slot->devfn), fn)); + if (!dev) + continue; + if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || + (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { + /* Find an unused bus number for the new bridge */ + struct pci_bus *child; + unsigned char busnr, start = parent->secondary; + unsigned char end = parent->subordinate; + + for (busnr = start; busnr <= end; busnr++) { + if (!pci_find_bus(pci_domain_nr(parent), + busnr)) + break; + } + if (busnr >= end) { + err("No free bus for hot-added bridge\n"); + pci_dev_put(dev); + continue; + } + child = pci_add_new_bus(parent, dev, busnr); + if (!child) { + err("Cannot add new bus for %s\n", + pci_name(dev)); + pci_dev_put(dev); + continue; + } + child->subordinate = pci_do_scan_bus(child); + pci_bus_size_bridges(child); + } + pci_dev_put(dev); } - pci_bus_assign_resources(slot->dev->bus); + pci_bus_assign_resources(parent); + pci_bus_add_devices(parent); + pci_enable_bridges(parent); dbg("%s - exit", __FUNCTION__); return 0; diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index ce89f581586..eaea9d36a1b 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -279,6 +279,11 @@ struct hpc_ops { #ifdef CONFIG_ACPI +#include <acpi/acpi.h> +#include <acpi/acpi_bus.h> +#include <acpi/actypes.h> +#include <linux/pci-acpi.h> + #define pciehp_get_hp_hw_control_from_firmware(dev) \ pciehp_acpi_get_hp_hw_control_from_firmware(dev) static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 0d8fb6e607a..6ab3b6cd2b5 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -38,10 +38,6 @@ #include "../pci.h" #include "pciehp.h" -#include <acpi/acpi.h> -#include <acpi/acpi_bus.h> -#include <acpi/actypes.h> -#include <linux/pci-acpi.h> #ifdef DEBUG #define DBG_K_TRACE_ENTRY ((unsigned int)0x00000001) /* On function entry */ #define 
DBG_K_TRACE_EXIT ((unsigned int)0x00000002) /* On function exit */ diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 10e1a905c14..474e9cd0e9e 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -139,9 +139,8 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, /** * pci_match_device - Tell if a PCI device structure has a matching * PCI device id structure - * @ids: array of PCI device id structures to search in - * @dev: the PCI device structure to match against * @drv: the PCI driver to match against + * @dev: the PCI device structure to match against * * Used by a driver to check whether a PCI device present in the * system is in its list of supported devices. Returns the matching diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fb08bc951ac..73177429fe7 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -438,6 +438,7 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev) pci_read_config_dword(dev, 0x48, ®ion); quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); } +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi ); /* @@ -1091,7 +1092,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc ); static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev) { @@ -1518,6 +1518,63 @@ static void __devinit quirk_netmos(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); +static void __devinit quirk_e100_interrupt(struct pci_dev *dev) +{ + u16 command; + u32 bar; + u8 __iomem *csr; + u8 cmd_hi; + + switch (dev->device) { + /* PCI IDs taken from drivers/net/e100.c */ + case 0x1029: + case 0x1030 ... 0x1034: + case 0x1038 ... 0x103E: + case 0x1050 ... 0x1057: + case 0x1059: + case 0x1064 ... 0x106B: + case 0x1091 ... 0x1095: + case 0x1209: + case 0x1229: + case 0x2449: + case 0x2459: + case 0x245D: + case 0x27DC: + break; + default: + return; + } + + /* + * Some firmware hands off the e100 with interrupts enabled, + * which can cause a flood of interrupts if packets are + * received before the driver attaches to the device. So + * disable all e100 interrupts here. The driver will + * re-enable them when it's ready. 
+ */ + pci_read_config_word(dev, PCI_COMMAND, &command); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar); + + if (!(command & PCI_COMMAND_MEMORY) || !bar) + return; + + csr = ioremap(bar, 8); + if (!csr) { + printk(KERN_WARNING "PCI: Can't map %s e100 registers\n", + pci_name(dev)); + return; + } + + cmd_hi = readb(csr + 3); + if (cmd_hi == 0) { + printk(KERN_WARNING "PCI: Firmware left %s e100 interrupts " + "enabled, disabling\n", pci_name(dev)); + writeb(1, csr + 3); + } + + iounmap(csr); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt); static void __devinit fixup_rev1_53c810(struct pci_dev* dev) { diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index d6d1bff52b8..2c7de79c83b 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -69,12 +69,12 @@ static void s3c_rtc_setaie(int to) pr_debug("%s: aie=%d\n", __FUNCTION__, to); - tmp = readb(S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; + tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; if (to) tmp |= S3C2410_RTCALM_ALMEN; - writeb(tmp, S3C2410_RTCALM); + writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); } static void s3c_rtc_setpie(int to) @@ -84,12 +84,12 @@ static void s3c_rtc_setpie(int to) pr_debug("%s: pie=%d\n", __FUNCTION__, to); spin_lock_irq(&s3c_rtc_pie_lock); - tmp = readb(S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; + tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; if (to) tmp |= S3C2410_TICNT_ENABLE; - writeb(tmp, S3C2410_TICNT); + writeb(tmp, s3c_rtc_base + S3C2410_TICNT); spin_unlock_irq(&s3c_rtc_pie_lock); } @@ -98,13 +98,13 @@ static void s3c_rtc_setfreq(int freq) unsigned int tmp; spin_lock_irq(&s3c_rtc_pie_lock); - tmp = readb(S3C2410_TICNT) & S3C2410_TICNT_ENABLE; + tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE; s3c_rtc_freq = freq; tmp |= (128 / freq)-1; - writeb(tmp, S3C2410_TICNT); + writeb(tmp, s3c_rtc_base + S3C2410_TICNT); spin_unlock_irq(&s3c_rtc_pie_lock); } @@ -113,14 +113,15 @@ static void s3c_rtc_setfreq(int freq) static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) { unsigned int have_retried = 0; + void __iomem *base = s3c_rtc_base; retry_get_time: - rtc_tm->tm_min = readb(S3C2410_RTCMIN); - rtc_tm->tm_hour = readb(S3C2410_RTCHOUR); - rtc_tm->tm_mday = readb(S3C2410_RTCDATE); - rtc_tm->tm_mon = readb(S3C2410_RTCMON); - rtc_tm->tm_year = readb(S3C2410_RTCYEAR); - rtc_tm->tm_sec = readb(S3C2410_RTCSEC); + rtc_tm->tm_min = readb(base + S3C2410_RTCMIN); + rtc_tm->tm_hour = readb(base + S3C2410_RTCHOUR); + rtc_tm->tm_mday = readb(base + S3C2410_RTCDATE); + rtc_tm->tm_mon = readb(base + S3C2410_RTCMON); + rtc_tm->tm_year = readb(base + S3C2410_RTCYEAR); + rtc_tm->tm_sec = readb(base + S3C2410_RTCSEC); /* the only way to work out wether the system was mid-update * when we read it is to check the second counter, and if it @@ -151,17 +152,26 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) { - /* the rtc gets round the y2k problem by just not supporting it */ + void __iomem *base = s3c_rtc_base; + int year = tm->tm_year - 100; - if (tm->tm_year < 100) + pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n", + tm->tm_year, tm->tm_mon, tm->tm_mday, + tm->tm_hour, tm->tm_min, tm->tm_sec); + + /* we get around y2k by simply not supporting it */ + + if (year < 0 || year >= 100) { + dev_err(dev, "rtc only supports 100 years\n"); return -EINVAL; + } - writeb(BIN2BCD(tm->tm_sec), S3C2410_RTCSEC); - 
writeb(BIN2BCD(tm->tm_min), S3C2410_RTCMIN); - writeb(BIN2BCD(tm->tm_hour), S3C2410_RTCHOUR); - writeb(BIN2BCD(tm->tm_mday), S3C2410_RTCDATE); - writeb(BIN2BCD(tm->tm_mon + 1), S3C2410_RTCMON); - writeb(BIN2BCD(tm->tm_year - 100), S3C2410_RTCYEAR); + writeb(BIN2BCD(tm->tm_sec), base + S3C2410_RTCSEC); + writeb(BIN2BCD(tm->tm_min), base + S3C2410_RTCMIN); + writeb(BIN2BCD(tm->tm_hour), base + S3C2410_RTCHOUR); + writeb(BIN2BCD(tm->tm_mday), base + S3C2410_RTCDATE); + writeb(BIN2BCD(tm->tm_mon + 1), base + S3C2410_RTCMON); + writeb(BIN2BCD(year), base + S3C2410_RTCYEAR); return 0; } @@ -169,16 +179,17 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_time *alm_tm = &alrm->time; + void __iomem *base = s3c_rtc_base; unsigned int alm_en; - alm_tm->tm_sec = readb(S3C2410_ALMSEC); - alm_tm->tm_min = readb(S3C2410_ALMMIN); - alm_tm->tm_hour = readb(S3C2410_ALMHOUR); - alm_tm->tm_mon = readb(S3C2410_ALMMON); - alm_tm->tm_mday = readb(S3C2410_ALMDATE); - alm_tm->tm_year = readb(S3C2410_ALMYEAR); + alm_tm->tm_sec = readb(base + S3C2410_ALMSEC); + alm_tm->tm_min = readb(base + S3C2410_ALMMIN); + alm_tm->tm_hour = readb(base + S3C2410_ALMHOUR); + alm_tm->tm_mon = readb(base + S3C2410_ALMMON); + alm_tm->tm_mday = readb(base + S3C2410_ALMDATE); + alm_tm->tm_year = readb(base + S3C2410_ALMYEAR); - alm_en = readb(S3C2410_RTCALM); + alm_en = readb(base + S3C2410_RTCALM); pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n", alm_en, @@ -226,6 +237,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_time *tm = &alrm->time; + void __iomem *base = s3c_rtc_base; unsigned int alrm_en; pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n", @@ -234,32 +246,32 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec); - alrm_en = readb(S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; - writeb(0x00, S3C2410_RTCALM); + alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; + writeb(0x00, base + S3C2410_RTCALM); if (tm->tm_sec < 60 && tm->tm_sec >= 0) { alrm_en |= S3C2410_RTCALM_SECEN; - writeb(BIN2BCD(tm->tm_sec), S3C2410_ALMSEC); + writeb(BIN2BCD(tm->tm_sec), base + S3C2410_ALMSEC); } if (tm->tm_min < 60 && tm->tm_min >= 0) { alrm_en |= S3C2410_RTCALM_MINEN; - writeb(BIN2BCD(tm->tm_min), S3C2410_ALMMIN); + writeb(BIN2BCD(tm->tm_min), base + S3C2410_ALMMIN); } if (tm->tm_hour < 24 && tm->tm_hour >= 0) { alrm_en |= S3C2410_RTCALM_HOUREN; - writeb(BIN2BCD(tm->tm_hour), S3C2410_ALMHOUR); + writeb(BIN2BCD(tm->tm_hour), base + S3C2410_ALMHOUR); } pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en); - writeb(alrm_en, S3C2410_RTCALM); + writeb(alrm_en, base + S3C2410_RTCALM); if (0) { - alrm_en = readb(S3C2410_RTCALM); + alrm_en = readb(base + S3C2410_RTCALM); alrm_en &= ~S3C2410_RTCALM_ALMEN; - writeb(alrm_en, S3C2410_RTCALM); + writeb(alrm_en, base + S3C2410_RTCALM); disable_irq_wake(s3c_rtc_alarmno); } @@ -319,8 +331,8 @@ static int s3c_rtc_ioctl(struct device *dev, static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) { - unsigned int rtcalm = readb(S3C2410_RTCALM); - unsigned int ticnt = readb (S3C2410_TICNT); + unsigned int rtcalm = readb(s3c_rtc_base + S3C2410_RTCALM); + unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT); seq_printf(seq, "alarm_IRQ\t: %s\n", (rtcalm & S3C2410_RTCALM_ALMEN) ? 
"yes" : "no" ); @@ -387,39 +399,40 @@ static struct rtc_class_ops s3c_rtcops = { static void s3c_rtc_enable(struct platform_device *pdev, int en) { + void __iomem *base = s3c_rtc_base; unsigned int tmp; if (s3c_rtc_base == NULL) return; if (!en) { - tmp = readb(S3C2410_RTCCON); - writeb(tmp & ~S3C2410_RTCCON_RTCEN, S3C2410_RTCCON); + tmp = readb(base + S3C2410_RTCCON); + writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON); - tmp = readb(S3C2410_TICNT); - writeb(tmp & ~S3C2410_TICNT_ENABLE, S3C2410_TICNT); + tmp = readb(base + S3C2410_TICNT); + writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT); } else { /* re-enable the device, and check it is ok */ - if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){ + if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){ dev_info(&pdev->dev, "rtc disabled, re-enabling\n"); - tmp = readb(S3C2410_RTCCON); - writeb(tmp | S3C2410_RTCCON_RTCEN , S3C2410_RTCCON); + tmp = readb(base + S3C2410_RTCCON); + writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON); } - if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){ + if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){ dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n"); - tmp = readb(S3C2410_RTCCON); - writeb(tmp& ~S3C2410_RTCCON_CNTSEL , S3C2410_RTCCON); + tmp = readb(base + S3C2410_RTCCON); + writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON); } - if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){ + if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){ dev_info(&pdev->dev, "removing RTCCON_CLKRST\n"); - tmp = readb(S3C2410_RTCCON); - writeb(tmp & ~S3C2410_RTCCON_CLKRST, S3C2410_RTCCON); + tmp = readb(base + S3C2410_RTCCON); + writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON); } } } @@ -475,8 +488,8 @@ static int s3c_rtc_probe(struct platform_device *pdev) } s3c_rtc_mem = request_mem_region(res->start, - res->end-res->start+1, - pdev->name); + res->end-res->start+1, + pdev->name); if (s3c_rtc_mem == NULL) { dev_err(&pdev->dev, "failed to reserve memory region\n"); @@ -495,7 +508,8 @@ static int s3c_rtc_probe(struct platform_device *pdev) s3c_rtc_enable(pdev, 1); - pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(S3C2410_RTCCON)); + pr_debug("s3c2410_rtc: RTCCON=%02x\n", + readb(s3c_rtc_base + S3C2410_RTCCON)); s3c_rtc_setfreq(s3c_rtc_freq); @@ -543,7 +557,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) /* save TICNT for anyone using periodic interrupts */ - ticnt_save = readb(S3C2410_TICNT); + ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); /* calculate time delta for suspend */ @@ -567,7 +581,7 @@ static int s3c_rtc_resume(struct platform_device *pdev) rtc_tm_to_time(&tm, &time.tv_sec); restore_time_delta(&s3c_rtc_delta, &time); - writeb(ticnt_save, S3C2410_TICNT); + writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); return 0; } #else diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 4bf03fb67f8..d8e9b95f0a1 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1730,8 +1730,8 @@ dasd_flush_request_queue(struct dasd_device * device) req = elv_next_request(device->request_queue); if (req == NULL) break; - dasd_end_request(req, 0); blkdev_dequeue_request(req); + dasd_end_request(req, 0); } spin_unlock_irq(&device->request_queue_lock); } diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 7f6fdac7470..9af02c79ce8 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -48,18 +48,20 @@ struct dasd_devmap { }; /* - * 
dasd_servermap is used to store the server_id of all storage servers - * accessed by DASD device driver. + * dasd_server_ssid_map contains a globally unique storage server subsystem ID. + * dasd_server_ssid_list contains the list of all subsystem IDs accessed by + * the DASD device driver. */ -struct dasd_servermap { +struct dasd_server_ssid_map { struct list_head list; - struct server_id { + struct system_id { char vendor[4]; char serial[15]; + __u16 ssid; } sid; }; -static struct list_head dasd_serverlist; +static struct list_head dasd_server_ssid_list; /* * Parameter parsing functions for dasd= parameter. The syntax is: @@ -89,7 +91,7 @@ static char *dasd[256]; module_param_array(dasd, charp, NULL, 0); /* - * Single spinlock to protect devmap structures and lists. + * Single spinlock to protect devmap and servermap structures and lists. */ static DEFINE_SPINLOCK(dasd_devmap_lock); @@ -264,8 +266,9 @@ dasd_parse_keyword( char *parsestring ) { if (dasd_page_cache) return residual_str; dasd_page_cache = - kmem_cache_create("dasd_page_cache", PAGE_SIZE, 0, - SLAB_CACHE_DMA, NULL, NULL ); + kmem_cache_create("dasd_page_cache", PAGE_SIZE, + PAGE_SIZE, SLAB_CACHE_DMA, + NULL, NULL ); if (!dasd_page_cache) MESSAGE(KERN_WARNING, "%s", "Failed to create slab, " "fixed buffer mode disabled."); @@ -859,39 +862,6 @@ static struct attribute_group dasd_attr_group = { }; /* - * Check if the related storage server is already contained in the - * dasd_serverlist. If server is not contained, create new entry. - * Return 0 if server was already in serverlist, - * 1 if the server was added successfully - * <0 in case of error. - */ -static int -dasd_add_server(struct dasd_uid *uid) -{ - struct dasd_servermap *new, *tmp; - - /* check if server is already contained */ - list_for_each_entry(tmp, &dasd_serverlist, list) - // normale cmp? - if (strncmp(tmp->sid.vendor, uid->vendor, - sizeof(tmp->sid.vendor)) == 0 - && strncmp(tmp->sid.serial, uid->serial, - sizeof(tmp->sid.serial)) == 0) - return 0; - - new = (struct dasd_servermap *) - kzalloc(sizeof(struct dasd_servermap), GFP_KERNEL); - if (!new) - return -ENOMEM; - - strncpy(new->sid.vendor, uid->vendor, sizeof(new->sid.vendor)); - strncpy(new->sid.serial, uid->serial, sizeof(new->sid.serial)); - list_add(&new->list, &dasd_serverlist); - return 1; -} - - -/* * Return copy of the device unique identifier. */ int @@ -910,6 +880,9 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid) /* * Register the given device unique identifier into devmap struct. + * In addition check if the related storage server subsystem ID is already + * contained in the dasd_server_ssid_list. If subsystem ID is not contained, + * create new entry. * Return 0 if server was already in serverlist, * 1 if the server was added successful * <0 in case of error. @@ -918,16 +891,39 @@ int dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) { struct dasd_devmap *devmap; - int rc; + struct dasd_server_ssid_map *srv, *tmp; devmap = dasd_find_busid(cdev->dev.bus_id); if (IS_ERR(devmap)) return PTR_ERR(devmap); + + /* generate entry for server_ssid_map */ + srv = (struct dasd_server_ssid_map *) + kzalloc(sizeof(struct dasd_server_ssid_map), GFP_KERNEL); + if (!srv) + return -ENOMEM; + strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1); + strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1); + srv->sid.ssid = uid->ssid; + + /* server is already contained ? 
*/ spin_lock(&dasd_devmap_lock); devmap->uid = *uid; - rc = dasd_add_server(uid); + list_for_each_entry(tmp, &dasd_server_ssid_list, list) { + if (!memcmp(&srv->sid, &tmp->sid, + sizeof(struct system_id))) { + kfree(srv); + srv = NULL; + break; + } + } + + /* add servermap to serverlist */ + if (srv) + list_add(&srv->list, &dasd_server_ssid_list); spin_unlock(&dasd_devmap_lock); - return rc; + + return (srv ? 1 : 0); } EXPORT_SYMBOL_GPL(dasd_set_uid); @@ -995,7 +991,7 @@ dasd_devmap_init(void) INIT_LIST_HEAD(&dasd_hashlists[i]); /* Initialize servermap structure. */ - INIT_LIST_HEAD(&dasd_serverlist); + INIT_LIST_HEAD(&dasd_server_ssid_list); return 0; } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 39c2281371b..b7a7fac3f7c 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -468,11 +468,11 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid) return -ENODEV; memset(uid, 0, sizeof(struct dasd_uid)); - strncpy(uid->vendor, confdata->ned1.HDA_manufacturer, - sizeof(uid->vendor) - 1); + memcpy(uid->vendor, confdata->ned1.HDA_manufacturer, + sizeof(uid->vendor) - 1); EBCASC(uid->vendor, sizeof(uid->vendor) - 1); - strncpy(uid->serial, confdata->ned1.HDA_location, - sizeof(uid->serial) - 1); + memcpy(uid->serial, confdata->ned1.HDA_location, + sizeof(uid->serial) - 1); EBCASC(uid->serial, sizeof(uid->serial) - 1); uid->ssid = confdata->neq.subsystemID; if (confdata->ned2.sneq.flags == 0x40) { @@ -607,7 +607,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device) * Valide storage server of current device. */ static int -dasd_eckd_validate_server(struct dasd_device *device) +dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid) { int rc; @@ -616,11 +616,11 @@ dasd_eckd_validate_server(struct dasd_device *device) return 0; rc = dasd_eckd_psf_ssc(device); - if (rc) - /* may be requested feature is not available on server, - * therefore just report error and go ahead */ - DEV_MESSAGE(KERN_INFO, device, - "Perform Subsystem Function returned rc=%d", rc); + /* the requested feature may not be available on the server, + * therefore just report the error and go ahead */ + DEV_MESSAGE(KERN_INFO, device, + "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d", + uid->vendor, uid->serial, uid->ssid, rc); /* RE-Read Configuration Data */ return dasd_eckd_read_conf(device); } @@ -666,7 +666,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device) return rc; rc = dasd_set_uid(device->cdev, &uid); if (rc == 1) /* new server found */ - rc = dasd_eckd_validate_server(device); + rc = dasd_eckd_validate_server(device, &uid); if (rc) return rc; diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 1140302ff11..ca7d51f7ecc 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -48,15 +48,6 @@ #define PRINT_ERR(x...)
printk(KERN_ERR XPRAM_NAME " error:" x) -static struct sysdev_class xpram_sysclass = { - set_kset_name("xpram"), -}; - -static struct sys_device xpram_sys_device = { - .id = 0, - .cls = &xpram_sysclass, -}; - typedef struct { unsigned int size; /* size of xpram segment in pages */ unsigned int offset; /* start page of xpram segment */ @@ -451,8 +442,6 @@ static void __exit xpram_exit(void) } unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); blk_cleanup_queue(xpram_queue); - sysdev_unregister(&xpram_sys_device); - sysdev_class_unregister(&xpram_sysclass); } static int __init xpram_init(void) @@ -470,19 +459,7 @@ static int __init xpram_init(void) rc = xpram_setup_sizes(xpram_pages); if (rc) return rc; - rc = sysdev_class_register(&xpram_sysclass); - if (rc) - return rc; - - rc = sysdev_register(&xpram_sys_device); - if (rc) { - sysdev_class_unregister(&xpram_sysclass); - return rc; - } - rc = xpram_setup_blkdev(); - if (rc) - sysdev_unregister(&xpram_sys_device); - return rc; + return xpram_setup_blkdev(); } module_init(xpram_init); diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index 643b6d07856..56b87618b10 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c @@ -76,7 +76,7 @@ struct tape_class_device *register_tape_dev( device, "%s", tcd->device_name ); - rc = PTR_ERR(tcd->class_device); + rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0; if (rc) goto fail_with_cdev; rc = sysfs_create_link( diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 7a39e0b0386..6d91c2eb205 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -772,6 +772,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) stsch(sch->schid, &sch->schib); if (sch->schib.scsw.actl != 0 || + (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { /* * No final status yet or final status not yet delivered diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index a60124264be..9e3de0bd59b 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -263,6 +263,9 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) /* Abuse intparm for error reporting. */ if (IS_ERR(irb)) cdev->private->intparm = -EIO; + else if (irb->scsw.cc == 1) + /* Retry for deferred condition code. 
*/ + cdev->private->intparm = -EAGAIN; else if ((irb->scsw.dstat != (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || (irb->scsw.cstat != 0)) { diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 5fff1f93973..e1327b8fce0 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -8510,9 +8510,9 @@ static int qeth_ipv6_init(void) { qeth_old_arp_constructor = arp_tbl.constructor; - write_lock(&arp_tbl.lock); + write_lock_bh(&arp_tbl.lock); arp_tbl.constructor = qeth_arp_constructor; - write_unlock(&arp_tbl.lock); + write_unlock_bh(&arp_tbl.lock); arp_direct_ops = (struct neigh_ops*) kmalloc(sizeof(struct neigh_ops), GFP_KERNEL); @@ -8528,9 +8528,9 @@ qeth_ipv6_init(void) static void qeth_ipv6_uninit(void) { - write_lock(&arp_tbl.lock); + write_lock_bh(&arp_tbl.lock); arp_tbl.constructor = qeth_old_arp_constructor; - write_unlock(&arp_tbl.lock); + write_unlock_bh(&arp_tbl.lock); kfree(arp_direct_ops); } #endif /* CONFIG_QETH_IPV6 */ diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 9cd789b8acd..adc9d8f2c28 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count) printk("\n"); } + +/****************************************************************/ +/****** Functions to handle the request ID hash table ********/ +/****************************************************************/ + +#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF + +static int zfcp_reqlist_init(struct zfcp_adapter *adapter) +{ + int i; + + adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head), + GFP_KERNEL); + + if (!adapter->req_list) + return -ENOMEM; + + for (i=0; i<REQUEST_LIST_SIZE; i++) + INIT_LIST_HEAD(&adapter->req_list[i]); + + return 0; +} + +static void zfcp_reqlist_free(struct zfcp_adapter *adapter) +{ + struct zfcp_fsf_req *request, *tmp; + unsigned int i; + + for (i=0; i<REQUEST_LIST_SIZE; i++) { + if (list_empty(&adapter->req_list[i])) + continue; + + list_for_each_entry_safe(request, tmp, + &adapter->req_list[i], list) + list_del(&request->list); + } + + kfree(adapter->req_list); +} + +void zfcp_reqlist_add(struct zfcp_adapter *adapter, + struct zfcp_fsf_req *fsf_req) +{ + unsigned int i; + + i = fsf_req->req_id % REQUEST_LIST_SIZE; + list_add_tail(&fsf_req->list, &adapter->req_list[i]); +} + +void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id) +{ + struct zfcp_fsf_req *request, *tmp; + unsigned int i, counter; + u64 dbg_tmp[2]; + + i = req_id % REQUEST_LIST_SIZE; + BUG_ON(list_empty(&adapter->req_list[i])); + + counter = 0; + list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) { + if (request->req_id == req_id) { + dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); + dbg_tmp[1] = (u64) counter; + debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); + list_del(&request->list); + break; + } + counter++; + } +} + +struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter, + unsigned long req_id) +{ + struct zfcp_fsf_req *request, *tmp; + unsigned int i; + + i = req_id % REQUEST_LIST_SIZE; + + list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) + if (request->req_id == req_id) + return request; + + return NULL; +} + +int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) +{ + unsigned int i; + + for (i=0; i<REQUEST_LIST_SIZE; i++) + if (!list_empty(&adapter->req_list[i])) + return 0; + + return 1; +} + +#undef ZFCP_LOG_AREA + 
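[Editor's aside, not part of the patch: the zfcp_reqlist_* helpers added above keep pending FSF requests in a fixed-size table of lists indexed by req_id % REQUEST_LIST_SIZE, so a completion can be matched to its request without trusting a raw pointer from the hardware. The following is a minimal user-space sketch of that bucketed lookup scheme under simplifying assumptions: names such as NBUCKETS, struct req and req_add/req_find/req_remove are invented for illustration, a singly linked chain stands in for the kernel's list_head, and locking is omitted.]

/* Illustrative sketch only -- not kernel code.  A fixed-size bucket
 * table of pending requests, keyed by id % NBUCKETS, mirroring the
 * zfcp_reqlist_add/remove/ismember scheme in the patch above. */
#include <stdio.h>

#define NBUCKETS 128			/* stands in for REQUEST_LIST_SIZE */

struct req {
	unsigned long id;		/* unique request id */
	struct req *next;		/* bucket chain */
};

static struct req *buckets[NBUCKETS];

static void req_add(struct req *r)
{
	unsigned int i = r->id % NBUCKETS;

	r->next = buckets[i];		/* push onto the bucket's chain */
	buckets[i] = r;
}

static struct req *req_find(unsigned long id)
{
	struct req *r;

	for (r = buckets[id % NBUCKETS]; r; r = r->next)
		if (r->id == id)
			return r;
	return NULL;			/* unknown id, e.g. stale completion */
}

static void req_remove(unsigned long id)
{
	struct req **p = &buckets[id % NBUCKETS];

	for (; *p; p = &(*p)->next) {
		if ((*p)->id == id) {
			*p = (*p)->next;	/* unlink from the chain */
			return;
		}
	}
}

int main(void)
{
	struct req a = { .id = 5 }, b = { .id = 5 + NBUCKETS };

	req_add(&a);
	req_add(&b);			/* collides with a in bucket 5 */
	printf("find 5: %s\n", req_find(5) ? "hit" : "miss");
	req_remove(5);
	printf("find 5 after remove: %s\n", req_find(5) ? "hit" : "miss");
	printf("find %d: %s\n", 5 + NBUCKETS,
	       req_find(5 + NBUCKETS) ? "hit" : "miss");
	return 0;
}

[The design choice the patch makes is the same as in this sketch: the request id, not the request's address, is what travels through the hardware (sbale[0].addr and the QTCB request handle later in this series of hunks), so an unknown or stale id can be detected and rejected instead of being dereferenced.]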
/****************************************************************/ /************** Uncategorised Functions *************************/ /****************************************************************/ @@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) INIT_LIST_HEAD(&adapter->port_remove_lh); /* initialize list of fsf requests */ - spin_lock_init(&adapter->fsf_req_list_lock); - INIT_LIST_HEAD(&adapter->fsf_req_list_head); + spin_lock_init(&adapter->req_list_lock); + retval = zfcp_reqlist_init(adapter); + if (retval) { + ZFCP_LOG_INFO("request list initialization failed\n"); + goto failed_low_mem_buffers; + } /* initialize debug locks */ @@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) * !0 - struct zfcp_adapter data structure could not be removed * (e.g. still used) * locks: adapter list write lock is assumed to be held by caller - * adapter->fsf_req_list_lock is taken and released within this - * function and must not be held on entry */ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) @@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); dev_set_drvdata(&adapter->ccw_device->dev, NULL); /* sanity check: no pending FSF requests */ - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - retval = !list_empty(&adapter->fsf_req_list_head); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - if (retval) { + spin_lock_irqsave(&adapter->req_list_lock, flags); + retval = zfcp_reqlist_isempty(adapter); + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + if (!retval) { ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " "%i requests outstanding\n", zfcp_get_busid_by_adapter(adapter), adapter, - atomic_read(&adapter->fsf_reqs_active)); + atomic_read(&adapter->reqs_active)); retval = -EBUSY; goto out; } @@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) zfcp_free_low_mem_buffers(adapter); /* free memory of adapter data structure and queues */ zfcp_qdio_free_queues(adapter); + zfcp_reqlist_free(adapter); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); ZFCP_LOG_TRACE("freeing adapter structure\n"); diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 57d8e4bfb8d..fdabadeaa9e 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device) retval = zfcp_adapter_scsi_register(adapter); if (retval) goto out_scsi_register; + + /* initialize request counter */ + BUG_ON(!zfcp_reqlist_isempty(adapter)); + adapter->req_no = 0; + zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 2df512a18e2..94d1b74db35 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -52,7 +52,7 @@ /********************* GENERAL DEFINES *********************************/ /* zfcp version number, it consists of major, minor, and patch-level number */ -#define ZFCP_VERSION "4.7.0" +#define ZFCP_VERSION "4.8.0" /** * zfcp_sg_to_address - determine kernel address from struct scatterlist @@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list) #define REQUEST_LIST_SIZE 128 /********************* SCSI SPECIFIC DEFINES *********************************/ -#define ZFCP_SCSI_ER_TIMEOUT (100*HZ) +#define ZFCP_SCSI_ER_TIMEOUT (10*HZ) 
/********************* CIO/QDIO SPECIFIC DEFINES *****************************/ @@ -886,11 +886,11 @@ struct zfcp_adapter { struct list_head port_remove_lh; /* head of ports to be removed */ u32 ports; /* number of remote ports */ - struct timer_list scsi_er_timer; /* SCSI err recovery watch */ - struct list_head fsf_req_list_head; /* head of FSF req list */ - spinlock_t fsf_req_list_lock; /* lock for ops on list of - FSF requests */ - atomic_t fsf_reqs_active; /* # active FSF reqs */ + struct timer_list scsi_er_timer; /* SCSI err recovery watch */ + atomic_t reqs_active; /* # active FSF reqs */ + unsigned long req_no; /* unique FSF req number */ + struct list_head *req_list; /* list of pending reqs */ + spinlock_t req_list_lock; /* request list lock */ struct zfcp_qdio_queue request_queue; /* request queue */ u32 fsf_req_seq_no; /* FSF cmnd seq number */ wait_queue_head_t request_wq; /* can be used to wait for @@ -986,6 +986,7 @@ struct zfcp_unit { /* FSF request */ struct zfcp_fsf_req { struct list_head list; /* list of FSF requests */ + unsigned long req_id; /* unique request ID */ struct zfcp_adapter *adapter; /* adapter request belongs to */ u8 sbal_number; /* nr of SBALs free for use */ u8 sbal_first; /* first SBAL for this request */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 8ec8da0beaa..7f60b6fdf72 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int); static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); -static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); +static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); +static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); @@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *); static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); -static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); -static int zfcp_erp_action_dismiss_port(struct zfcp_port *); -static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *); -static int zfcp_erp_action_dismiss(struct zfcp_erp_action *); +static void zfcp_erp_action_dismiss_port(struct zfcp_port *); +static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *); +static void zfcp_erp_action_dismiss(struct zfcp_erp_action *); static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, struct zfcp_port *, struct zfcp_unit *); @@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data) zfcp_erp_adapter_reopen(adapter, 0); } -/* - * function: zfcp_fsf_scsi_er_timeout_handler - * - * purpose: This function needs to be called whenever a SCSI error recovery - * action (abort/reset) does not return. - * Re-opening the adapter means that the command can be returned - * by zfcp (it is guarranteed that it does not return via the - * adapter anymore). The buffer can then be used again. 
- * - * returns: sod all +/** + * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks + * + * This function needs to be called whenever a SCSI error recovery + * action (abort/reset) does not return. Re-opening the adapter means + * that the abort/reset command can be returned by zfcp. It won't complete + * via the adapter anymore (because qdio queues are closed). If ERP is + * already running on this adapter it will be stopped. */ -void -zfcp_fsf_scsi_er_timeout_handler(unsigned long data) +void zfcp_fsf_scsi_er_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; + unsigned long flags; ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " "Restarting all operations on the adapter %s\n", zfcp_get_busid_by_adapter(adapter)); debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); - zfcp_erp_adapter_reopen(adapter, 0); - return; + write_lock_irqsave(&adapter->erp_lock, flags); + if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, + &adapter->status)) { + zfcp_erp_modify_adapter_status(adapter, + ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, + ZFCP_CLEAR); + zfcp_erp_action_dismiss_adapter(adapter); + write_unlock_irqrestore(&adapter->erp_lock, flags); + /* dismiss all pending requests including requests for ERP */ + zfcp_fsf_req_dismiss_all(adapter); + adapter->fsf_req_seq_no = 0; + } else + write_unlock_irqrestore(&adapter->erp_lock, flags); + zfcp_erp_adapter_reopen(adapter, 0); } /* @@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask) return retval; } -/* - * function: - * - * purpose: disable I/O, - * return any open requests and clean them up, - * aim: no pending and incoming I/O - * - * returns: +/** + * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests */ -static void -zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) +static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) { debug_text_event(adapter->erp_dbf, 6, "a_bl"); zfcp_erp_modify_adapter_status(adapter, @@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) clear_mask, ZFCP_CLEAR); } -/* - * function: - * - * purpose: enable I/O - * - * returns: +/** + * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests */ -static void -zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) +static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { debug_text_event(adapter->erp_dbf, 6, "a_ubl"); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); @@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) struct zfcp_adapter *adapter = erp_action->adapter; if (erp_action->fsf_req) { - /* take lock to ensure that request is not being deleted meanwhile */ - spin_lock(&adapter->fsf_req_list_lock); - /* check whether fsf req does still exist */ - list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) - if (fsf_req == erp_action->fsf_req) - break; - if (fsf_req && (fsf_req->erp_action == erp_action)) { + /* take lock to ensure that request is not deleted meanwhile */ + spin_lock(&adapter->req_list_lock); + if ((!zfcp_reqlist_ismember(adapter, + erp_action->fsf_req->req_id)) && + (fsf_req->erp_action == erp_action)) { /* fsf_req still exists */ debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); debug_event(adapter->erp_dbf, 3, &fsf_req, sizeof (unsigned long)); - /* dismiss fsf_req of timed out or dismissed erp_action */ + /* dismiss fsf_req of timed 
out/dismissed erp_action */ if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | ZFCP_STATUS_ERP_TIMEDOUT)) { debug_text_event(adapter->erp_dbf, 3, @@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) */ erp_action->fsf_req = NULL; } - spin_unlock(&adapter->fsf_req_list_lock); + spin_unlock(&adapter->req_list_lock); } else debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); return retval; } -/* - * purpose: generic handler for asynchronous events related to erp_action events - * (normal completion, time-out, dismissing, retry after - * low memory condition) - * - * note: deletion of timer is not required (e.g. in case of a time-out), - * but a second try does no harm, - * we leave it in here to allow for greater simplification +/** + * zfcp_erp_async_handler_nolock - complete erp_action * - * returns: 0 - there was an action to handle - * !0 - otherwise + * Used for normal completion, time-out, dismissal and failure after + * low memory condition. */ -static int -zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, - unsigned long set_mask) +static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, + unsigned long set_mask) { - int retval; struct zfcp_adapter *adapter = erp_action->adapter; if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { @@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, del_timer(&erp_action->timer); erp_action->status |= set_mask; zfcp_erp_action_ready(erp_action); - retval = 0; } else { /* action is ready or gone - nothing to do */ debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int)); - retval = 1; } - - return retval; } -/* - * purpose: generic handler for asynchronous events related to erp_action - * events (normal completion, time-out, dismissing, retry after - * low memory condition) - * - * note: deletion of timer is not required (e.g. in case of a time-out), - * but a second try does no harm, - * we leave it in here to allow for greater simplification - * - * returns: 0 - there was an action to handle - * !0 - otherwise +/** + * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking */ -int -zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, - unsigned long set_mask) +void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, + unsigned long set_mask) { struct zfcp_adapter *adapter = erp_action->adapter; unsigned long flags; - int retval; write_lock_irqsave(&adapter->erp_lock, flags); - retval = zfcp_erp_async_handler_nolock(erp_action, set_mask); + zfcp_erp_async_handler_nolock(erp_action, set_mask); write_unlock_irqrestore(&adapter->erp_lock, flags); - - return retval; } /* @@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data) zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); } -/* - * purpose: is called for an erp_action which needs to be ended - * though not being done, - * this is usually required if an higher is generated, - * action gets an appropriate flag and will be processed - * accordingly +/** + * zfcp_erp_action_dismiss - dismiss an erp_action * - * locks: erp_lock held (thus we need to call another handler variant) + * adapter->erp_lock must be held + * + * Dismissal of an erp_action is usually required if an erp_action of + * higher priority is generated. 
*/ -static int -zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) +static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) { struct zfcp_adapter *adapter = erp_action->adapter; @@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); - - return 0; } int @@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) return retval; } -/* - * function: zfcp_qdio_cleanup - * - * purpose: cleans up QDIO operation for the specified adapter - * - * returns: 0 - successful cleanup - * !0 - failed cleanup +/** + * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter */ -int +static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) { - int retval = ZFCP_ERP_SUCCEEDED; int first_used; int used_count; struct zfcp_adapter *adapter = erp_action->adapter; @@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO " "queues on adapter %s\n", zfcp_get_busid_by_adapter(adapter)); - retval = ZFCP_ERP_FAILED; - goto out; + return; } /* * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that * do_QDIO won't be called while qdio_shutdown is in progress. */ - write_lock_irq(&adapter->request_queue.queue_lock); atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); write_unlock_irq(&adapter->request_queue.queue_lock); @@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) adapter->request_queue.free_index = 0; atomic_set(&adapter->request_queue.free_count, 0); adapter->request_queue.distance_from_int = 0; - out: - return retval; } static int @@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) "%s)\n", zfcp_get_busid_by_adapter(adapter)); ret = ZFCP_ERP_FAILED; } - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { - ZFCP_LOG_INFO("error: exchange port data failed (adapter " + + /* don't treat as error for the sake of compatibility */ + if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) + ZFCP_LOG_INFO("warning: exchange port data failed (adapter " "%s\n", zfcp_get_busid_by_adapter(adapter)); - ret = ZFCP_ERP_FAILED; - } return ret; } @@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action return retval; } -/* - * function: zfcp_fsf_cleanup - * - * purpose: cleanup FSF operation for specified adapter - * - * returns: 0 - FSF operation successfully cleaned up - * !0 - failed to cleanup FSF operation for this adapter +/** + * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter */ -static int +static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) { - int retval = ZFCP_ERP_SUCCEEDED; struct zfcp_adapter *adapter = erp_action->adapter; /* @@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) /* all ports and units are closed */ zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); - - return retval; } /* @@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, } -static int -zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) +void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) { - int retval = 0; struct 
zfcp_port *port; debug_text_event(adapter->erp_dbf, 5, "a_actab"); @@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) else list_for_each_entry(port, &adapter->port_list_head, list) zfcp_erp_action_dismiss_port(port); - - return retval; } -static int -zfcp_erp_action_dismiss_port(struct zfcp_port *port) +static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) { - int retval = 0; struct zfcp_unit *unit; struct zfcp_adapter *adapter = port->adapter; @@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port) else list_for_each_entry(unit, &port->unit_list_head, list) zfcp_erp_action_dismiss_unit(unit); - - return retval; } -static int -zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) +static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) { - int retval = 0; struct zfcp_adapter *adapter = unit->port->adapter; debug_text_event(adapter->erp_dbf, 5, "u_actab"); debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) zfcp_erp_action_dismiss(&unit->erp_action); - - return retval; } static inline void diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index d02366004cd..146d7a2b4c4 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *); extern void zfcp_qdio_free_queues(struct zfcp_adapter *); extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, struct zfcp_fsf_req *); -extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *); extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req (struct zfcp_fsf_req *, int, int); @@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int); extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); +extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); extern int zfcp_erp_port_reopen(struct zfcp_port *, int); @@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *); extern int zfcp_erp_thread_setup(struct zfcp_adapter *); extern int zfcp_erp_thread_kill(struct zfcp_adapter *); extern int zfcp_erp_wait(struct zfcp_adapter *); -extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); +extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); extern int zfcp_test_link(struct zfcp_port *); @@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, struct zfcp_fsf_req *); extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, struct scsi_cmnd *); +extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *); +extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long); +extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *, + unsigned long); +extern int zfcp_reqlist_isempty(struct zfcp_adapter *); #endif /* ZFCP_EXT_H */ diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 31db2b06fab..ff2eacf5ec8 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -49,7 +49,6 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, struct fsf_link_down_info *); 
static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); -static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *); /* association between FSF command and FSF QTCB type */ static u32 fsf_qtcb_type[] = { @@ -146,47 +145,48 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) kfree(fsf_req); } -/* - * function: - * - * purpose: - * - * returns: - * - * note: qdio queues shall be down (no ongoing inbound processing) +/** + * zfcp_fsf_req_dismiss - dismiss a single fsf request */ -int -zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) +static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter, + struct zfcp_fsf_req *fsf_req, + unsigned int counter) { - struct zfcp_fsf_req *fsf_req, *tmp; - unsigned long flags; - LIST_HEAD(remove_queue); + u64 dbg_tmp[2]; - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - list_splice_init(&adapter->fsf_req_list_head, &remove_queue); - atomic_set(&adapter->fsf_reqs_active, 0); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - - list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { - list_del(&fsf_req->list); - zfcp_fsf_req_dismiss(fsf_req); - } - - return 0; + dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); + dbg_tmp[1] = (u64) counter; + debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); + list_del(&fsf_req->list); + fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; + zfcp_fsf_req_complete(fsf_req); } -/* - * function: - * - * purpose: - * - * returns: +/** + * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests */ -static void -zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req) +int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) { - fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; - zfcp_fsf_req_complete(fsf_req); + struct zfcp_fsf_req *request, *tmp; + unsigned long flags; + unsigned int i, counter; + + spin_lock_irqsave(&adapter->req_list_lock, flags); + atomic_set(&adapter->reqs_active, 0); + for (i=0; i<REQUEST_LIST_SIZE; i++) { + if (list_empty(&adapter->req_list[i])) + continue; + + counter = 0; + list_for_each_entry_safe(request, tmp, + &adapter->req_list[i], list) { + zfcp_fsf_req_dismiss(adapter, request, counter); + counter++; + } + } + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + + return 0; } /* @@ -4592,12 +4592,14 @@ static inline void zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) { if (likely(fsf_req->qtcb != NULL)) { - fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no; - fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; + fsf_req->qtcb->prefix.req_seq_no = + fsf_req->adapter->fsf_req_seq_no; + fsf_req->qtcb->prefix.req_id = fsf_req->req_id; fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; - fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command]; + fsf_req->qtcb->prefix.qtcb_type = + fsf_qtcb_type[fsf_req->fsf_command]; fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; - fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; + fsf_req->qtcb->header.req_handle = fsf_req->req_id; fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; } } @@ -4654,6 +4656,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, { volatile struct qdio_buffer_element *sbale; struct zfcp_fsf_req *fsf_req = NULL; + unsigned long flags; int ret = 0; struct zfcp_qdio_queue *req_queue = &adapter->request_queue; @@ -4668,6 +4671,12 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, fsf_req->adapter = adapter; fsf_req->fsf_command = fsf_cmd; + INIT_LIST_HEAD(&fsf_req->list); + + 
/* unique request id */ + spin_lock_irqsave(&adapter->req_list_lock, flags); + fsf_req->req_id = adapter->req_no++; + spin_unlock_irqrestore(&adapter->req_list_lock, flags); zfcp_fsf_req_qtcb_init(fsf_req); @@ -4707,7 +4716,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); /* setup common SBALE fields */ - sbale[0].addr = fsf_req; + sbale[0].addr = (void *) fsf_req->req_id; sbale[0].flags |= SBAL_FLAGS0_COMMAND; if (likely(fsf_req->qtcb != NULL)) { sbale[1].addr = (void *) fsf_req->qtcb; @@ -4747,7 +4756,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) volatile struct qdio_buffer_element *sbale; int inc_seq_no; int new_distance_from_int; - unsigned long flags; + u64 dbg_tmp[2]; int retval = 0; adapter = fsf_req->adapter; @@ -4761,10 +4770,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, sbale[1].length); - /* put allocated FSF request at list tail */ - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); + /* put allocated FSF request into hash table */ + spin_lock(&adapter->req_list_lock); + zfcp_reqlist_add(adapter, fsf_req); + spin_unlock(&adapter->req_list_lock); inc_seq_no = (fsf_req->qtcb != NULL); @@ -4803,6 +4812,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) QDIO_FLAG_SYNC_OUTPUT, 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); + dbg_tmp[0] = (unsigned long) sbale[0].addr; + dbg_tmp[1] = (u64) retval; + debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); + if (unlikely(retval)) { /* Queues are down..... */ retval = -EIO; @@ -4812,22 +4825,17 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) */ if (timer) del_timer(timer); - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - list_del(&fsf_req->list); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - /* - * adjust the number of free SBALs in request queue as well as - * position of first one - */ + spin_lock(&adapter->req_list_lock); + zfcp_reqlist_remove(adapter, fsf_req->req_id); + spin_unlock(&adapter->req_list_lock); + /* undo changes in request queue made for this request */ zfcp_qdio_zero_sbals(req_queue->buffer, fsf_req->sbal_first, fsf_req->sbal_number); atomic_add(fsf_req->sbal_number, &req_queue->free_count); - req_queue->free_index -= fsf_req->sbal_number; /* increase */ + req_queue->free_index -= fsf_req->sbal_number; req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ - ZFCP_LOG_DEBUG - ("error: do_QDIO failed. 
Buffers could not be enqueued " - "to request queue.\n"); + zfcp_erp_adapter_reopen(adapter, 0); } else { req_queue->distance_from_int = new_distance_from_int; /* @@ -4843,7 +4851,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) adapter->fsf_req_seq_no++; /* count FSF requests pending */ - atomic_inc(&adapter->fsf_reqs_active); + atomic_inc(&adapter->reqs_active); } return retval; } diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 49ea5add4ab..dbd9f48e863 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -282,6 +282,37 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device, return; } +/** + * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status + */ +static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, + unsigned long req_id) +{ + struct zfcp_fsf_req *fsf_req; + unsigned long flags; + + debug_long_event(adapter->erp_dbf, 4, req_id); + + spin_lock_irqsave(&adapter->req_list_lock, flags); + fsf_req = zfcp_reqlist_ismember(adapter, req_id); + + if (!fsf_req) { + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id); + zfcp_erp_adapter_reopen(adapter, 0); + return -EINVAL; + } + + zfcp_reqlist_remove(adapter, req_id); + atomic_dec(&adapter->reqs_active); + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + + /* finish the FSF request */ + zfcp_fsf_req_complete(fsf_req); + + return 0; +} + /* * function: zfcp_qdio_response_handler * @@ -344,7 +375,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, /* look for QDIO request identifiers in SB */ buffere = &buffer->element[buffere_index]; retval = zfcp_qdio_reqid_check(adapter, - (void *) buffere->addr); + (unsigned long) buffere->addr); if (retval) { ZFCP_LOG_NORMAL("bug: unexpected inbound " @@ -415,52 +446,6 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, return; } -/* - * function: zfcp_qdio_reqid_check - * - * purpose: checks for valid reqids or unsolicited status - * - * returns: 0 - valid request id or unsolicited status - * !0 - otherwise - */ -int -zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr) -{ - struct zfcp_fsf_req *fsf_req; - unsigned long flags; - - /* invalid (per convention used in this driver) */ - if (unlikely(!sbale_addr)) { - ZFCP_LOG_NORMAL("bug: invalid reqid\n"); - return -EINVAL; - } - - /* valid request id and thus (hopefully :) valid fsf_req address */ - fsf_req = (struct zfcp_fsf_req *) sbale_addr; - - /* serialize with zfcp_fsf_req_dismiss_all */ - spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); - if (list_empty(&adapter->fsf_req_list_head)) { - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - return 0; - } - list_del(&fsf_req->list); - atomic_dec(&adapter->fsf_reqs_active); - spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); - - if (unlikely(adapter != fsf_req->adapter)) { - ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, " - "fsf_req->adapter=%p, adapter=%p)\n", - fsf_req, fsf_req->adapter, adapter); - return -EINVAL; - } - - /* finish the FSF request */ - zfcp_fsf_req_complete(fsf_req); - - return 0; -} - /** * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue * @queue: queue from which SBALE should be returned diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 671f4a6a5d1..1bb55086db9 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -30,7 +30,6 @@ static int 
zfcp_scsi_queuecommand(struct scsi_cmnd *, void (*done) (struct scsi_cmnd *)); static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); -static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *); static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); static int zfcp_task_management_function(struct zfcp_unit *, u8, struct scsi_cmnd *); @@ -46,30 +45,22 @@ struct zfcp_data zfcp_data = { .scsi_host_template = { .name = ZFCP_NAME, .proc_name = "zfcp", - .proc_info = NULL, - .detect = NULL, .slave_alloc = zfcp_scsi_slave_alloc, .slave_configure = zfcp_scsi_slave_configure, .slave_destroy = zfcp_scsi_slave_destroy, .queuecommand = zfcp_scsi_queuecommand, .eh_abort_handler = zfcp_scsi_eh_abort_handler, .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, - .eh_bus_reset_handler = zfcp_scsi_eh_bus_reset_handler, + .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler, .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, .can_queue = 4096, .this_id = -1, - /* - * FIXME: - * one less? can zfcp_create_sbale cope with it? - */ .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, .cmd_per_lun = 1, - .unchecked_isa_dma = 0, .use_clustering = 1, .sdev_attrs = zfcp_sysfs_sdev_attrs, }, .driver_version = ZFCP_VERSION, - /* rest initialised with zeros */ }; /* Find start of Response Information in FCP response unit*/ @@ -176,8 +167,14 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp) return retval; } -static void -zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) +/** + * zfcp_scsi_slave_destroy - called when scsi device is removed + * + * Remove reference to associated scsi device for an zfcp_unit. + * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs + * or a scan for this device might have failed. + */ +static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) { struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; @@ -185,6 +182,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); sdpnt->hostdata = NULL; unit->device = NULL; + zfcp_erp_unit_failed(unit); zfcp_unit_put(unit); } else { ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " @@ -549,35 +547,38 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, } /** - * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter) + * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset + * + * If ERP is already running it will be stopped. 
*/ -int -zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt) +int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { - struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; - struct zfcp_adapter *adapter = unit->port->adapter; - - ZFCP_LOG_NORMAL("bus reset because of problems with " - "unit 0x%016Lx\n", unit->fcp_lun); - zfcp_erp_adapter_reopen(adapter, 0); - zfcp_erp_wait(adapter); - - return SUCCESS; -} + struct zfcp_unit *unit; + struct zfcp_adapter *adapter; + unsigned long flags; -/** - * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter) - */ -int -zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) -{ - struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; - struct zfcp_adapter *adapter = unit->port->adapter; + unit = (struct zfcp_unit*) scpnt->device->hostdata; + adapter = unit->port->adapter; - ZFCP_LOG_NORMAL("host reset because of problems with " + ZFCP_LOG_NORMAL("host/bus reset because of problems with " "unit 0x%016Lx\n", unit->fcp_lun); - zfcp_erp_adapter_reopen(adapter, 0); - zfcp_erp_wait(adapter); + + write_lock_irqsave(&adapter->erp_lock, flags); + if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, + &adapter->status)) { + zfcp_erp_modify_adapter_status(adapter, + ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, + ZFCP_CLEAR); + zfcp_erp_action_dismiss_adapter(adapter); + write_unlock_irqrestore(&adapter->erp_lock, flags); + zfcp_fsf_req_dismiss_all(adapter); + adapter->fsf_req_seq_no = 0; + zfcp_erp_adapter_reopen(adapter, 0); + } else { + write_unlock_irqrestore(&adapter->erp_lock, flags); + zfcp_erp_adapter_reopen(adapter, 0); + zfcp_erp_wait(adapter); + } return SUCCESS; } diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig index 06d7601cdf5..d006a8cb4a7 100644 --- a/drivers/scsi/arm/Kconfig +++ b/drivers/scsi/arm/Kconfig @@ -69,6 +69,7 @@ comment "The following drivers are not fully supported" config SCSI_CUMANA_1 tristate "CumanaSCSI I support (EXPERIMENTAL)" depends on ARCH_ACORN && EXPERIMENTAL && SCSI + select SCSI_SPI_ATTRS help This enables support for the Cumana SCSI I card. If you have an Acorn system with one of these, say Y. If unsure, say N. @@ -76,6 +77,7 @@ config SCSI_CUMANA_1 config SCSI_ECOSCSI tristate "EcoScsi support (EXPERIMENTAL)" depends on ARCH_ACORN && EXPERIMENTAL && (ARCH_ARC || ARCH_A5K) && SCSI + select SCSI_SPI_ATTRS help This enables support for the EcoSCSI card -- a small card that sits in the Econet socket. If you have an Acorn system with one of these, @@ -84,6 +86,7 @@ config SCSI_ECOSCSI config SCSI_OAK1 tristate "Oak SCSI support (EXPERIMENTAL)" depends on ARCH_ACORN && EXPERIMENTAL && SCSI + select SCSI_SPI_ATTRS help This enables support for the Oak SCSI card. If you have an Acorn system with one of these, say Y. If unsure, say N. 
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h index 6dd544a5eb5..8c2600ffc6a 100644 --- a/drivers/scsi/arm/scsi.h +++ b/drivers/scsi/arm/scsi.h @@ -74,7 +74,7 @@ static inline void init_SCp(Scsi_Cmnd *SCpnt) unsigned long len = 0; int buf; - SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->buffer; + SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer; SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1; SCpnt->SCp.ptr = (char *) (page_address(SCpnt->SCp.buffer->page) + diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index 19745a31072..2d20caf377f 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c @@ -390,7 +390,8 @@ static struct ata_port_info piix_port_info[] = { /* ich5_sata */ { .sht = &piix_sht, - .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR, + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | + PIIX_FLAG_IGNORE_PCS, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ @@ -467,6 +468,11 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, piix_pci_tbl); MODULE_VERSION(DRV_VERSION); +static int force_pcs = 0; +module_param(force_pcs, int, 0444); +MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around " + "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)"); + /** * piix_pata_cbl_detect - Probe host controller cable detect info * @ap: Port for which cable detect info is desired @@ -531,27 +537,25 @@ static void piix_pata_error_handler(struct ata_port *ap) } /** - * piix_sata_prereset - prereset for SATA host controller + * piix_sata_present_mask - determine present mask for SATA host controller * @ap: Target port * - * Reads and configures SATA PCI device's PCI config register - * Port Configuration and Status (PCS) to determine port and - * device availability. Return -ENODEV to skip reset if no - * device is present. + * Reads SATA PCI device's PCI config register Port Configuration + * and Status (PCS) to determine port and device availability. * * LOCKING: * None (inherited from caller). * * RETURNS: - * 0 if device is present, -ENODEV otherwise. + * determined present_mask */ -static int piix_sata_prereset(struct ata_port *ap) +static unsigned int piix_sata_present_mask(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); struct piix_host_priv *hpriv = ap->host_set->private_data; const unsigned int *map = hpriv->map; int base = 2 * ap->hard_port_no; - unsigned int present = 0; + unsigned int present_mask = 0; int port, i; u16 pcs; @@ -564,24 +568,52 @@ static int piix_sata_prereset(struct ata_port *ap) continue; if ((ap->flags & PIIX_FLAG_IGNORE_PCS) || (pcs & 1 << (hpriv->map_db->present_shift + port))) - present = 1; + present_mask |= 1 << i; } DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", ap->id, pcs, present_mask); - if (!present) { - ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); - ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; - return 0; + return present_mask; +} + +/** + * piix_sata_softreset - reset SATA host port via ATA SRST + * @ap: port to reset + * @classes: resulting classes of attached devices + * + * Reset SATA host port via ATA SRST. On controllers with + * reliable PCS present bits, the bits are used to determine + * device presence. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno otherwise. 
+ */ +static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes) +{ + unsigned int present_mask; + int i, rc; + + present_mask = piix_sata_present_mask(ap); + + rc = ata_std_softreset(ap, classes); + if (rc) + return rc; + + for (i = 0; i < ATA_MAX_DEVICES; i++) { + if (!(present_mask & (1 << i))) + classes[i] = ATA_DEV_NONE; } - return ata_std_prereset(ap); + return 0; } static void piix_sata_error_handler(struct ata_port *ap) { - ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL, + ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL, ata_std_postreset); } @@ -785,6 +817,7 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev) } static void __devinit piix_init_pcs(struct pci_dev *pdev, + struct ata_port_info *pinfo, const struct piix_map_db *map_db) { u16 pcs, new_pcs; @@ -798,6 +831,18 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev, pci_write_config_word(pdev, ICH5_PCS, new_pcs); msleep(150); } + + if (force_pcs == 1) { + dev_printk(KERN_INFO, &pdev->dev, + "force ignoring PCS (0x%x)\n", new_pcs); + pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS; + pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS; + } else if (force_pcs == 2) { + dev_printk(KERN_INFO, &pdev->dev, + "force honoring PCS (0x%x)\n", new_pcs); + pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS; + pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS; + } } static void __devinit piix_init_sata_map(struct pci_dev *pdev, @@ -828,6 +873,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev, case IDE: WARN_ON((i & 1) || map[i + 1] != IDE); pinfo[i / 2] = piix_port_info[ich5_pata]; + pinfo[i / 2].private_data = hpriv; i++; printk(" IDE IDE"); break; @@ -905,7 +951,8 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (host_flags & ATA_FLAG_SATA) { piix_init_sata_map(pdev, port_info, piix_map_db_table[ent->driver_data]); - piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]); + piix_init_pcs(pdev, port_info, + piix_map_db_table[ent->driver_data]); } /* On ICH5, some BIOSen disable the interrupt using the diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c index 98bd22714d0..5630868c1b2 100644 --- a/drivers/scsi/esp.c +++ b/drivers/scsi/esp.c @@ -1146,7 +1146,7 @@ static struct sbus_dev sun4_esp_dev; static int __init esp_sun4_probe(struct scsi_host_template *tpnt) { if (sun4_esp_physaddr) { - memset(&sun4_esp_dev, 0, sizeof(esp_dev)); + memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev)); sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; sun4_esp_dev.irqs[0] = 4; sun4_esp_dev.resource[0].start = sun4_esp_physaddr; @@ -1162,6 +1162,7 @@ static int __init esp_sun4_probe(struct scsi_host_template *tpnt) static int __devexit esp_sun4_remove(void) { + struct of_device *dev = &sun4_esp_dev.ofdev; struct esp *esp = dev_get_drvdata(&dev->dev); return esp_remove_common(esp); diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index ab2f8b26790..bcb3444f1dc 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -45,10 +45,6 @@ static char driver_name[] = "hptiop"; static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; static const char driver_ver[] = "v1.0 (060426)"; -static DEFINE_SPINLOCK(hptiop_hba_list_lock); -static LIST_HEAD(hptiop_hba_list); -static int hptiop_cdev_major = -1; - static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); static void hptiop_message_callback(struct hptiop_hba 
*hba, u32 msg); @@ -577,7 +573,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba) if (atomic_xchg(&hba->resetting, 1) == 0) { atomic_inc(&hba->reset_count); writel(IOPMU_INBOUND_MSG0_RESET, - &hba->iop->outbound_msgaddr0); + &hba->iop->inbound_msgaddr0); hptiop_pci_posting_flush(hba->iop); } @@ -620,532 +616,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, return queue_depth; } -struct hptiop_getinfo { - char __user *buffer; - loff_t buflength; - loff_t bufoffset; - loff_t buffillen; - loff_t filpos; -}; - -static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo, - char *data, int datalen) -{ - if (pinfo->filpos < pinfo->bufoffset) { - if (pinfo->filpos + datalen <= pinfo->bufoffset) { - pinfo->filpos += datalen; - return; - } else { - data += (pinfo->bufoffset - pinfo->filpos); - datalen -= (pinfo->bufoffset - pinfo->filpos); - pinfo->filpos = pinfo->bufoffset; - } - } - - pinfo->filpos += datalen; - if (pinfo->buffillen == pinfo->buflength) - return; - - if (pinfo->buflength - pinfo->buffillen < datalen) - datalen = pinfo->buflength - pinfo->buffillen; - - if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen)) - return; - - pinfo->buffillen += datalen; -} - -static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...) -{ - va_list args; - char buf[128]; - int len; - - va_start(args, fmt); - len = vsnprintf(buf, sizeof(buf), fmt, args); - va_end(args); - hptiop_copy_mem_info(pinfo, buf, len); - return len; -} - -static void hptiop_ioctl_done(struct hpt_ioctl_k *arg) -{ - arg->done = NULL; - wake_up(&arg->hba->ioctl_wq); -} - -static void hptiop_do_ioctl(struct hpt_ioctl_k *arg) -{ - struct hptiop_hba *hba = arg->hba; - u32 val; - struct hpt_iop_request_ioctl_command __iomem *req; - int ioctl_retry = 0; - - dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no); - - /* - * check (in + out) buff size from application. - * outbuf must be dword aligned. - */ - if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size > - hba->max_request_size - - sizeof(struct hpt_iop_request_header) - - 4 * sizeof(u32)) { - dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n", - hba->host->host_no, - arg->inbuf_size, arg->outbuf_size); - arg->result = HPT_IOCTL_RESULT_FAILED; - return; - } - -retry: - spin_lock_irq(hba->host->host_lock); - - val = readl(&hba->iop->inbound_queue); - if (val == IOPMU_QUEUE_EMPTY) { - spin_unlock_irq(hba->host->host_lock); - dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no); - arg->result = -1; - return; - } - - req = (struct hpt_iop_request_ioctl_command __iomem *) - ((unsigned long)hba->iop + val); - - writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code), - &req->ioctl_code); - writel(arg->inbuf_size, &req->inbuf_size); - writel(arg->outbuf_size, &req->outbuf_size); - - /* - * use the buffer on the IOP local memory first, then copy it - * back to host. - * the caller's request buffer shoudl be little-endian. 
- */ - if (arg->inbuf_size) - memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size); - - /* correct the controller ID for IOP */ - if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO || - arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 || - arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO) - && arg->inbuf_size >= sizeof(u32)) - writel(0, req->buf); - - writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type); - writel(0, &req->header.flags); - writel(offsetof(struct hpt_iop_request_ioctl_command, buf) - + arg->inbuf_size, &req->header.size); - writel((u32)(unsigned long)arg, &req->header.context); - writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0, - &req->header.context_hi32); - writel(IOP_RESULT_PENDING, &req->header.result); - - arg->result = HPT_IOCTL_RESULT_FAILED; - arg->done = hptiop_ioctl_done; - - writel(val, &hba->iop->inbound_queue); - hptiop_pci_posting_flush(hba->iop); - - spin_unlock_irq(hba->host->host_lock); - - wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ); - - if (arg->done != NULL) { - hptiop_reset_hba(hba); - if (ioctl_retry++ < 3) - goto retry; - } - - dprintk("hpt_iop_ioctl %x result %d\n", - arg->ioctl_code, arg->result); -} - -static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf, - u32 insize, void *outbuf, u32 outsize) -{ - struct hpt_ioctl_k arg; - arg.hba = hba; - arg.ioctl_code = code; - arg.inbuf = inbuf; - arg.outbuf = outbuf; - arg.inbuf_size = insize; - arg.outbuf_size = outsize; - arg.bytes_returned = NULL; - hptiop_do_ioctl(&arg); - return arg.result; -} - -static inline int hpt_id_valid(__le32 id) -{ - return id != 0 && id != cpu_to_le32(0xffffffff); -} - -static int hptiop_get_controller_info(struct hptiop_hba *hba, - struct hpt_controller_info *pinfo) -{ - int id = 0; - - return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO, - &id, sizeof(int), pinfo, sizeof(*pinfo)); -} - - -static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus, - struct hpt_channel_info *pinfo) -{ - u32 ids[2]; - - ids[0] = 0; - ids[1] = bus; - return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO, - ids, sizeof(ids), pinfo, sizeof(*pinfo)); - -} - -static int hptiop_get_logical_devices(struct hptiop_hba *hba, - __le32 *pids, int maxcount) -{ - int i; - u32 count = maxcount - 1; - - if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES, - &count, sizeof(u32), - pids, sizeof(u32) * maxcount)) - return -1; - - maxcount = le32_to_cpu(pids[0]); - for (i = 0; i < maxcount; i++) - pids[i] = pids[i+1]; - - return maxcount; -} - -static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id, - struct hpt_logical_device_info_v3 *pinfo) -{ - return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3, - &id, sizeof(u32), - pinfo, sizeof(*pinfo)); -} - -static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo) -{ - static char s[64]; - u32 flags = le32_to_cpu(devinfo->u.array.flags); - u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress); - u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress); - - if (flags & ARRAY_FLAG_DISABLED) - return "Disabled"; - else if (flags & ARRAY_FLAG_TRANSFORMING) - sprintf(s, "Expanding/Migrating %d.%d%%%s%s", - trans_prog / 100, - trans_prog % 100, - (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))? - ", Critical" : "", - ((flags & ARRAY_FLAG_NEEDINITIALIZING) && - !(flags & ARRAY_FLAG_REBUILDING) && - !(flags & ARRAY_FLAG_INITIALIZING))? 
- ", Unintialized" : ""); - else if ((flags & ARRAY_FLAG_BROKEN) && - devinfo->u.array.array_type != AT_RAID6) - return "Critical"; - else if (flags & ARRAY_FLAG_REBUILDING) - sprintf(s, - (flags & ARRAY_FLAG_NEEDINITIALIZING)? - "%sBackground initializing %d.%d%%" : - "%sRebuilding %d.%d%%", - (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", - reb_prog / 100, - reb_prog % 100); - else if (flags & ARRAY_FLAG_VERIFYING) - sprintf(s, "%sVerifying %d.%d%%", - (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", - reb_prog / 100, - reb_prog % 100); - else if (flags & ARRAY_FLAG_INITIALIZING) - sprintf(s, "%sForground initializing %d.%d%%", - (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", - reb_prog / 100, - reb_prog % 100); - else if (flags & ARRAY_FLAG_NEEDTRANSFORM) - sprintf(s,"%s%s%s", "Need Expanding/Migrating", - (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", - ((flags & ARRAY_FLAG_NEEDINITIALIZING) && - !(flags & ARRAY_FLAG_REBUILDING) && - !(flags & ARRAY_FLAG_INITIALIZING))? - ", Unintialized" : ""); - else if (flags & ARRAY_FLAG_NEEDINITIALIZING && - !(flags & ARRAY_FLAG_REBUILDING) && - !(flags & ARRAY_FLAG_INITIALIZING)) - sprintf(s,"%sUninitialized", - (flags & ARRAY_FLAG_BROKEN)? "Critical, " : ""); - else if ((flags & ARRAY_FLAG_NEEDBUILDING) || - (flags & ARRAY_FLAG_BROKEN)) - return "Critical"; - else - return "Normal"; - return s; -} - -static void hptiop_dump_devinfo(struct hptiop_hba *hba, - struct hptiop_getinfo *pinfo, __le32 id, int indent) -{ - struct hpt_logical_device_info_v3 devinfo; - int i; - u64 capacity; - - for (i = 0; i < indent; i++) - hptiop_copy_info(pinfo, "\t"); - - if (hptiop_get_device_info_v3(hba, id, &devinfo)) { - hptiop_copy_info(pinfo, "unknown\n"); - return; - } - - switch (devinfo.type) { - - case LDT_DEVICE: { - struct hd_driveid *driveid; - u32 flags = le32_to_cpu(devinfo.u.device.flags); - - driveid = (struct hd_driveid *)devinfo.u.device.ident; - /* model[] is 40 chars long, but we just want 20 chars here */ - driveid->model[20] = 0; - - if (indent) - if (flags & DEVICE_FLAG_DISABLED) - hptiop_copy_info(pinfo,"Missing\n"); - else - hptiop_copy_info(pinfo, "CH%d %s\n", - devinfo.u.device.path_id + 1, - driveid->model); - else { - capacity = le64_to_cpu(devinfo.capacity) * 512; - do_div(capacity, 1000000); - hptiop_copy_info(pinfo, - "CH%d %s, %lluMB, %s %s%s%s%s\n", - devinfo.u.device.path_id + 1, - driveid->model, - capacity, - (flags & DEVICE_FLAG_DISABLED)? - "Disabled" : "Normal", - devinfo.u.device.read_ahead_enabled? - "[RA]" : "", - devinfo.u.device.write_cache_enabled? - "[WC]" : "", - devinfo.u.device.TCQ_enabled? - "[TCQ]" : "", - devinfo.u.device.NCQ_enabled? - "[NCQ]" : "" - ); - } - break; - } - - case LDT_ARRAY: - if (devinfo.target_id != INVALID_TARGET_ID) - hptiop_copy_info(pinfo, "[DISK %d_%d] ", - devinfo.vbus_id, devinfo.target_id); - - capacity = le64_to_cpu(devinfo.capacity) * 512; - do_div(capacity, 1000000); - hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n", - devinfo.u.array.name, - devinfo.u.array.array_type==AT_RAID0? "RAID0" : - devinfo.u.array.array_type==AT_RAID1? "RAID1" : - devinfo.u.array.array_type==AT_RAID5? "RAID5" : - devinfo.u.array.array_type==AT_RAID6? "RAID6" : - devinfo.u.array.array_type==AT_JBOD? 
"JBOD" : - "unknown", - capacity, - get_array_status(&devinfo)); - for (i = 0; i < devinfo.u.array.ndisk; i++) { - if (hpt_id_valid(devinfo.u.array.members[i])) { - if (cpu_to_le16(1<<i) & - devinfo.u.array.critical_members) - hptiop_copy_info(pinfo, "\t*"); - hptiop_dump_devinfo(hba, pinfo, - devinfo.u.array.members[i], indent+1); - } - else - hptiop_copy_info(pinfo, "\tMissing\n"); - } - if (id == devinfo.u.array.transform_source) { - hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n"); - hptiop_dump_devinfo(hba, pinfo, - devinfo.u.array.transform_target, indent+1); - } - break; - } -} - static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); } -static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf, - size_t count, loff_t *ppos) -{ - struct hptiop_hba *hba = filp->private_data; - struct hptiop_getinfo info; - int i, j, ndev; - struct hpt_controller_info con_info; - struct hpt_channel_info chan_info; - __le32 ids[32]; - - info.buffer = buf; - info.buflength = count; - info.bufoffset = ppos ? *ppos : 0; - info.filpos = 0; - info.buffillen = 0; - - if (hptiop_get_controller_info(hba, &con_info)) - return -EIO; - - for (i = 0; i < con_info.num_buses; i++) { - if (hptiop_get_channel_info(hba, i, &chan_info) == 0) { - if (hpt_id_valid(chan_info.devices[0])) - hptiop_dump_devinfo(hba, &info, - chan_info.devices[0], 0); - if (hpt_id_valid(chan_info.devices[1])) - hptiop_dump_devinfo(hba, &info, - chan_info.devices[1], 0); - } - } - - ndev = hptiop_get_logical_devices(hba, ids, - sizeof(ids) / sizeof(ids[0])); - - /* - * if hptiop_get_logical_devices fails, ndev==-1 and it just - * output nothing here - */ - for (j = 0; j < ndev; j++) - hptiop_dump_devinfo(hba, &info, ids[j], 0); - - if (ppos) - *ppos += info.buffillen; - - return info.buffillen; -} - -static int hptiop_cdev_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct hptiop_hba *hba = file->private_data; - struct hpt_ioctl_u ioctl_u; - struct hpt_ioctl_k ioctl_k; - u32 bytes_returned; - int err = -EINVAL; - - if (copy_from_user(&ioctl_u, - (void __user *)arg, sizeof(struct hpt_ioctl_u))) - return -EINVAL; - - if (ioctl_u.magic != HPT_IOCTL_MAGIC) - return -EINVAL; - - ioctl_k.ioctl_code = ioctl_u.ioctl_code; - ioctl_k.inbuf = NULL; - ioctl_k.inbuf_size = ioctl_u.inbuf_size; - ioctl_k.outbuf = NULL; - ioctl_k.outbuf_size = ioctl_u.outbuf_size; - ioctl_k.hba = hba; - ioctl_k.bytes_returned = &bytes_returned; - - /* verify user buffer */ - if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ, - ioctl_u.inbuf, ioctl_k.inbuf_size)) || - (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE, - ioctl_u.outbuf, ioctl_k.outbuf_size)) || - (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE, - ioctl_u.bytes_returned, sizeof(u32))) || - ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) { - - dprintk("scsi%d: got bad user address\n", hba->host->host_no); - return -EINVAL; - } - - /* map buffer to kernel. 
*/ - if (ioctl_k.inbuf_size) { - ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL); - if (!ioctl_k.inbuf) { - dprintk("scsi%d: fail to alloc inbuf\n", - hba->host->host_no); - err = -ENOMEM; - goto err_exit; - } - - if (copy_from_user(ioctl_k.inbuf, - ioctl_u.inbuf, ioctl_k.inbuf_size)) { - goto err_exit; - } - } - - if (ioctl_k.outbuf_size) { - ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL); - if (!ioctl_k.outbuf) { - dprintk("scsi%d: fail to alloc outbuf\n", - hba->host->host_no); - err = -ENOMEM; - goto err_exit; - } - } - - hptiop_do_ioctl(&ioctl_k); - - if (ioctl_k.result == HPT_IOCTL_RESULT_OK) { - if (ioctl_k.outbuf_size && - copy_to_user(ioctl_u.outbuf, - ioctl_k.outbuf, ioctl_k.outbuf_size)) - goto err_exit; - - if (ioctl_u.bytes_returned && - copy_to_user(ioctl_u.bytes_returned, - &bytes_returned, sizeof(u32))) - goto err_exit; - - err = 0; - } - -err_exit: - kfree(ioctl_k.inbuf); - kfree(ioctl_k.outbuf); - - return err; -} - -static int hptiop_cdev_open(struct inode *inode, struct file *file) -{ - struct hptiop_hba *hba; - unsigned i = 0, minor = iminor(inode); - int ret = -ENODEV; - - spin_lock(&hptiop_hba_list_lock); - list_for_each_entry(hba, &hptiop_hba_list, link) { - if (i == minor) { - file->private_data = hba; - ret = 0; - goto out; - } - i++; - } - -out: - spin_unlock(&hptiop_hba_list_lock); - return ret; -} - -static struct file_operations hptiop_cdev_fops = { - .owner = THIS_MODULE, - .read = hptiop_cdev_read, - .ioctl = hptiop_cdev_ioctl, - .open = hptiop_cdev_open, -}; - static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) { struct Scsi_Host *host = class_to_shost(class_dev); @@ -1296,19 +771,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, goto unmap_pci_bar; } - if (scsi_add_host(host, &pcidev->dev)) { - printk(KERN_ERR "scsi%d: scsi_add_host failed\n", - hba->host->host_no); - goto unmap_pci_bar; - } - pci_set_drvdata(pcidev, host); if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, driver_name, hba)) { printk(KERN_ERR "scsi%d: request irq %d failed\n", hba->host->host_no, pcidev->irq); - goto remove_scsi_host; + goto unmap_pci_bar; } /* Allocate request mem */ @@ -1355,9 +824,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev, if (hptiop_initialize_iop(hba)) goto free_request_mem; - spin_lock(&hptiop_hba_list_lock); - list_add_tail(&hba->link, &hptiop_hba_list); - spin_unlock(&hptiop_hba_list_lock); + if (scsi_add_host(host, &pcidev->dev)) { + printk(KERN_ERR "scsi%d: scsi_add_host failed\n", + hba->host->host_no); + goto free_request_mem; + } + scsi_scan_host(host); @@ -1372,9 +844,6 @@ free_request_mem: free_request_irq: free_irq(hba->pcidev->irq, hba); -remove_scsi_host: - scsi_remove_host(host); - unmap_pci_bar: iounmap(hba->iop); @@ -1422,10 +891,6 @@ static void hptiop_remove(struct pci_dev *pcidev) scsi_remove_host(host); - spin_lock(&hptiop_hba_list_lock); - list_del_init(&hba->link); - spin_unlock(&hptiop_hba_list_lock); - hptiop_shutdown(pcidev); free_irq(hba->pcidev->irq, hba); @@ -1462,27 +927,12 @@ static struct pci_driver hptiop_pci_driver = { static int __init hptiop_module_init(void) { - int error; - printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); - - error = pci_register_driver(&hptiop_pci_driver); - if (error < 0) - return error; - - hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops); - if (hptiop_cdev_major < 0) { - printk(KERN_WARNING "unable to register hptiop device.\n"); - return hptiop_cdev_major; - } - - return 0; + return 
pci_register_driver(&hptiop_pci_driver); } static void __exit hptiop_module_exit(void) { - dprintk("hptiop_module_exit\n"); - unregister_chrdev(hptiop_cdev_major, "hptiop"); pci_unregister_driver(&hptiop_pci_driver); } diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index f7b5d7372d2..94d1de55607 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c @@ -517,7 +517,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive) /* No more interrupts */ if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); - local_irq_enable(); + local_irq_enable_in_hardirq(); if (status.b.check) rq->errors++; idescsi_end_request (drive, 1, 0); diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 848fb2aa4ca..058f094f945 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -43,13 +43,10 @@ #include "iscsi_tcp.h" -#define ISCSI_TCP_VERSION "1.0-595" - MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " "Alex Aizman <itn780@yahoo.com>"); MODULE_DESCRIPTION("iSCSI/TCP data-path"); MODULE_LICENSE("GPL"); -MODULE_VERSION(ISCSI_TCP_VERSION); /* #define DEBUG_TCP */ #define DEBUG_ASSERT @@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn) * must be called with session lock */ static void -__iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_r2t_info *r2t; struct scsi_cmnd *sc; + /* flush ctask's r2t queues */ + while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { + __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, + sizeof(void*)); + debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n"); + } + sc = ctask->sc; if (unlikely(!sc)) return; @@ -374,6 +379,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) spin_unlock(&session->lock); return 0; } + rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); BUG_ON(!rc); @@ -399,7 +405,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) tcp_ctask->exp_r2tsn = r2tsn + 1; tcp_ctask->xmstate |= XMSTATE_SOL_HDR; __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); - __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); + list_move_tail(&ctask->running, &conn->xmitqueue); scsi_queue_work(session->host, &conn->xmitwork); conn->r2t_pdus_cnt++; @@ -477,6 +483,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) case ISCSI_OP_SCSI_DATA_IN: tcp_conn->in.ctask = session->cmds[itt]; rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); + if (rc) + return rc; /* fall through */ case ISCSI_OP_SCSI_CMD_RSP: tcp_conn->in.ctask = session->cmds[itt]; @@ -484,7 +492,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) goto copy_hdr; spin_lock(&session->lock); - __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); + iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); spin_unlock(&session->lock); break; @@ -500,13 +508,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) break; case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: - case ISCSI_OP_LOGOUT_RSP: - case ISCSI_OP_NOOP_IN: case ISCSI_OP_REJECT: case ISCSI_OP_ASYNC_EVENT: + /* + * It is possible that we could get a PDU with a buffer larger + * than 8K, but there are no targets that currently do this. 
+ * For now we fail until we find a vendor that needs it + */ + if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH < + tcp_conn->in.datalen) { + printk(KERN_ERR "iscsi_tcp: received buffer of len %u " + "but conn buffer is only %u (opcode %0x)\n", + tcp_conn->in.datalen, + DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode); + rc = ISCSI_ERR_PROTO; + break; + } + if (tcp_conn->in.datalen) goto copy_hdr; /* fall through */ + case ISCSI_OP_LOGOUT_RSP: + case ISCSI_OP_NOOP_IN: case ISCSI_OP_SCSI_TMFUNC_RSP: rc = iscsi_complete_pdu(conn, hdr, NULL, 0); break; @@ -523,7 +546,7 @@ copy_hdr: * skbs to complete the command then we have to copy the header * for later use */ - if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy < + if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <= (tcp_conn->in.datalen + tcp_conn->in.padding + (conn->datadgst_en ? 4 : 0))) { debug_tcp("Copying header for later use. in.copy %d in.datalen" @@ -614,9 +637,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask, * byte counters. **/ static inline int -iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) +iscsi_tcp_copy(struct iscsi_conn *conn) { - void *buf = tcp_conn->data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; int buf_size = tcp_conn->in.datalen; int buf_left = buf_size - tcp_conn->data_copied; int size = min(tcp_conn->in.copy, buf_left); @@ -627,7 +650,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) BUG_ON(size <= 0); rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, - (char*)buf + tcp_conn->data_copied, size); + (char*)conn->data + tcp_conn->data_copied, size); BUG_ON(rc); tcp_conn->in.offset += size; @@ -745,10 +768,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) done: /* check for non-exceptional status */ if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { - debug_scsi("done [sc %lx res %d itt 0x%x]\n", - (long)sc, sc->result, ctask->itt); + debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n", + (long)sc, sc->result, ctask->itt, + tcp_conn->in.hdr->flags); spin_lock(&conn->session->lock); - __iscsi_ctask_cleanup(conn, ctask); + iscsi_tcp_cleanup_ctask(conn, ctask); __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); spin_unlock(&conn->session->lock); } @@ -769,26 +793,25 @@ iscsi_data_recv(struct iscsi_conn *conn) break; case ISCSI_OP_SCSI_CMD_RSP: spin_lock(&conn->session->lock); - __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); + iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); spin_unlock(&conn->session->lock); case ISCSI_OP_TEXT_RSP: case ISCSI_OP_LOGIN_RSP: - case ISCSI_OP_NOOP_IN: case ISCSI_OP_ASYNC_EVENT: case ISCSI_OP_REJECT: /* * Collect data segment to the connection's data * placeholder */ - if (iscsi_tcp_copy(tcp_conn)) { + if (iscsi_tcp_copy(conn)) { rc = -EAGAIN; goto exit; } - rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data, + rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data, tcp_conn->in.datalen); if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) - iscsi_recv_digest_update(tcp_conn, tcp_conn->data, + iscsi_recv_digest_update(tcp_conn, conn->data, tcp_conn->in.datalen); break; default: @@ -843,7 +866,7 @@ more: if (rc == -EAGAIN) goto nomore; else { - iscsi_conn_failure(conn, rc); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return 0; } } @@ -897,7 +920,7 @@ more: if (rc) { if (rc == -EAGAIN) goto again; - iscsi_conn_failure(conn, rc); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return 0; } tcp_conn->in.copy -= tcp_conn->in.padding; @@ -1028,9 +1051,8 @@ 
iscsi_conn_set_callbacks(struct iscsi_conn *conn) } static void -iscsi_conn_restore_callbacks(struct iscsi_conn *conn) +iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn) { - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct sock *sk = tcp_conn->sock->sk; /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ @@ -1308,7 +1330,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) ctask->imm_count - ctask->unsol_count; - debug_scsi("cmd [itt %x total %d imm %d imm_data %d " + debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d " "r2t_data %d]\n", ctask->itt, ctask->total_length, ctask->imm_count, ctask->unsol_count, tcp_ctask->r2t_data_count); @@ -1636,7 +1658,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) } solicit_again: /* - * send Data-Out whitnin this R2T sequence. + * send Data-Out within this R2T sequence. */ if (!r2t->data_count) goto data_out_done; @@ -1731,7 +1753,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_data_task *dtask = tcp_ctask->dtask; - int sent, rc; + int sent = 0, rc; tcp_ctask->xmstate &= ~XMSTATE_W_PAD; iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, @@ -1900,27 +1922,32 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; /* initial operational parameters */ tcp_conn->hdr_size = sizeof(struct iscsi_hdr); - tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; - - /* allocate initial PDU receive place holder */ - if (tcp_conn->data_size <= PAGE_SIZE) - tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL); - else - tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL, - get_order(tcp_conn->data_size)); - if (!tcp_conn->data) - goto max_recv_dlenght_alloc_fail; return cls_conn; -max_recv_dlenght_alloc_fail: - kfree(tcp_conn); tcp_conn_alloc_fail: iscsi_conn_teardown(cls_conn); return NULL; } static void +iscsi_tcp_release_conn(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + + if (!tcp_conn->sock) + return; + + sock_hold(tcp_conn->sock->sk); + iscsi_conn_restore_callbacks(tcp_conn); + sock_put(tcp_conn->sock->sk); + + sock_release(tcp_conn->sock); + tcp_conn->sock = NULL; + conn->recv_lock = NULL; +} + +static void iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; @@ -1930,6 +1957,7 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) if (conn->hdrdgst_en || conn->datadgst_en) digest = 1; + iscsi_tcp_release_conn(conn); iscsi_conn_teardown(cls_conn); /* now free tcp_conn */ @@ -1944,15 +1972,18 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) crypto_free_tfm(tcp_conn->data_rx_tfm); } - /* free conn->data, size = MaxRecvDataSegmentLength */ - if (tcp_conn->data_size <= PAGE_SIZE) - kfree(tcp_conn->data); - else - free_pages((unsigned long)tcp_conn->data, - get_order(tcp_conn->data_size)); kfree(tcp_conn); } +static void +iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + + iscsi_conn_stop(cls_conn, flag); + iscsi_tcp_release_conn(conn); +} + static int iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, @@ -2001,52 +2032,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, return 0; } -static void 
-iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) -{ - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_r2t_info *r2t; - - /* flush ctask's r2t queues */ - while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) - __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, - sizeof(void*)); - - __iscsi_ctask_cleanup(conn, ctask); -} - -static void -iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn) -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct sock *sk; - - if (!tcp_conn->sock) - return; - - sk = tcp_conn->sock->sk; - write_lock_bh(&sk->sk_callback_lock); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); - write_unlock_bh(&sk->sk_callback_lock); -} - -static void -iscsi_tcp_terminate_conn(struct iscsi_conn *conn) -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - - if (!tcp_conn->sock) - return; - - sock_hold(tcp_conn->sock->sk); - iscsi_conn_restore_callbacks(conn); - sock_put(tcp_conn->sock->sk); - - sock_release(tcp_conn->sock); - tcp_conn->sock = NULL; - conn->recv_lock = NULL; -} - /* called with host lock */ static void iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, @@ -2057,6 +2042,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, sizeof(struct iscsi_hdr)); tcp_mtask->xmstate = XMSTATE_IMM_HDR; + tcp_mtask->sent = 0; if (mtask->data_count) iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, @@ -2138,39 +2124,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, int value; switch(param) { - case ISCSI_PARAM_MAX_RECV_DLENGTH: { - char *saveptr = tcp_conn->data; - gfp_t flags = GFP_KERNEL; - - sscanf(buf, "%d", &value); - if (tcp_conn->data_size >= value) { - iscsi_set_param(cls_conn, param, buf, buflen); - break; - } - - spin_lock_bh(&session->lock); - if (conn->stop_stage == STOP_CONN_RECOVER) - flags = GFP_ATOMIC; - spin_unlock_bh(&session->lock); - - if (value <= PAGE_SIZE) - tcp_conn->data = kmalloc(value, flags); - else - tcp_conn->data = (void*)__get_free_pages(flags, - get_order(value)); - if (tcp_conn->data == NULL) { - tcp_conn->data = saveptr; - return -ENOMEM; - } - if (tcp_conn->data_size <= PAGE_SIZE) - kfree(saveptr); - else - free_pages((unsigned long)saveptr, - get_order(tcp_conn->data_size)); - iscsi_set_param(cls_conn, param, buf, buflen); - tcp_conn->data_size = value; - break; - } case ISCSI_PARAM_HDRDGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); tcp_conn->hdr_size = sizeof(struct iscsi_hdr); @@ -2361,8 +2314,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) } static struct scsi_host_template iscsi_sht = { - .name = "iSCSI Initiator over TCP/IP, v" - ISCSI_TCP_VERSION, + .name = "iSCSI Initiator over TCP/IP", .queuecommand = iscsi_queuecommand, .change_queue_depth = iscsi_change_queue_depth, .can_queue = ISCSI_XMIT_CMDS_MAX - 1, @@ -2414,10 +2366,7 @@ static struct iscsi_transport iscsi_tcp_transport = { .get_conn_param = iscsi_tcp_conn_get_param, .get_session_param = iscsi_session_get_param, .start_conn = iscsi_conn_start, - .stop_conn = iscsi_conn_stop, - /* these are called as part of conn recovery */ - .suspend_conn_recv = iscsi_tcp_suspend_conn_rx, - .terminate_conn = iscsi_tcp_terminate_conn, + .stop_conn = iscsi_tcp_conn_stop, /* IO */ .send_pdu = iscsi_conn_send_pdu, .get_stats = iscsi_conn_get_stats, diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 
808302832e6..6a4ee704e46 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -78,8 +78,6 @@ struct iscsi_tcp_conn { char hdrext[4*sizeof(__u16) + sizeof(__u32)]; int data_copied; - char *data; /* data placeholder */ - int data_size; /* actual recv_dlength */ int stop_stage; /* conn_stop() flag: * * stop to recover, * * stop to terminate */ diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 386e5f21e19..73dd6c8deed 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -2746,7 +2746,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class) if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) return rc; - scontrol = (scontrol & 0x0f0) | 0x302; + scontrol = (scontrol & 0x0f0) | 0x304; if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) return rc; @@ -5185,28 +5185,6 @@ void ata_host_stop (struct ata_host_set *host_set) iounmap(host_set->mmio_base); } - -/** - * ata_host_remove - Unregister SCSI host structure with upper layers - * @ap: Port to unregister - * @do_unregister: 1 if we fully unregister, 0 to just stop the port - * - * LOCKING: - * Inherited from caller. - */ - -static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) -{ - struct Scsi_Host *sh = ap->host; - - DPRINTK("ENTER\n"); - - if (do_unregister) - scsi_remove_host(sh); - - ap->ops->port_stop(ap); -} - /** * ata_dev_init - Initialize an ata_device structure * @dev: Device structure to initialize @@ -5532,8 +5510,11 @@ int ata_device_add(const struct ata_probe_ent *ent) err_out: for (i = 0; i < count; i++) { - ata_host_remove(host_set->ports[i], 1); - scsi_host_put(host_set->ports[i]->host); + struct ata_port *ap = host_set->ports[i]; + if (ap) { + ap->ops->port_stop(ap); + scsi_host_put(ap->host); + } } err_free_ret: kfree(host_set); @@ -5558,7 +5539,7 @@ void ata_port_detach(struct ata_port *ap) int i; if (!ap->ops->error_handler) - return; + goto skip_eh; /* tell EH we're leaving & flush EH */ spin_lock_irqsave(ap->lock, flags); @@ -5594,6 +5575,7 @@ void ata_port_detach(struct ata_port *ap) cancel_delayed_work(&ap->hotplug_task); flush_workqueue(ata_aux_wq); + skip_eh: /* remove the associated SCSI host */ scsi_remove_host(ap->host); } @@ -5662,7 +5644,7 @@ int ata_scsi_release(struct Scsi_Host *host) DPRINTK("ENTER\n"); ap->ops->port_disable(ap); - ata_host_remove(ap, 0); + ap->ops->port_stop(ap); DPRINTK("EXIT\n"); return 1; diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c index 7ced41ecde8..e92c31d698f 100644 --- a/drivers/scsi/libata-scsi.c +++ b/drivers/scsi/libata-scsi.c @@ -2353,6 +2353,19 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) ata_gen_ata_desc_sense(qc); } + /* SCSI EH automatically locks door if sdev->locked is + * set. Sometimes door lock request continues to + * fail, for example, when no media is present. This + * creates a loop - SCSI EH issues door lock which + * fails and gets invoked again to acquire sense data + * for the failed command. + * + * If door lock fails, always clear sdev->locked to + * avoid this infinite loop. 
+ */ + if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) + qc->dev->sdev->locked = 0; + qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; qc->scsidone(cmd); ata_qc_free(qc); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 7e6e031cc41..5884cd26d53 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -189,6 +189,7 @@ static void iscsi_complete_command(struct iscsi_session *session, { struct scsi_cmnd *sc = ctask->sc; + ctask->state = ISCSI_TASK_COMPLETED; ctask->sc = NULL; list_del_init(&ctask->running); __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); @@ -275,6 +276,25 @@ out: return rc; } +static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) +{ + struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; + + conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; + conn->tmfrsp_pdus_cnt++; + + if (conn->tmabort_state != TMABORT_INITIAL) + return; + + if (tmf->response == ISCSI_TMF_RSP_COMPLETE) + conn->tmabort_state = TMABORT_SUCCESS; + else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) + conn->tmabort_state = TMABORT_NOT_FOUND; + else + conn->tmabort_state = TMABORT_FAILED; + wake_up(&conn->ehwait); +} + /** * __iscsi_complete_pdu - complete pdu * @conn: iscsi conn @@ -340,6 +360,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, switch(opcode) { case ISCSI_OP_LOGOUT_RSP: + if (datalen) { + rc = ISCSI_ERR_PROTO; + break; + } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; /* fall through */ case ISCSI_OP_LOGIN_RSP: @@ -348,7 +372,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * login related PDU's exp_statsn is handled in * userspace */ - rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); + if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; list_del(&mtask->running); if (conn->login_mtask != mtask) __kfifo_put(session->mgmtpool.queue, @@ -360,25 +385,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, break; } - conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - conn->tmfrsp_pdus_cnt++; - if (conn->tmabort_state == TMABORT_INITIAL) { - conn->tmabort_state = - ((struct iscsi_tm_rsp *)hdr)-> - response == ISCSI_TMF_RSP_COMPLETE ? 
- TMABORT_SUCCESS:TMABORT_FAILED; - /* unblock eh_abort() */ - wake_up(&conn->ehwait); - } + iscsi_tmf_rsp(conn, hdr); break; case ISCSI_OP_NOOP_IN: - if (hdr->ttt != ISCSI_RESERVED_TAG) { + if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) { rc = ISCSI_ERR_PROTO; break; } conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; - rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); + if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) + rc = ISCSI_ERR_CONN_FAILED; list_del(&mtask->running); if (conn->login_mtask != mtask) __kfifo_put(session->mgmtpool.queue, @@ -391,14 +408,21 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } else if (itt == ISCSI_RESERVED_TAG) { switch(opcode) { case ISCSI_OP_NOOP_IN: - if (!datalen) { - rc = iscsi_check_assign_cmdsn(session, - (struct iscsi_nopin*)hdr); - if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) - rc = iscsi_recv_pdu(conn->cls_conn, - hdr, NULL, 0); - } else + if (datalen) { rc = ISCSI_ERR_PROTO; + break; + } + + rc = iscsi_check_assign_cmdsn(session, + (struct iscsi_nopin*)hdr); + if (rc) + break; + + if (hdr->ttt == ISCSI_RESERVED_TAG) + break; + + if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0)) + rc = ISCSI_ERR_CONN_FAILED; break; case ISCSI_OP_REJECT: /* we need sth like iscsi_reject_rsp()*/ @@ -568,20 +592,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) } /* process command queue */ - while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask, - sizeof(void*))) { + spin_lock_bh(&conn->session->lock); + while (!list_empty(&conn->xmitqueue)) { /* * iscsi tcp may readd the task to the xmitqueue to send * write data */ - spin_lock_bh(&conn->session->lock); - if (list_empty(&conn->ctask->running)) - list_add_tail(&conn->ctask->running, &conn->run_list); + conn->ctask = list_entry(conn->xmitqueue.next, + struct iscsi_cmd_task, running); + conn->ctask->state = ISCSI_TASK_RUNNING; + list_move_tail(conn->xmitqueue.next, &conn->run_list); spin_unlock_bh(&conn->session->lock); + rc = tt->xmit_cmd_task(conn, conn->ctask); if (rc) goto again; + spin_lock_bh(&conn->session->lock); } + spin_unlock_bh(&conn->session->lock); /* done with this ctask */ conn->ctask = NULL; @@ -691,6 +719,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) sc->SCp.phase = session->age; sc->SCp.ptr = (char *)ctask; + ctask->state = ISCSI_TASK_PENDING; ctask->mtask = NULL; ctask->conn = conn; ctask->sc = sc; @@ -700,7 +729,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) session->tt->init_cmd_task(ctask); - __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); + list_add_tail(&ctask->running, &conn->xmitqueue); debug_scsi( "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", sc->sc_data_direction == DMA_TO_DEVICE ? 
"write" : "read", @@ -977,31 +1006,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc, /* * xmit mutex and session lock must be held */ -#define iscsi_remove_task(tasktype) \ -static struct iscsi_##tasktype * \ -iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt) \ -{ \ - int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \ - struct iscsi_##tasktype *task; \ - \ - debug_scsi("searching %d tasks\n", nr_tasks); \ - \ - for (i = 0; i < nr_tasks; i++) { \ - __kfifo_get(fifo, (void*)&task, sizeof(void*)); \ - debug_scsi("check task %u\n", task->itt); \ - \ - if (task->itt == itt) { \ - debug_scsi("matched task\n"); \ - return task; \ - } \ - \ - __kfifo_put(fifo, (void*)&task, sizeof(void*)); \ - } \ - return NULL; \ -} +static struct iscsi_mgmt_task * +iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt) +{ + int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); + struct iscsi_mgmt_task *task; -iscsi_remove_task(mgmt_task); -iscsi_remove_task(cmd_task); + debug_scsi("searching %d tasks\n", nr_tasks); + + for (i = 0; i < nr_tasks; i++) { + __kfifo_get(fifo, (void*)&task, sizeof(void*)); + debug_scsi("check task %u\n", task->itt); + + if (task->itt == itt) { + debug_scsi("matched task\n"); + return task; + } + + __kfifo_put(fifo, (void*)&task, sizeof(void*)); + } + return NULL; +} static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) { @@ -1027,12 +1052,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, { struct scsi_cmnd *sc; - conn->session->tt->cleanup_cmd_task(conn, ctask); - iscsi_ctask_mtask_cleanup(ctask); - sc = ctask->sc; if (!sc) return; + + conn->session->tt->cleanup_cmd_task(conn, ctask); + iscsi_ctask_mtask_cleanup(ctask); + sc->result = err; sc->resid = sc->request_bufflen; iscsi_complete_command(conn->session, ctask); @@ -1043,7 +1069,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; struct iscsi_conn *conn = ctask->conn; struct iscsi_session *session = conn->session; - struct iscsi_cmd_task *pending_ctask; int rc; conn->eh_abort_cnt++; @@ -1061,8 +1086,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) goto failed; /* ctask completed before time out */ - if (!ctask->sc) - goto success; + if (!ctask->sc) { + spin_unlock_bh(&session->lock); + debug_scsi("sc completed while abort in progress\n"); + goto success_rel_mutex; + } /* what should we do here ? 
*/ if (conn->ctask == ctask) { @@ -1071,17 +1099,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) goto failed; } - /* check for the easy pending cmd abort */ - pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt); - if (pending_ctask) { - /* iscsi_tcp queues write transfers on the xmitqueue */ - if (list_empty(&pending_ctask->running)) { - debug_scsi("found pending task\n"); - goto success; - } else - __kfifo_put(conn->xmitqueue, (void*)&pending_ctask, - sizeof(void*)); - } + if (ctask->state == ISCSI_TASK_PENDING) + goto success_cleanup; conn->tmabort_state = TMABORT_INITIAL; @@ -1089,25 +1108,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) rc = iscsi_exec_abort_task(sc, ctask); spin_lock_bh(&session->lock); - iscsi_ctask_mtask_cleanup(ctask); if (rc || sc->SCp.phase != session->age || session->state != ISCSI_STATE_LOGGED_IN) goto failed; + iscsi_ctask_mtask_cleanup(ctask); - /* ctask completed before tmf abort response */ - if (!ctask->sc) { - debug_scsi("sc completed while abort in progress\n"); - goto success; - } - - if (conn->tmabort_state != TMABORT_SUCCESS) { + switch (conn->tmabort_state) { + case TMABORT_SUCCESS: + goto success_cleanup; + case TMABORT_NOT_FOUND: + if (!ctask->sc) { + /* ctask completed before tmf abort response */ + spin_unlock_bh(&session->lock); + debug_scsi("sc completed while abort in progress\n"); + goto success_rel_mutex; + } + /* fall through */ + default: + /* timedout or failed */ spin_unlock_bh(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); spin_lock_bh(&session->lock); goto failed; } -success: +success_cleanup: debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); spin_unlock_bh(&session->lock); @@ -1121,6 +1146,7 @@ success: spin_unlock(&session->lock); write_unlock_bh(conn->recv_lock); +success_rel_mutex: mutex_unlock(&conn->xmitmutex); return SUCCESS; @@ -1263,6 +1289,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, if (cmd_task_size) ctask->dd_data = &ctask[1]; ctask->itt = cmd_i; + INIT_LIST_HEAD(&ctask->running); } spin_lock_init(&session->lock); @@ -1282,6 +1309,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, if (mgmt_task_size) mtask->dd_data = &mtask[1]; mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; + INIT_LIST_HEAD(&mtask->running); } if (scsi_add_host(shost, NULL)) @@ -1322,15 +1350,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct iscsi_session *session = iscsi_hostdata(shost->hostdata); + struct module *owner = cls_session->transport->owner; scsi_remove_host(shost); iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); iscsi_pool_free(&session->cmdpool, (void**)session->cmds); + kfree(session->targetname); + iscsi_destroy_session(cls_session); scsi_host_put(shost); - module_put(cls_session->transport->owner); + module_put(owner); } EXPORT_SYMBOL_GPL(iscsi_session_teardown); @@ -1361,12 +1392,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) conn->tmabort_state = TMABORT_INITIAL; INIT_LIST_HEAD(&conn->run_list); INIT_LIST_HEAD(&conn->mgmt_run_list); - - /* initialize general xmit PDU commands queue */ - conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), - GFP_KERNEL, NULL); - if (conn->xmitqueue == ERR_PTR(-ENOMEM)) - goto xmitqueue_alloc_fail; + INIT_LIST_HEAD(&conn->xmitqueue); /* initialize general immediate & non-immediate PDU commands queue */ conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), @@ -1394,7 +1420,7 
@@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); if (!data) goto login_mtask_data_alloc_fail; - conn->login_mtask->data = data; + conn->login_mtask->data = conn->data = data; init_timer(&conn->tmabort_timer); mutex_init(&conn->xmitmutex); @@ -1410,8 +1436,6 @@ login_mtask_alloc_fail: mgmtqueue_alloc_fail: kfifo_free(conn->immqueue); immqueue_alloc_fail: - kfifo_free(conn->xmitqueue); -xmitqueue_alloc_fail: iscsi_destroy_conn(cls_conn); return NULL; } @@ -1432,12 +1456,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); mutex_lock(&conn->xmitmutex); - if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) { - if (session->tt->suspend_conn_recv) - session->tt->suspend_conn_recv(conn); - - session->tt->terminate_conn(conn); - } spin_lock_bh(&session->lock); conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; @@ -1474,7 +1492,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) } spin_lock_bh(&session->lock); - kfree(conn->login_mtask->data); + kfree(conn->data); + kfree(conn->persistent_address); __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, sizeof(void*)); list_del(&conn->item); @@ -1489,7 +1508,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; spin_unlock_bh(&session->lock); - kfifo_free(conn->xmitqueue); kfifo_free(conn->immqueue); kfifo_free(conn->mgmtqueue); @@ -1572,7 +1590,7 @@ static void fail_all_commands(struct iscsi_conn *conn) struct iscsi_cmd_task *ctask, *tmp; /* flush pending */ - while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) { + list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) { debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, ctask->itt); fail_command(conn, ctask, DID_BUS_BUSY << 16); @@ -1615,8 +1633,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); spin_unlock_bh(&session->lock); - if (session->tt->suspend_conn_recv) - session->tt->suspend_conn_recv(conn); + write_lock_bh(conn->recv_lock); + set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); + write_unlock_bh(conn->recv_lock); mutex_lock(&conn->xmitmutex); /* @@ -1635,7 +1654,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, } } - session->tt->terminate_conn(conn); /* * flush queues. 
*/ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5c68cdd8736..d384c16f4a8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -222,7 +222,7 @@ lpfc_issue_lip(struct Scsi_Host *host) pmboxq->mb.mbxCommand = MBX_DOWN_LINK; pmboxq->mb.mbxOwner = OWN_HOST; - mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); @@ -884,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count) phba->sysfs_mbox.mbox == NULL ) { sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -EINVAL; + return -EAGAIN; } } @@ -1000,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) spin_unlock_irq(phba->host->host_lock); rc = lpfc_sli_issue_mbox_wait (phba, phba->sysfs_mbox.mbox, - phba->fc_ratov * 2); + lpfc_mbox_tmo_val(phba, + phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); spin_lock_irq(phba->host->host_lock); } if (rc != MBX_SUCCESS) { sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -ENODEV; + return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; } phba->sysfs_mbox.state = SMBOX_READING; } @@ -1016,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count) printk(KERN_WARNING "mbox_read: Bad State\n"); sysfs_mbox_idle(phba); spin_unlock_irq(host->host_lock); - return -EINVAL; + return -EAGAIN; } memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); @@ -1210,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost) struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; struct lpfc_sli *psli = &phba->sli; struct fc_host_statistics *hs = &phba->link_stats; + struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; + unsigned long seconds; int rc = 0; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -1272,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost) hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; hs->error_frames = pmb->un.varRdLnk.crcCnt; + hs->link_failure_count -= lso->link_failure_count; + hs->loss_of_sync_count -= lso->loss_of_sync_count; + hs->loss_of_signal_count -= lso->loss_of_signal_count; + hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count; + hs->invalid_tx_word_count -= lso->invalid_tx_word_count; + hs->invalid_crc_count -= lso->invalid_crc_count; + hs->error_frames -= lso->error_frames; + if (phba->fc_topology == TOPOLOGY_LOOP) { hs->lip_count = (phba->fc_eventTag >> 1); + hs->lip_count -= lso->link_events; hs->nos_count = -1; } else { hs->lip_count = -1; hs->nos_count = (phba->fc_eventTag >> 1); + hs->nos_count -= lso->link_events; } hs->dumped_frames = -1; -/* FIX ME */ - /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/ + seconds = get_seconds(); + if (seconds < psli->stats_start) + hs->seconds_since_last_reset = seconds + + ((unsigned long)-1 - psli->stats_start); + else + hs->seconds_since_last_reset = seconds - psli->stats_start; return hs; } +static void +lpfc_reset_stats(struct Scsi_Host *shost) +{ + struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; + LPFC_MBOXQ_t *pmboxq; + MAILBOX_t *pmb; + int rc = 0; + + pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmboxq) + return; + memset(pmboxq, 0, 
sizeof(LPFC_MBOXQ_t)); + + pmb = &pmboxq->mb; + pmb->mbxCommand = MBX_READ_STATUS; + pmb->mbxOwner = OWN_HOST; + pmb->un.varWords[0] = 0x1; /* reset request */ + pmboxq->context1 = NULL; + + if ((phba->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + + if (rc != MBX_SUCCESS) { + if (rc == MBX_TIMEOUT) + pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + else + mempool_free(pmboxq, phba->mbox_mem_pool); + return; + } + + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); + pmb->mbxCommand = MBX_READ_LNK_STAT; + pmb->mbxOwner = OWN_HOST; + pmboxq->context1 = NULL; + + if ((phba->fc_flag & FC_OFFLINE_MODE) || + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + + if (rc != MBX_SUCCESS) { + if (rc == MBX_TIMEOUT) + pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + else + mempool_free( pmboxq, phba->mbox_mem_pool); + return; + } + + lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt; + lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt; + lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt; + lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt; + lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; + lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; + lso->error_frames = pmb->un.varRdLnk.crcCnt; + lso->link_events = (phba->fc_eventTag >> 1); + + psli->stats_start = get_seconds(); + + return; +} /* * The LPFC driver treats linkdown handling as target loss events so there @@ -1431,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = { */ .get_fc_host_stats = lpfc_get_stats, - - /* the LPFC driver doesn't support resetting stats yet */ + .reset_fc_host_stats = lpfc_reset_stats, .dd_fcrport_size = sizeof(struct lpfc_rport_data), .show_rport_maxframe_size = 1, diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 517e9e4dd46..2a176467f71 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); +int lpfc_mbox_tmo_val(struct lpfc_hba *, int); int lpfc_mem_alloc(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index b65ee57af53..bbb7310210b 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba, } ct_unsol_event_exit_piocbq: + list_del(&head); if (pmbuf) { list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { lpfc_mbuf_free(phba, matp->virt, matp->phys); @@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (CTrsp->CommandResponse.bits.CmdRsp == be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0239 NameServer Rsp " + "%d:0208 NameServer Rsp " "Data: x%x\n", phba->brd_no, phba->fc_flag); @@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) lpfc_decode_firmware_rev(phba, fwrev, 0); - if (phba->Port[0]) { - sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName, - phba->Port, fwrev, lpfc_release_version); - } else { - 
sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, - fwrev, lpfc_release_version); - } + sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, + fwrev, lpfc_release_version); + return; } /* diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b89f6cb641e..3567de61316 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1848,9 +1848,12 @@ static void lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb) { + IOCB_t *irsp; struct lpfc_nodelist *ndlp; LPFC_MBOXQ_t *mbox = NULL; + irsp = &rspiocb->iocb; + ndlp = (struct lpfc_nodelist *) cmdiocb->context1; if (cmdiocb->context_un.mbox) mbox = cmdiocb->context_un.mbox; @@ -1893,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, mempool_free( mbox, phba->mbox_mem_pool); } else { mempool_free( mbox, phba->mbox_mem_pool); - if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - ndlp = NULL; + /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ + if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || + (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || + (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) { + if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + ndlp = NULL; + } } } } @@ -2839,7 +2848,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* Xmit ELS RPS ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0128 Xmit ELS RPS ACC response tag x%x " + "%d:0118 Xmit ELS RPS ACC response tag x%x " "Data: x%x x%x x%x x%x x%x\n", phba->brd_no, elsiocb->iocb.ulpIoTag, @@ -2948,7 +2957,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag <ulpIoTag> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "%d:0128 Xmit ELS RPL ACC response tag x%x " + "%d:0120 Xmit ELS RPL ACC response tag x%x " "Data: x%x x%x x%x x%x x%x\n", phba->brd_no, elsiocb->iocb.ulpIoTag, @@ -3109,7 +3118,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist *ndlp, *next_ndlp; /* FAN received */ - lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n", + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n", phba->brd_no); icmd = &cmdiocb->iocb; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 4d6cf990c4f..b2f1552f184 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1557,6 +1557,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } + + spin_lock_irq(phba->host->host_lock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { @@ -1569,6 +1571,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) mempool_free(mb, phba->mbox_mem_pool); } } + spin_unlock_irq(phba->host->host_lock); lpfc_els_abort(phba,ndlp,0); spin_lock_irq(phba->host->host_lock); @@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to REGLOGIN */ /* FIND node DID reglogin */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID reglogin" + "%d:0901 FIND node DID reglogin" " Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t 
order, uint32_t did) /* LOG change to PRLI */ /* FIND node DID prli */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID prli " + "%d:0902 FIND node DID prli " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to NPR */ /* FIND node DID npr */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID npr " + "%d:0903 FIND node DID npr " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) /* LOG change to UNUSED */ /* FIND node DID unused */ lpfc_printf_log(phba, KERN_INFO, LOG_NODE, - "%d:0931 FIND node DID unused " + "%d:0905 FIND node DID unused " "Data: x%p x%x x%x x%x\n", phba->brd_no, ndlp, ndlp->nlp_DID, @@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!initlinkmbox) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0226 Device Discovery " + "%d:0206 Device Discovery " "completion error\n", phba->brd_no); phba->hba_state = LPFC_HBA_ERROR; @@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) if (!clearlambox) { clrlaerr = 1; lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0226 Device Discovery " + "%d:0207 Device Discovery " "completion error\n", phba->brd_no); phba->hba_state = LPFC_HBA_ERROR; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index ef47b824cbe..f6948ffe689 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1379,6 +1379,7 @@ lpfc_offline(struct lpfc_hba * phba) /* stop all timers associated with this hba */ lpfc_stop_timer(phba); phba->work_hba_events = 0; + phba->work_ha = 0; lpfc_printf_log(phba, KERN_WARNING, @@ -1616,7 +1617,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_iocbq; } - /* We can rely on a queue depth attribute only after SLI HBA setup */ + /* + * Set initial can_queue value since 0 is no longer supported and + * scsi_add_host will fail. This will be adjusted later based on the + * max xri value determined in hba setup. + */ host->can_queue = phba->cfg_hba_queue_depth - 10; /* Tell the midlayer we support 16 byte commands */ @@ -1656,6 +1661,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_irq; } + /* + * hba setup may have changed the hba_queue_depth so we need to adjust + * the value of can_queue. 
+ */ + host->can_queue = phba->cfg_hba_queue_depth - 10; + lpfc_discovery_wait(phba); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index e42f22aaf71..4d016c2a1b2 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba) return mbq; } + +int +lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) +{ + switch (cmd) { + case MBX_WRITE_NV: /* 0x03 */ + case MBX_UPDATE_CFG: /* 0x1B */ + case MBX_DOWN_LOAD: /* 0x1C */ + case MBX_DEL_LD_ENTRY: /* 0x1D */ + case MBX_LOAD_AREA: /* 0x81 */ + case MBX_FLASH_WR_ULA: /* 0x98 */ + case MBX_LOAD_EXP_ROM: /* 0x9C */ + return LPFC_MBOX_TMO_FLASH_CMD; + } + return LPFC_MBOX_TMO; +} diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index bd0b0e293d6..20449a8dd53 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, /* Abort outstanding I/O on NPort <nlp_DID> */ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0201 Abort outstanding I/O on NPort x%x " + "%d:0205 Abort outstanding I/O on NPort x%x " "Data: x%x x%x x%x\n", phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba, mbox->context2 = ndlp; ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); + /* + * If there is an outstanding PLOGI issued, abort it before + * sending ACC rsp for received PLOGI. If pending plogi + * is not canceled here, the plogi will be rejected by + * remote port and will be retried. On a configuration with + * single discovery thread, this will cause a huge delay in + * discovery. Also this will cause multiple state machines + * running in parallel for this node. + */ + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp, 1); + } + lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); return 1; @@ -1601,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, lpfc_rcv_padisc(phba, ndlp, cmdiocb); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + /* + * Do not start discovery if discovery is about to start + * or discovery in progress for this node. Starting discovery + * here will affect the counting of discovery threads. 
+ */ + if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) && + (ndlp->nlp_flag & NLP_NPR_2B_DISC)){ if (ndlp->nlp_flag & NLP_NPR_ADISC) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; ndlp->nlp_state = NLP_STE_ADISC_ISSUE; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a760a44173d..a8816a8738f 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -21,6 +21,7 @@ #include <linux/pci.h> #include <linux/interrupt.h> +#include <linux/delay.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -841,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) return 0; } +static void +lpfc_block_error_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); + + spin_lock_irq(shost->host_lock); + while (rport->port_state == FC_PORTSTATE_BLOCKED) { + spin_unlock_irq(shost->host_lock); + msleep(1000); + spin_lock_irq(shost->host_lock); + } + spin_unlock_irq(shost->host_lock); + return; +} static int lpfc_abort_handler(struct scsi_cmnd *cmnd) @@ -855,6 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) unsigned int loop_count = 0; int ret = SUCCESS; + lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; @@ -957,6 +974,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) int ret = FAILED; int cnt, loopcnt; + lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); /* * If target is not in a MAPPED state, delay the reset until @@ -1073,6 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) int cnt, loopcnt; struct lpfc_scsi_buf * lpfc_cmd; + lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); lpfc_cmd = lpfc_get_scsi_buf(phba); @@ -1104,7 +1123,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) ndlp->rport->dd_data); if (ret != SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "%d:0713 Bus Reset on target %d failed\n", + "%d:0700 Bus Reset on target %d failed\n", phba->brd_no, i); err_count++; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 350a625fa22..70f4d5a1348 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -320,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) kfree(old_arr); return iotag; } - } + } else + spin_unlock_irq(phba->host->host_lock); lpfc_printf_log(phba, KERN_ERR,LOG_SLI, "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", @@ -969,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) * resources need to be recovered. */ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - printk(KERN_INFO "%s: IOCB cmd 0x%x processed." - " Skipping completion\n", __FUNCTION__, - irsp->ulpCommand); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "%d:0314 IOCB cmd 0x%x" + " processed. Skipping" + " completion", phba->brd_no, + irsp->ulpCommand); break; } @@ -1104,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, if (unlikely(irsp->ulpStatus)) { /* Rsp ring <ringno> error: IOCB */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "%d:0326 Rsp Ring %d error: IOCB Data: " + "%d:0336 Rsp Ring %d error: IOCB Data: " "x%x x%x x%x x%x x%x x%x x%x x%x\n", phba->brd_no, pring->ringno, irsp->un.ulpWord[0], irsp->un.ulpWord[1], @@ -1122,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, * resources need to be recovered. 
*/ if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { - printk(KERN_INFO "%s: IOCB cmd 0x%x processed. " - "Skipping completion\n", __FUNCTION__, - irsp->ulpCommand); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "%d:0333 IOCB cmd 0x%x" + " processed. Skipping" + " completion\n", phba->brd_no, + irsp->ulpCommand); break; } @@ -1155,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, } else { /* Unknown IOCB command */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0321 Unknown IOCB command " + "%d:0334 Unknown IOCB command " "Data: x%x, x%x x%x x%x x%x\n", phba->brd_no, type, irsp->ulpCommand, irsp->ulpStatus, irsp->ulpIoTag, @@ -1238,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0312 Ring %d handler: portRspPut %d " + "%d:0303 Ring %d handler: portRspPut %d " "is bigger then rsp ring %d\n", phba->brd_no, pring->ringno, portRspPut, portRspMax); @@ -1383,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0321 Unknown IOCB command " + "%d:0335 Unknown IOCB command " "Data: x%x x%x x%x x%x\n", phba->brd_no, irsp->ulpCommand, @@ -1399,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, next_iocb, &saveq->list, list) { + list_del(&rspiocbp->list); lpfc_sli_release_iocbq(phba, rspiocbp); } } - lpfc_sli_release_iocbq(phba, saveq); } } @@ -1711,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba) phba->fc_myDID = 0; phba->fc_prevDID = 0; - psli->sli_flag = 0; - /* Turn off parity checking and serr during the physical reset */ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); /* Now toggle INITFF bit in the Host Control Register */ writel(HC_INITFF, phba->HCregaddr); mdelay(1); @@ -1760,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) /* Restart HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no, + "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, phba->hba_state, psli->sli_flag); word0 = 0; @@ -1792,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba) spin_unlock_irq(phba->host->host_lock); + memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); + psli->stats_start = get_seconds(); + if (skip_post) mdelay(100); else @@ -1902,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) } while (resetcount < 2 && !done) { + spin_lock_irq(phba->host->host_lock); + phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(phba->host->host_lock); phba->hba_state = LPFC_STATE_UNKNOWN; lpfc_sli_brdrestart(phba); msleep(2500); @@ -1909,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba) if (rc) break; + spin_lock_irq(phba->host->host_lock); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(phba->host->host_lock); resetcount++; /* Call pre CONFIG_PORT mailbox command initialization. 
A value of 0 @@ -2194,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) return (MBX_NOT_FINISHED); } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO); + mod_timer(&psli->mbox_tmo, (jiffies + + (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); } /* Mailbox cmd <cmd> issue */ @@ -2254,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) break; case MBX_POLL: - i = 0; psli->mbox_active = NULL; if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First read mbox status word */ @@ -2268,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Read the HBA Host Attention Register */ ha_copy = readl(phba->HAregaddr); + i = lpfc_mbox_tmo_val(phba, mb->mbxCommand); + i *= 1000; /* Convert to ms */ + /* Wait for command to complete */ while (((word0 & OWN_CHIP) == OWN_CHIP) || (!(ha_copy & HA_MBATT) && (phba->hba_state > LPFC_WARM_START))) { - if (i++ >= 100) { + if (i-- <= 0) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); @@ -2290,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) /* Can be in interrupt context, do not sleep */ /* (or might be called with interrupts disabled) */ - mdelay(i); + mdelay(1); spin_lock_irqsave(phba->host->host_lock, drvr_flag); @@ -3005,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, if (timeleft == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "%d:0329 IOCB wait timeout error - no " + "%d:0338 IOCB wait timeout error - no " "wake response Data x%x\n", phba->brd_no, timeout); retval = IOCB_TIMEDOUT; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index d8ef0d2894d..e26de680935 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -172,6 +172,18 @@ struct lpfc_sli_stat { uint32_t mbox_busy; /* Mailbox cmd busy */ }; +/* Structure to store link status values when port stats are reset */ +struct lpfc_lnk_stat { + uint32_t link_failure_count; + uint32_t loss_of_sync_count; + uint32_t loss_of_signal_count; + uint32_t prim_seq_protocol_err_count; + uint32_t invalid_tx_word_count; + uint32_t invalid_crc_count; + uint32_t error_frames; + uint32_t link_events; +}; + /* Structure used to hold SLI information */ struct lpfc_sli { uint32_t num_rings; @@ -201,6 +213,8 @@ struct lpfc_sli { struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ size_t iocbq_lookup_len; /* current lengs of the array */ uint16_t last_iotag; /* last allocated IOTAG */ + unsigned long stats_start; /* in seconds */ + struct lpfc_lnk_stat lnk_stat_offsets; }; /* Given a pointer to the start of the ring, and the slot number of @@ -211,3 +225,9 @@ struct lpfc_sli { #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox command */ +#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write + * or erase cmds. This is especially + * long because of the potential of + * multiple flash erases that can be + * spawned. + */ diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 10e89c6ae82..c7091ea29f3 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.7" +#define LPFC_DRIVER_VERSION "8.1.9" #define LPFC_DRIVER_NAME "lpfc" diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h index 4675343228a..8cd0bd1d0f7 100644 --- a/drivers/scsi/megaraid/mega_common.h +++ b/drivers/scsi/megaraid/mega_common.h @@ -37,6 +37,12 @@ #define LSI_MAX_CHANNELS 16 #define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) +#define HBA_SIGNATURE_64_BIT 0x299 +#define PCI_CONF_AMISIG64 0xa4 + +#define MEGA_SCSI_INQ_EVPD 1 +#define MEGA_INVALID_FIELD_IN_CDB 0x24 + /** * scb_t - scsi command control block diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h index bdaee144a1c..b8aa34202ec 100644 --- a/drivers/scsi/megaraid/megaraid_ioctl.h +++ b/drivers/scsi/megaraid/megaraid_ioctl.h @@ -132,6 +132,10 @@ typedef struct uioc { /* Driver Data: */ void __user * user_data; uint32_t user_data_len; + + /* 64bit alignment */ + uint32_t pad_for_64bit_align; + mraid_passthru_t __user *user_pthru; mraid_passthru_t *pthru32; diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 92715130ac0..cd982c877da 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. * * FILE : megaraid_mbox.c - * Version : v2.20.4.8 (Apr 11 2006) + * Version : v2.20.4.9 (Jul 16 2006) * * Authors: * Atul Mukker <Atul.Mukker@lsil.com> @@ -720,6 +720,7 @@ megaraid_init_mbox(adapter_t *adapter) struct pci_dev *pdev; mraid_device_t *raid_dev; int i; + uint32_t magic64; adapter->ito = MBOX_TIMEOUT; @@ -863,12 +864,33 @@ megaraid_init_mbox(adapter_t *adapter) // Set the DMA mask to 64-bit. 
All supported controllers as capable of // DMA in this range - if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) { - - con_log(CL_ANN, (KERN_WARNING - "megaraid: could not set DMA mask for 64-bit.\n")); + pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64); + + if (((magic64 == HBA_SIGNATURE_64_BIT) && + ((adapter->pdev->subsystem_device != + PCI_SUBSYS_ID_MEGARAID_SATA_150_6) || + (adapter->pdev->subsystem_device != + PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_VERDE) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_DOBSON) || + (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC && + adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) || + (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && + adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || + (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && + adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { + if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: DMA mask for 64-bit failed\n")); - goto out_free_sysfs_res; + if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: 32-bit DMA mask failed\n")); + goto out_free_sysfs_res; + } + } } // setup tasklet for DPC @@ -1622,6 +1644,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) rdev->last_disp |= (1L << SCP2CHANNEL(scp)); } + if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) { + scp->sense_buffer[0] = 0x70; + scp->sense_buffer[2] = ILLEGAL_REQUEST; + scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB; + scp->result = CHECK_CONDITION << 1; + return NULL; + } + /* Fall through */ case READ_CAPACITY: diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h index 868fb0ec93e..2b5a3285f79 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.h +++ b/drivers/scsi/megaraid/megaraid_mbox.h @@ -21,8 +21,8 @@ #include "megaraid_ioctl.h" -#define MEGARAID_VERSION "2.20.4.8" -#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" +#define MEGARAID_VERSION "2.20.4.9" +#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)" /* diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index e8f534fb336..d85b9a8f1b8 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. 
* * FILE : megaraid_mm.c - * Version : v2.20.2.6 (Mar 7 2005) + * Version : v2.20.2.7 (Jul 16 2006) * * Common management module */ diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h index 3d9e67d6849..c8762b2b8ed 100644 --- a/drivers/scsi/megaraid/megaraid_mm.h +++ b/drivers/scsi/megaraid/megaraid_mm.h @@ -27,9 +27,9 @@ #include "megaraid_ioctl.h" -#define LSI_COMMON_MOD_VERSION "2.20.2.6" +#define LSI_COMMON_MOD_VERSION "2.20.2.7" #define LSI_COMMON_MOD_EXT_VERSION \ - "(Release Date: Mon Mar 7 00:01:03 EST 2005)" + "(Release Date: Sun Jul 16 00:01:03 EST 2006)" #define LSI_DBGLVL dbglevel diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c index d1f38c32aa1..efc8fff1d25 100644 --- a/drivers/scsi/pdc_adma.c +++ b/drivers/scsi/pdc_adma.c @@ -183,7 +183,8 @@ static struct ata_port_info adma_port_info[] = { { .sht = &adma_ata_sht, .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | - ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, + ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | + ATA_FLAG_PIO_POLLING, .pio_mask = 0x10, /* pio4 */ .udma_mask = 0x1f, /* udma0-4 */ .port_ops = &adma_ata_ops, diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 139ea0e27fd..0930260aec2 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -487,6 +487,7 @@ typedef struct { #define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ #define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ /* used. */ +#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */ #define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ #define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ #define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9758dba9554..859649160ca 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3063,6 +3063,7 @@ qla2x00_update_fcports(scsi_qla_host_t *ha) int qla2x00_abort_isp(scsi_qla_host_t *ha) { + int rval; unsigned long flags = 0; uint16_t cnt; srb_t *sp; @@ -3119,6 +3120,16 @@ qla2x00_abort_isp(scsi_qla_host_t *ha) ha->isp_abort_cnt = 0; clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); + + if (ha->eft) { + rval = qla2x00_trace_control(ha, TC_ENABLE, + ha->eft_dma, EFT_NUM_BUFFERS); + if (rval) { + qla_printk(KERN_WARNING, ha, + "Unable to reinitialize EFT " + "(%d).\n", rval); + } + } } else { /* failed the ISP abort */ ha->flags.online = 1; if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 2b60a27eff0..c5b3c610a32 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -471,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, mrk24->nport_handle = cpu_to_le16(loop_id); mrk24->lun[1] = LSB(lun); mrk24->lun[2] = MSB(lun); + host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); } else { SET_TARGET_ID(ha, mrk->target, loop_id); mrk->lun = cpu_to_le16(lun); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 795bf15b1b8..de0613135f7 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -587,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); break; + + case MBA_TRACE_NOTIFICATION: + DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", + ha->host_no, mb[1], mb[2])); + break; } } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index ec7ebb6037e..65cbe2f5eea 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -744,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *ha = to_qla_host(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; - srb_t *sp; int ret; unsigned int id, lun; unsigned long serial; @@ -755,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) lun = cmd->device->lun; serial = cmd->serial_number; - sp = (srb_t *) CMD_SP(cmd); - if (!sp || !fcport) + if (!fcport) return ret; qla_printk(KERN_INFO, ha, @@ -875,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *ha = to_qla_host(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; - srb_t *sp; int ret; unsigned int id, lun; unsigned long serial; @@ -886,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) lun = cmd->device->lun; serial = cmd->serial_number; - sp = (srb_t *) CMD_SP(cmd); - if (!sp || !fcport) + if (!fcport) return ret; qla_printk(KERN_INFO, ha, @@ -936,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) { scsi_qla_host_t *ha = to_qla_host(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; - srb_t *sp; int ret; unsigned int id, lun; unsigned long serial; @@ -947,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) lun = cmd->device->lun; serial = cmd->serial_number; - sp = (srb_t *) CMD_SP(cmd); - if (!sp || !fcport) + if (!fcport) return ret; qla_printk(KERN_INFO, ha, @@ -2244,9 +2238,6 @@ qla2x00_do_dpc(void *data) next_loopid = 0; list_for_each_entry(fcport, &ha->fcports, list) 
{ - if (fcport->port_type != FCT_TARGET) - continue; - /* * If the port is not ONLINE then try to login * to it if we haven't run out of retries. diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index d2d68344065..971259032ef 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.01.05-k3" +#define QLA2XXX_VERSION "8.01.07-k1" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 1 -#define QLA_DRIVER_PATCH_VER 5 +#define QLA_DRIVER_PATCH_VER 7 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c index 2e0f4a4076a..3f368c7d3ef 100644 --- a/drivers/scsi/sata_sil24.c +++ b/drivers/scsi/sata_sil24.c @@ -1106,7 +1106,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) probe_ent->irq = pdev->irq; probe_ent->irq_flags = IRQF_SHARED; - probe_ent->mmio_base = port_base; probe_ent->private_data = hpriv; hpriv->host_base = host_base; diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c index 03baec2191b..01d40369a8a 100644 --- a/drivers/scsi/sata_via.c +++ b/drivers/scsi/sata_via.c @@ -74,6 +74,7 @@ enum { static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static void vt6420_error_handler(struct ata_port *ap); static const struct pci_device_id svia_pci_tbl[] = { { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, @@ -107,7 +108,38 @@ static struct scsi_host_template svia_sht = { .bios_param = ata_std_bios_param, }; -static const struct ata_port_operations svia_sata_ops = { +static const struct ata_port_operations vt6420_sata_ops = { + .port_disable = ata_port_disable, + + .tf_load = ata_tf_load, + .tf_read = ata_tf_read, + .check_status = ata_check_status, + .exec_command = ata_exec_command, + .dev_select = ata_std_dev_select, + + .bmdma_setup = ata_bmdma_setup, + .bmdma_start = ata_bmdma_start, + .bmdma_stop = ata_bmdma_stop, + .bmdma_status = ata_bmdma_status, + + .qc_prep = ata_qc_prep, + .qc_issue = ata_qc_issue_prot, + .data_xfer = ata_pio_data_xfer, + + .freeze = ata_bmdma_freeze, + .thaw = ata_bmdma_thaw, + .error_handler = vt6420_error_handler, + .post_internal_cmd = ata_bmdma_post_internal_cmd, + + .irq_handler = ata_interrupt, + .irq_clear = ata_bmdma_irq_clear, + + .port_start = ata_port_start, + .port_stop = ata_port_stop, + .host_stop = ata_host_stop, +}; + +static const struct ata_port_operations vt6421_sata_ops = { .port_disable = ata_port_disable, .tf_load = ata_tf_load, @@ -141,13 +173,13 @@ static const struct ata_port_operations svia_sata_ops = { .host_stop = ata_host_stop, }; -static struct ata_port_info svia_port_info = { +static struct ata_port_info vt6420_port_info = { .sht = &svia_sht, .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = 0x7f, - .port_ops = &svia_sata_ops, + .port_ops = &vt6420_sata_ops, }; MODULE_AUTHOR("Jeff Garzik"); @@ -170,6 +202,81 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) outl(val, ap->ioaddr.scr_addr + (4 * sc_reg)); } +/** + * vt6420_prereset - prereset for vt6420 + * @ap: target ATA port + * + * SCR registers on vt6420 are pieces of shit and may hang the + * whole machine completely if accessed with the wrong timing. 
+ * To avoid such catastrophe, vt6420 doesn't provide generic SCR + * access operations, but uses SStatus and SControl only during + * boot probing in controlled way. + * + * As the old (pre EH update) probing code is proven to work, we + * strictly follow the access pattern. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno otherwise. + */ +static int vt6420_prereset(struct ata_port *ap) +{ + struct ata_eh_context *ehc = &ap->eh_context; + unsigned long timeout = jiffies + (HZ * 5); + u32 sstatus, scontrol; + int online; + + /* don't do any SCR stuff if we're not loading */ + if (!ATA_PFLAG_LOADING) + goto skip_scr; + + /* Resume phy. This is the old resume sequence from + * __sata_phy_reset(). + */ + svia_scr_write(ap, SCR_CONTROL, 0x300); + svia_scr_read(ap, SCR_CONTROL); /* flush */ + + /* wait for phy to become ready, if necessary */ + do { + msleep(200); + if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1) + break; + } while (time_before(jiffies, timeout)); + + /* open code sata_print_link_status() */ + sstatus = svia_scr_read(ap, SCR_STATUS); + scontrol = svia_scr_read(ap, SCR_CONTROL); + + online = (sstatus & 0xf) == 0x3; + + ata_port_printk(ap, KERN_INFO, + "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n", + online ? "up" : "down", sstatus, scontrol); + + /* SStatus is read one more time */ + svia_scr_read(ap, SCR_STATUS); + + if (!online) { + /* tell EH to bail */ + ehc->i.action &= ~ATA_EH_RESET_MASK; + return 0; + } + + skip_scr: + /* wait for !BSY */ + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + return 0; +} + +static void vt6420_error_handler(struct ata_port *ap) +{ + return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, + NULL, ata_std_postreset); +} + static const unsigned int svia_bar_sizes[] = { 8, 4, 8, 4, 16, 256 }; @@ -210,7 +317,7 @@ static void vt6421_init_addrs(struct ata_probe_ent *probe_ent, static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev) { struct ata_probe_ent *probe_ent; - struct ata_port_info *ppi = &svia_port_info; + struct ata_port_info *ppi = &vt6420_port_info; probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); if (!probe_ent) @@ -239,7 +346,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev) probe_ent->sht = &svia_sht; probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY; - probe_ent->port_ops = &svia_sata_ops; + probe_ent->port_ops = &vt6421_sata_ops; probe_ent->n_ports = N_PORTS; probe_ent->irq = pdev->irq; probe_ent->irq_flags = IRQF_SHARED; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 6a5b731bd5b..a8ed5a22009 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -460,7 +460,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) * Return value: * SUCCESS or FAILED or NEEDS_RETRY **/ -static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense) +static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, + int cmnd_size, int timeout, int copy_sense) { struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; @@ -490,6 +491,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense old_cmd_len = scmd->cmd_len; old_use_sg = scmd->use_sg; + memset(scmd->cmnd, 0, sizeof(scmd->cmnd)); + memcpy(scmd->cmnd, cmnd, cmnd_size); + if (copy_sense) { int gfp_mask = GFP_ATOMIC; @@ -610,8 +614,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd) static unsigned char 
generic_sense[6] = {REQUEST_SENSE, 0, 0, 0, 252, 0}; - memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); - return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1); + return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1); } /** @@ -736,10 +739,7 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd) int retry_cnt = 1, rtn; retry_tur: - memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); - - - rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0); + rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", __FUNCTION__, scmd, rtn)); @@ -839,8 +839,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd) if (scmd->device->allow_restart) { int rtn; - memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); - rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0); + rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, + START_UNIT_TIMEOUT, 0); if (rtn == SUCCESS) return 0; } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 7b9e8fa1a4e..2ecd1418857 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -34,6 +34,7 @@ #define ISCSI_SESSION_ATTRS 11 #define ISCSI_CONN_ATTRS 11 #define ISCSI_HOST_ATTRS 0 +#define ISCSI_TRANSPORT_VERSION "1.1-646" struct iscsi_internal { int daemon_pid; @@ -634,13 +635,13 @@ mempool_zone_get_skb(struct mempool_zone *zone) } static int -iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb) +iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp) { unsigned long flags; int rc; skb_get(skb); - rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL); + rc = netlink_broadcast(nls, skb, 0, 1, gfp); if (rc < 0) { mempool_free(skb, zone->pool); printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc); @@ -749,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) ev->r.connerror.cid = conn->cid; ev->r.connerror.sid = iscsi_conn_get_sid(conn); - iscsi_broadcast_skb(conn->z_error, skb); + iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC); dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", error); @@ -895,7 +896,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn) * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ - rc = iscsi_broadcast_skb(conn->z_pdu, skb); + rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); if (rc < 0) dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " "session destruction event. Check iscsi daemon\n"); @@ -958,7 +959,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn) * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ - rc = iscsi_broadcast_skb(conn->z_pdu, skb); + rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL); if (rc < 0) dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " "session creation event. 
Check iscsi daemon\n"); @@ -1613,6 +1614,9 @@ static __init int iscsi_transport_init(void) { int err; + printk(KERN_INFO "Loading iSCSI transport class v%s.", + ISCSI_TRANSPORT_VERSION); + err = class_register(&iscsi_transport_class); if (err) return err; @@ -1678,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, " "Alex Aizman <itn780@yahoo.com>"); MODULE_DESCRIPTION("iSCSI Transport Interface"); MODULE_LICENSE("GPL"); +MODULE_VERSION(ISCSI_TRANSPORT_VERSION); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 65eef33846b..34f9343ed0a 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -18,8 +18,8 @@ * */ -static int sg_version_num = 30533; /* 2 digits for each component */ -#define SG_VERSION_STR "3.5.33" +static int sg_version_num = 30534; /* 2 digits for each component */ +#define SG_VERSION_STR "3.5.34" /* * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: @@ -60,7 +60,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */ #ifdef CONFIG_SCSI_PROC_FS #include <linux/proc_fs.h> -static char *sg_version_date = "20050908"; +static char *sg_version_date = "20060818"; static int sg_proc_init(void); static void sg_proc_cleanup(void); @@ -1164,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type) len = vma->vm_end - sa; len = (len < sg->length) ? len : sg->length; if (offset < len) { - page = sg->page; + page = virt_to_page(page_address(sg->page) + offset); get_page(page); /* increment page count */ break; } diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 8c505076c0e..739d3ef46a4 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = { { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index a1d322f8a16..cd1979daf2b 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c @@ -936,6 +936,7 @@ enum pci_board_num_t { pbn_b1_8_1382400, pbn_b2_1_115200, + pbn_b2_2_115200, pbn_b2_8_115200, pbn_b2_1_460800, @@ -1243,6 +1244,12 @@ static struct pciserial_board pci_boards[] __devinitdata = { .base_baud = 115200, .uart_offset = 8, }, + [pbn_b2_2_115200] = { + .flags = FL_BASE2, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + }, [pbn_b2_8_115200] = { .flags = FL_BASE2, .num_ports = 8, @@ -2340,6 +2347,13 @@ static struct pci_device_id serial_pci_tbl[] = { pbn_b0_1_115200 }, /* + * IntaShield IS-200 + */ + { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS200, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0811 */ + pbn_b2_2_115200 }, + + /* * These entries match devices with class COMMUNICATION_SERIAL, * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL */ diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index dc673e1b6fd..cfe20f73043 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c @@ -886,6 +886,15 @@ static int sunsab_console_setup(struct console *con, char *options) unsigned long flags; unsigned int baud, quot; + /* + * The console framework calls us for each and every port + * 
registered. Defer the console setup until the requested + * port has been properly discovered. A bit of a hack, + * though... + */ + if (up->port.type != PORT_SUNSAB) + return -1; + printk("Console: ttyS%d (SAB82532)\n", (sunsab_reg.minor - 64) + con->index); diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c index 47bc3d57e01..d34f336d53d 100644 --- a/drivers/serial/sunzilog.c +++ b/drivers/serial/sunzilog.c @@ -1146,6 +1146,9 @@ static int __init sunzilog_console_setup(struct console *con, char *options) unsigned long flags; int baud, brg; + if (up->port.type != PORT_SUNZILOG) + return -1; + printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n", (sunzilog_reg.minor - 64) + con->index, con->index); diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c index 822914e2f43..f7a975d5db0 100644 --- a/drivers/usb/host/ohci-au1xxx.c +++ b/drivers/usb/host/ohci-au1xxx.c @@ -110,7 +110,6 @@ static void au1xxx_start_ohc(struct platform_device *dev) printk(KERN_DEBUG __FILE__ ": Clock to USB host has been enabled \n"); -#endif } static void au1xxx_stop_ohc(struct platform_device *dev) diff --git a/drivers/usb/input/appletouch.c b/drivers/usb/input/appletouch.c index 9e3f1390337..044faa07e29 100644 --- a/drivers/usb/input/appletouch.c +++ b/drivers/usb/input/appletouch.c @@ -597,9 +597,9 @@ static void atp_disconnect(struct usb_interface *iface) if (dev) { usb_kill_urb(dev->urb); input_unregister_device(dev->input); - usb_free_urb(dev->urb); usb_buffer_free(dev->udev, dev->datalen, dev->data, dev->urb->transfer_dma); + usb_free_urb(dev->urb); kfree(dev); } printk(KERN_INFO "input: appletouch disconnected\n"); diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index a4062a6adbb..9c46746d5d0 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c @@ -208,7 +208,7 @@ static int cypress_probe(struct usb_interface *interface, /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { - dev_err(&dev->udev->dev, "Out of memory!\n"); + dev_err(&interface->dev, "Out of memory!\n"); goto error; } diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 786e1dbe88e..983e104dd45 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -1242,11 +1242,12 @@ done: static int ctrl_out (struct usbtest_dev *dev, unsigned count, unsigned length, unsigned vary) { - unsigned i, j, len, retval; + unsigned i, j, len; + int retval; u8 *buf; char *what = "?"; struct usb_device *udev; - + if (length < 1 || length > 0xffff || vary >= length) return -EINVAL; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index a20da8528a5..15945e806f0 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -306,6 +306,8 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) }, diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 9f7343a4542..8888cd80a49 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -32,6 +32,12 @@ #define FTDI_NF_RIC_PID 0x0001 /* Product Id */ +/* www.canusb.com Lawicel CANUSB device */ +#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */ + +/* 
AlphaMicro Components AMC-232USB01 device */ +#define FTDI_AMC232_PID 0xFF00 /* Product Id */ + /* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */ #define FTDI_ACTZWAVE_PID 0xF2D0 diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c index 7e1bd5d6dfa..9840bade79f 100644 --- a/drivers/usb/serial/ipaq.c +++ b/drivers/usb/serial/ipaq.c @@ -251,6 +251,8 @@ static struct usb_device_id ipaq_id_table [] = { { USB_DEVICE(0x04C5, 0x1079) }, /* FUJITSU USB Sync */ { USB_DEVICE(0x04DA, 0x2500) }, /* Panasonic USB Sync */ { USB_DEVICE(0x04DD, 0x9102) }, /* SHARP WS003SH USB Modem */ + { USB_DEVICE(0x04DD, 0x9121) }, /* SHARP WS004SH USB Modem */ + { USB_DEVICE(0x04DD, 0x9123) }, /* SHARP WS007SH USB Modem */ { USB_DEVICE(0x04E8, 0x5F00) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x5F01) }, /* Samsung NEXiO USB Sync */ { USB_DEVICE(0x04E8, 0x5F02) }, /* Samsung NEXiO USB Sync */ diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index efbbc0adb89..65e4d046951 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -79,7 +79,6 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) }, { USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) }, { USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) }, - { USB_DEVICE(OTI_VENDOR_ID, OTI_PRODUCT_ID) }, { USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) }, { USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) }, { } /* Terminating entry */ diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index a692ac66ca6..55195e76eb6 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -82,10 +82,6 @@ #define SPEEDDRAGON_VENDOR_ID 0x0e55 #define SPEEDDRAGON_PRODUCT_ID 0x110b -/* Ours Technology Inc DKU-5 clone, chipset: Prolific Technology Inc */ -#define OTI_VENDOR_ID 0x0ea0 -#define OTI_PRODUCT_ID 0x6858 - /* DATAPILOT Universal-2 Phone Cable */ #define DATAPILOT_U2_VENDOR_ID 0x0731 #define DATAPILOT_U2_PRODUCT_ID 0x2003 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 2793f9a912b..4a803d69fa3 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1240,6 +1240,16 @@ UNUSUAL_DEV( 0x0ed1, 0x7636, 0x0103, 0x0103, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE | US_FL_GO_SLOW | US_FL_MAX_SECTORS_64), +/* David Kuehling <dvdkhlng@gmx.de>: + * for MP3-Player AVOX WSX-300ER (bought in Japan). Reports lots of SCSI + * errors when trying to write. + */ +UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100, + "C-MEX", + "A-VOX", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_IGNORE_RESIDUE ), + /* Reported by Michael Stattmann <michael@stattmann.com> */ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, "Sony Ericsson", @@ -1251,7 +1261,7 @@ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, * Tested on hardware version 1.10. * Entry is needed only for the initializer function override. 
*/ -UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x9999, +UNUSUAL_DEV( 0x1019, 0x0c55, 0x0110, 0x0110, "Desknote", "UCR-61S2B", US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index c40b9b8b1e7..702eb933cf8 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -554,7 +554,7 @@ config FB_VESA config FB_IMAC bool "Intel-based Macintosh Framebuffer Support" - depends on (FB = y) && X86 + depends on (FB = y) && X86 && EFI select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index 3e827e04a2a..106d428b72c 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c @@ -1800,6 +1800,9 @@ static struct backlight_properties aty128_bl_data = { static void aty128_bl_set_power(struct fb_info *info, int power) { + if (info->bl_dev == NULL) + return; + mutex_lock(&info->bl_mutex); up(&info->bl_dev->sem); info->bl_dev->props->power = power; @@ -1828,7 +1831,7 @@ static void aty128_bl_init(struct aty128fb_par *par) bd = backlight_device_register(name, par, &aty128_bl_data); if (IS_ERR(bd)) { info->bl_dev = NULL; - printk("aty128: Backlight registration failed\n"); + printk(KERN_WARNING "aty128: Backlight registration failed\n"); goto error; } diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 053ff63365b..510e4ea296e 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c @@ -2199,6 +2199,9 @@ static struct backlight_properties aty_bl_data = { static void aty_bl_set_power(struct fb_info *info, int power) { + if (info->bl_dev == NULL) + return; + mutex_lock(&info->bl_mutex); up(&info->bl_dev->sem); info->bl_dev->props->power = power; @@ -2223,7 +2226,7 @@ static void aty_bl_init(struct atyfb_par *par) bd = backlight_device_register(name, par, &aty_bl_data); if (IS_ERR(bd)) { info->bl_dev = NULL; - printk("aty: Backlight registration failed\n"); + printk(KERN_WARNING "aty: Backlight registration failed\n"); goto error; } diff --git a/drivers/video/imacfb.c b/drivers/video/imacfb.c index ff233b84dec..18ea4a54910 100644 --- a/drivers/video/imacfb.c +++ b/drivers/video/imacfb.c @@ -18,6 +18,8 @@ #include <linux/screen_info.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/dmi.h> +#include <linux/efi.h> #include <asm/io.h> @@ -28,7 +30,7 @@ typedef enum _MAC_TYPE { M_I20, M_MINI, M_MACBOOK, - M_NEW + M_UNKNOWN } MAC_TYPE; /* --------------------------------------------------------------------- */ @@ -52,10 +54,36 @@ static struct fb_fix_screeninfo imacfb_fix __initdata = { }; static int inverse; -static int model = M_NEW; +static int model = M_UNKNOWN; static int manual_height; static int manual_width; +static int set_system(struct dmi_system_id *id) +{ + printk(KERN_INFO "imacfb: %s detected - set system to %ld\n", + id->ident, (long)id->driver_data); + + model = (long)id->driver_data; + + return 0; +} + +static struct dmi_system_id __initdata dmi_system_table[] = { + { set_system, "iMac4,1", { + DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"iMac4,1") }, (void*)M_I17}, + { set_system, "MacBookPro1,1", { + DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro1,1") }, (void*)M_I17}, + { set_system, "MacBook1,1", { + DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"MacBook1,1")}, (void *)M_MACBOOK}, + { set_system, "Macmini1,1", { + 
DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"Macmini1,1")}, (void *)M_MINI}, + {}, +}; + #define DEFAULT_FB_MEM 1024*1024*16 /* --------------------------------------------------------------------- */ @@ -149,7 +177,6 @@ static int __init imacfb_probe(struct platform_device *dev) screen_info.lfb_linelength = 1472 * 4; screen_info.lfb_base = 0x80010000; break; - case M_NEW: case M_I20: screen_info.lfb_width = 1680; screen_info.lfb_height = 1050; @@ -207,6 +234,10 @@ static int __init imacfb_probe(struct platform_device *dev) size_remap = size_total; imacfb_fix.smem_len = size_remap; +#ifndef __i386__ + screen_info.imacpm_seg = 0; +#endif + if (!request_mem_region(imacfb_fix.smem_start, size_total, "imacfb")) { printk(KERN_WARNING "imacfb: cannot reserve video memory at 0x%lx\n", @@ -324,8 +355,16 @@ static int __init imacfb_init(void) int ret; char *option = NULL; - /* ignore error return of fb_get_options */ - fb_get_options("imacfb", &option); + if (!efi_enabled) + return -ENODEV; + if (!dmi_check_system(dmi_system_table)) + return -ENODEV; + if (model == M_UNKNOWN) + return -ENODEV; + + if (fb_get_options("imacfb", &option)) + return -ENODEV; + imacfb_setup(option); ret = platform_driver_register(&imacfb_driver); diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c index 440272ad10e..7c76e079ca7 100644 --- a/drivers/video/matrox/g450_pll.c +++ b/drivers/video/matrox/g450_pll.c @@ -331,7 +331,15 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll, tmp |= M1064_XPIXCLKCTRL_PLL_UP; } matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp); +#ifdef __powerpc__ + /* This is necessary to avoid jitter on PowerPC + * (OpenFirmware) systems, but apparently + * introduces jitter, at least on a x86-64 + * using DVI. + * A simple workaround is disable for non-PPC. 
+ */ matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0); +#endif /* __powerpc__ */ matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl); matroxfb_DAC_unlock_irqrestore(flags); diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c index b45f577094a..14c37c42191 100644 --- a/drivers/video/nvidia/nv_backlight.c +++ b/drivers/video/nvidia/nv_backlight.c @@ -112,6 +112,8 @@ static struct backlight_properties nvidia_bl_data = { void nvidia_bl_set_power(struct fb_info *info, int power) { + if (info->bl_dev == NULL) + return; mutex_lock(&info->bl_mutex); up(&info->bl_dev->sem); info->bl_dev->props->power = power; @@ -140,7 +142,7 @@ void nvidia_bl_init(struct nvidia_par *par) bd = backlight_device_register(name, par, &nvidia_bl_data); if (IS_ERR(bd)) { info->bl_dev = NULL; - printk("nvidia: Backlight registration failed\n"); + printk(KERN_WARNING "nvidia: Backlight registration failed\n"); goto error; } diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c index 76fc9d355eb..90363943bd5 100644 --- a/drivers/video/riva/fbdev.c +++ b/drivers/video/riva/fbdev.c @@ -354,6 +354,9 @@ static struct backlight_properties riva_bl_data = { static void riva_bl_set_power(struct fb_info *info, int power) { + if (info->bl_dev == NULL) + return; + mutex_lock(&info->bl_mutex); up(&info->bl_dev->sem); info->bl_dev->props->power = power; @@ -382,7 +385,7 @@ static void riva_bl_init(struct riva_par *par) bd = backlight_device_register(name, par, &riva_bl_data); if (IS_ERR(bd)) { info->bl_dev = NULL; - printk("riva: Backlight registration failed\n"); + printk(KERN_WARNING "riva: Backlight registration failed\n"); goto error; } diff --git a/fs/adfs/super.c b/fs/adfs/super.c index ba1c88af49f..82011019494 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -308,7 +308,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di if (adfs_checkmap(sb, dm)) return dm; - adfs_error(sb, NULL, "map corrupted"); + adfs_error(sb, "map corrupted"); error_free: while (--zone >= 0) diff --git a/fs/block_dev.c b/fs/block_dev.c index 37534573960..045f98854f1 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -884,6 +884,61 @@ void bd_set_size(struct block_device *bdev, loff_t size) } EXPORT_SYMBOL(bd_set_size); +static int __blkdev_put(struct block_device *bdev, unsigned int subclass) +{ + int ret = 0; + struct inode *bd_inode = bdev->bd_inode; + struct gendisk *disk = bdev->bd_disk; + + mutex_lock_nested(&bdev->bd_mutex, subclass); + lock_kernel(); + if (!--bdev->bd_openers) { + sync_blockdev(bdev); + kill_bdev(bdev); + } + if (bdev->bd_contains == bdev) { + if (disk->fops->release) + ret = disk->fops->release(bd_inode, NULL); + } else { + mutex_lock_nested(&bdev->bd_contains->bd_mutex, + subclass + 1); + bdev->bd_contains->bd_part_count--; + mutex_unlock(&bdev->bd_contains->bd_mutex); + } + if (!bdev->bd_openers) { + struct module *owner = disk->fops->owner; + + put_disk(disk); + module_put(owner); + + if (bdev->bd_contains != bdev) { + kobject_put(&bdev->bd_part->kobj); + bdev->bd_part = NULL; + } + bdev->bd_disk = NULL; + bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; + if (bdev != bdev->bd_contains) + __blkdev_put(bdev->bd_contains, subclass + 1); + bdev->bd_contains = NULL; + } + unlock_kernel(); + mutex_unlock(&bdev->bd_mutex); + bdput(bdev); + return ret; +} + +int blkdev_put(struct block_device *bdev) +{ + return __blkdev_put(bdev, BD_MUTEX_NORMAL); +} +EXPORT_SYMBOL(blkdev_put); + +int blkdev_put_partition(struct block_device *bdev) 
+{ + return __blkdev_put(bdev, BD_MUTEX_PARTITION); +} +EXPORT_SYMBOL(blkdev_put_partition); + static int blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); @@ -980,7 +1035,7 @@ out_first: bdev->bd_disk = NULL; bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; if (bdev != bdev->bd_contains) - blkdev_put(bdev->bd_contains); + __blkdev_put(bdev->bd_contains, BD_MUTEX_WHOLE); bdev->bd_contains = NULL; put_disk(disk); module_put(owner); @@ -1079,63 +1134,6 @@ static int blkdev_open(struct inode * inode, struct file * filp) return res; } -static int __blkdev_put(struct block_device *bdev, unsigned int subclass) -{ - int ret = 0; - struct inode *bd_inode = bdev->bd_inode; - struct gendisk *disk = bdev->bd_disk; - - mutex_lock_nested(&bdev->bd_mutex, subclass); - lock_kernel(); - if (!--bdev->bd_openers) { - sync_blockdev(bdev); - kill_bdev(bdev); - } - if (bdev->bd_contains == bdev) { - if (disk->fops->release) - ret = disk->fops->release(bd_inode, NULL); - } else { - mutex_lock_nested(&bdev->bd_contains->bd_mutex, - subclass + 1); - bdev->bd_contains->bd_part_count--; - mutex_unlock(&bdev->bd_contains->bd_mutex); - } - if (!bdev->bd_openers) { - struct module *owner = disk->fops->owner; - - put_disk(disk); - module_put(owner); - - if (bdev->bd_contains != bdev) { - kobject_put(&bdev->bd_part->kobj); - bdev->bd_part = NULL; - } - bdev->bd_disk = NULL; - bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; - if (bdev != bdev->bd_contains) - __blkdev_put(bdev->bd_contains, subclass + 1); - bdev->bd_contains = NULL; - } - unlock_kernel(); - mutex_unlock(&bdev->bd_mutex); - bdput(bdev); - return ret; -} - -int blkdev_put(struct block_device *bdev) -{ - return __blkdev_put(bdev, BD_MUTEX_NORMAL); -} - -EXPORT_SYMBOL(blkdev_put); - -int blkdev_put_partition(struct block_device *bdev) -{ - return __blkdev_put(bdev, BD_MUTEX_PARTITION); -} - -EXPORT_SYMBOL(blkdev_put_partition); - static int blkdev_close(struct inode * inode, struct file * filp) { struct block_device *bdev = I_BDEV(filp->f_mapping->host); diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index a61d17ed182..0feb3bd49cb 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -1,3 +1,13 @@ +Version 1.45 +------------ +Do not time out lockw calls when using posix extensions. Do not +time out requests if server still responding reasonably fast +on requests on other threads. Improve POSIX locking emulation, +(lock cancel now works, and unlock of merged range works even +to Windows servers now). Fix oops on mount to lanman servers +(win9x, os/2 etc.) when null password. Do not send listxattr +(SMB to query all EAs) if nouser_xattr specified. + Version 1.44 ------------ Rewritten sessionsetup support, including support for legacy SMB diff --git a/fs/cifs/README b/fs/cifs/README index 7986d0d97ac..5f0e1bd64fe 100644 --- a/fs/cifs/README +++ b/fs/cifs/README @@ -408,7 +408,7 @@ A partial list of the supported mount options follows: user_xattr Allow getting and setting user xattrs as OS/2 EAs (extended attributes) to the server (default) e.g. via setfattr and getfattr utilities. 
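For reference, a stand-alone C counterpart of those utilities, aimed at a hypothetical CIFS mount point (the path below is a placeholder, not from the patch): with user_xattr the calls travel to the server as OS/2 EAs, while the 1.45 change noted above makes the list call fail locally with -EOPNOTSUPP when nouser_xattr is set, instead of sending an SMB to query all EAs.

/* Minimal user-space sketch of what the user_xattr option exposes.
 * The mount point path is a placeholder. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
        const char *path = "/mnt/cifs/file.txt";   /* hypothetical CIFS mount */
        char list[256];
        char value[64];
        ssize_t len;

        /* Stored as an OS/2 EA on the server when user_xattr is enabled. */
        if (setxattr(path, "user.comment", "hello", 5, 0) == -1)
                perror("setxattr");

        /* With nouser_xattr the client refuses this without a round trip. */
        len = listxattr(path, list, sizeof(list));
        if (len == -1)
                perror("listxattr");
        else
                printf("xattr name list is %zd bytes\n", len);

        len = getxattr(path, "user.comment", value, sizeof(value));
        if (len > 0)
                printf("user.comment = %.*s\n", (int)len, value);
        return 0;
}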
- nouser_xattr Do not allow getfattr/setfattr to get/set xattrs + nouser_xattr Do not allow getfattr/setfattr to get/set/list xattrs mapchars Translate six of the seven reserved characters (not backslash) *?<>|: to the remap range (above 0xF000), which also diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index a89efaf78a2..4bc250b2d9f 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -277,7 +277,8 @@ void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key) return; memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); - strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE); + if(ses->password) + strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE); if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0) if(extended_security & CIFSSEC_MAY_PLNTXT) { diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index c28ede59994..3cd750029be 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -402,7 +402,6 @@ static struct quotactl_ops cifs_quotactl_ops = { }; #endif -#ifdef CONFIG_CIFS_EXPERIMENTAL static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags) { struct cifs_sb_info *cifs_sb; @@ -422,7 +421,7 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags) tcon->tidStatus = CifsExiting; up(&tcon->tconSem); - /* cancel_brl_requests(tcon); */ + /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ /* cancel_notify_requests(tcon); */ if(tcon->ses && tcon->ses->server) { @@ -438,7 +437,6 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags) return; } -#endif static int cifs_remount(struct super_block *sb, int *flags, char *data) { @@ -457,9 +455,7 @@ struct super_operations cifs_super_ops = { unless later we add lazy close of inodes or unless the kernel forgets to call us with the same number of releases (closes) as opens */ .show_options = cifs_show_options, -#ifdef CONFIG_CIFS_EXPERIMENTAL .umount_begin = cifs_umount_begin, -#endif .remount_fs = cifs_remount, }; diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 8f75c6f2470..39ee8ef3bde 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -100,5 +100,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t); extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); extern int cifs_ioctl (struct inode * inode, struct file * filep, unsigned int command, unsigned long arg); -#define CIFS_VERSION "1.44" +#define CIFS_VERSION "1.45" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 6d7cf5f3bc0..b24006c47df 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -3,6 +3,7 @@ * * Copyright (C) International Business Machines Corp., 2002,2006 * Author(s): Steve French (sfrench@us.ibm.com) + * Jeremy Allison (jra@samba.org) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -158,7 +159,8 @@ struct TCP_Server_Info { /* 16th byte of RFC1001 workstation name is always null */ char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; __u32 sequence_number; /* needed for CIFS PDU signature */ - char mac_signing_key[CIFS_SESS_KEY_SIZE + 16]; + char mac_signing_key[CIFS_SESS_KEY_SIZE + 16]; + unsigned long lstrp; /* when we got last response from this server */ }; /* @@ -266,14 +268,14 @@ struct cifsTconInfo { }; /* - * This info hangs off the cifsFileInfo structure. This is used to track - * byte stream locks on the file + * This info hangs off the cifsFileInfo structure, pointed to by llist. 
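The llist member follows the kernel's intrusive-list idiom: the record embeds a list node and the owning structure is recovered with container_of(). A small user-space rendering of that idiom, with stand-in types rather than the real <linux/list.h> helpers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct list_node { struct list_node *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct lock_rec {
        struct list_node llist;   /* links this record into the file's list */
        uint64_t offset;
        uint64_t length;
        unsigned int type;
};

static void list_init(struct list_node *h) { h->next = h->prev = h; }

static void list_add(struct list_node *n, struct list_node *h)
{
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

int main(void)
{
        struct list_node llist;            /* stands in for the per-file head */
        struct lock_rec a = { .offset = 0,  .length = 10, .type = 0 };
        struct lock_rec b = { .offset = 20, .length = 5,  .type = 1 };
        struct list_node *p;

        list_init(&llist);
        list_add(&a.llist, &llist);
        list_add(&b.llist, &llist);

        for (p = llist.next; p != &llist; p = p->next) {
                struct lock_rec *r = container_of(p, struct lock_rec, llist);
                printf("lock at %llu len %llu type %u\n",
                       (unsigned long long)r->offset,
                       (unsigned long long)r->length, r->type);
        }
        return 0;
}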
+ * This is used to track byte stream locks on the file */ struct cifsLockInfo { - struct cifsLockInfo *next; - int start; - int length; - int type; + struct list_head llist; /* pointer to next cifsLockInfo */ + __u64 offset; + __u64 length; + __u8 type; }; /* @@ -304,6 +306,8 @@ struct cifsFileInfo { /* lock scope id (0 if none) */ struct file * pfile; /* needed for writepage */ struct inode * pInode; /* needed for oplock break */ + struct semaphore lock_sem; + struct list_head llist; /* list of byte range locks we have. */ unsigned closePend:1; /* file is marked to close */ unsigned invalidHandle:1; /* file closed via session abend */ atomic_t wrtPending; /* handle in use - defer close */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index a5ddc62d6fe..b35c55c3c8b 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -50,6 +50,10 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *, extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, struct kvec *, int /* nvec to send */, int * /* type of buf returned */ , const int long_op); +extern int SendReceiveBlockingLock(const unsigned int /* xid */ , struct cifsTconInfo *, + struct smb_hdr * /* input */ , + struct smb_hdr * /* out */ , + int * /* bytes returned */); extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid); extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length); extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 19678c575df..075d8fb3d37 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -477,7 +477,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) /* BB get server time for time conversions and add code to use it and timezone since this is not UTC */ - if (rsp->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { + if (rsp->EncryptionKeyLength == cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) { memcpy(server->cryptKey, rsp->EncryptionKey, CIFS_CRYPTO_KEY_SIZE); } else if (server->secMode & SECMODE_PW_ENCRYPT) { @@ -1460,8 +1460,13 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, pSMB->hdr.smb_buf_length += count; pSMB->ByteCount = cpu_to_le16(count); - rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, + if (waitFlag) { + rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, + (struct smb_hdr *) pSMBr, &bytes_returned); + } else { + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, (struct smb_hdr *) pSMBr, &bytes_returned, timeout); + } cifs_stats_inc(&tcon->num_locks); if (rc) { cFYI(1, ("Send error in Lock = %d", rc)); @@ -1484,6 +1489,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, char *data_offset; struct cifs_posix_lock *parm_data; int rc = 0; + int timeout = 0; int bytes_returned = 0; __u16 params, param_offset, offset, byte_count, count; @@ -1503,7 +1509,6 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; - pSMB->Timeout = 0; pSMB->Reserved2 = 0; param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; @@ -1529,8 +1534,13 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, (((char *) &pSMB->hdr.Protocol) + offset); parm_data->lock_type = cpu_to_le16(lock_type); - if(waitFlag) + if(waitFlag) { + timeout = 3; /* blocking operation, no timeout */ parm_data->lock_flags = cpu_to_le16(1); + pSMB->Timeout = cpu_to_le32(-1); + } else + pSMB->Timeout = 0; + parm_data->pid = 
cpu_to_le32(current->tgid); parm_data->start = cpu_to_le64(pLockData->fl_start); parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ @@ -1541,8 +1551,14 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, pSMB->Reserved4 = 0; pSMB->hdr.smb_buf_length += byte_count; pSMB->ByteCount = cpu_to_le16(byte_count); - rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, - (struct smb_hdr *) pSMBr, &bytes_returned, 0); + if (waitFlag) { + rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, + (struct smb_hdr *) pSMBr, &bytes_returned); + } else { + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, + (struct smb_hdr *) pSMBr, &bytes_returned, timeout); + } + if (rc) { cFYI(1, ("Send error in Posix Lock = %d", rc)); } else if (get_flag) { diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 876eb9ef85f..5d394c72686 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -182,6 +182,7 @@ cifs_reconnect(struct TCP_Server_Info *server) while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood)) { + try_to_freeze(); if(server->protocolType == IPV6) { rc = ipv6_connect(&server->addr.sockAddr6,&server->ssocket); } else { @@ -612,6 +613,10 @@ multi_t2_fnd: #ifdef CONFIG_CIFS_STATS2 mid_entry->when_received = jiffies; #endif + /* so we do not time out requests to server + which is still responding (since server could + be busy but not dead) */ + server->lstrp = jiffies; break; } } @@ -1266,33 +1271,35 @@ find_unc(__be32 new_target_ip_addr, char *uncName, char *userName) read_lock(&GlobalSMBSeslock); list_for_each(tmp, &GlobalTreeConnectionList) { - cFYI(1, ("Next tcon - ")); + cFYI(1, ("Next tcon")); tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); if (tcon->ses) { if (tcon->ses->server) { cFYI(1, - (" old ip addr: %x == new ip %x ?", + ("old ip addr: %x == new ip %x ?", tcon->ses->server->addr.sockAddr.sin_addr. s_addr, new_target_ip_addr)); if (tcon->ses->server->addr.sockAddr.sin_addr. s_addr == new_target_ip_addr) { - /* BB lock tcon and server and tcp session and increment use count here? */ + /* BB lock tcon, server and tcp session and increment use count here? 
*/ /* found a match on the TCP session */ /* BB check if reconnection needed */ - cFYI(1,("Matched ip, old UNC: %s == new: %s ?", + cFYI(1,("IP match, old UNC: %s new: %s", tcon->treeName, uncName)); if (strncmp (tcon->treeName, uncName, MAX_TREE_SIZE) == 0) { cFYI(1, - ("Matched UNC, old user: %s == new: %s ?", + ("and old usr: %s new: %s", tcon->treeName, uncName)); if (strncmp (tcon->ses->userName, userName, MAX_USERNAME_SIZE) == 0) { read_unlock(&GlobalSMBSeslock); - return tcon;/* also matched user (smb session)*/ + /* matched smb session + (user name */ + return tcon; } } } @@ -1969,7 +1976,18 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, } cFYI(1,("Negotiate caps 0x%x",(int)cap)); - +#ifdef CONFIG_CIFS_DEBUG2 + if(cap & CIFS_UNIX_FCNTL_CAP) + cFYI(1,("FCNTL cap")); + if(cap & CIFS_UNIX_EXTATTR_CAP) + cFYI(1,("EXTATTR cap")); + if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) + cFYI(1,("POSIX path cap")); + if(cap & CIFS_UNIX_XATTR_CAP) + cFYI(1,("XATTR cap")); + if(cap & CIFS_UNIX_POSIX_ACL_CAP) + cFYI(1,("POSIX ACL cap")); +#endif /* CIFS_DEBUG2 */ if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { cFYI(1,("setting capabilities failed")); } diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index ba4cbe9b068..914239d5363 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -267,6 +267,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, pCifsFile->invalidHandle = FALSE; pCifsFile->closePend = FALSE; init_MUTEX(&pCifsFile->fh_sem); + init_MUTEX(&pCifsFile->lock_sem); + INIT_LIST_HEAD(&pCifsFile->llist); + atomic_set(&pCifsFile->wrtPending,0); + /* set the following in open now pCifsFile->pfile = file; */ write_lock(&GlobalSMBSeslock); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 944d2b9e092..e9c5ba9084f 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -5,6 +5,7 @@ * * Copyright (C) International Business Machines Corp., 2002,2003 * Author(s): Steve French (sfrench@us.ibm.com) + * Jeremy Allison (jra@samba.org) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -47,6 +48,8 @@ static inline struct cifsFileInfo *cifs_init_private( private_data->netfid = netfid; private_data->pid = current->tgid; init_MUTEX(&private_data->fh_sem); + init_MUTEX(&private_data->lock_sem); + INIT_LIST_HEAD(&private_data->llist); private_data->pfile = file; /* needed for writepage */ private_data->pInode = inode; private_data->invalidHandle = FALSE; @@ -473,6 +476,8 @@ int cifs_close(struct inode *inode, struct file *file) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; if (pSMBFile) { + struct cifsLockInfo *li, *tmp; + pSMBFile->closePend = TRUE; if (pTcon) { /* no sense reconnecting to close a file that is @@ -496,6 +501,16 @@ int cifs_close(struct inode *inode, struct file *file) pSMBFile->netfid); } } + + /* Delete any outstanding lock records. + We'll lose them when the file is closed anyway. 
*/ + down(&pSMBFile->lock_sem); + list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) { + list_del(&li->llist); + kfree(li); + } + up(&pSMBFile->lock_sem); + write_lock(&GlobalSMBSeslock); list_del(&pSMBFile->flist); list_del(&pSMBFile->tlist); @@ -570,6 +585,21 @@ int cifs_closedir(struct inode *inode, struct file *file) return rc; } +static int store_file_lock(struct cifsFileInfo *fid, __u64 len, + __u64 offset, __u8 lockType) +{ + struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL); + if (li == NULL) + return -ENOMEM; + li->offset = offset; + li->length = len; + li->type = lockType; + down(&fid->lock_sem); + list_add(&li->llist, &fid->llist); + up(&fid->lock_sem); + return 0; +} + int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) { int rc, xid; @@ -581,6 +611,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) struct cifsTconInfo *pTcon; __u16 netfid; __u8 lockType = LOCKING_ANDX_LARGE_FILES; + int posix_locking; length = 1 + pfLock->fl_end - pfLock->fl_start; rc = -EACCES; @@ -639,15 +670,14 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) } netfid = ((struct cifsFileInfo *)file->private_data)->netfid; + posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) && + (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability)); /* BB add code here to normalize offset and length to account for negative length which we can not accept over the wire */ if (IS_GETLK(cmd)) { - if(experimEnabled && - (cifs_sb->tcon->ses->capabilities & CAP_UNIX) && - (CIFS_UNIX_FCNTL_CAP & - le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) { + if(posix_locking) { int posix_lock_type; if(lockType & LOCKING_ANDX_SHARED_LOCK) posix_lock_type = CIFS_RDLCK; @@ -683,10 +713,15 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) FreeXid(xid); return rc; } - if (experimEnabled && - (cifs_sb->tcon->ses->capabilities & CAP_UNIX) && - (CIFS_UNIX_FCNTL_CAP & - le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) { + + if (!numLock && !numUnlock) { + /* if no lock or unlock then nothing + to do since we do not know what it is */ + FreeXid(xid); + return -EOPNOTSUPP; + } + + if (posix_locking) { int posix_lock_type; if(lockType & LOCKING_ANDX_SHARED_LOCK) posix_lock_type = CIFS_RDLCK; @@ -695,18 +730,46 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) if(numUnlock == 1) posix_lock_type = CIFS_UNLCK; - else if(numLock == 0) { - /* if no lock or unlock then nothing - to do since we do not know what it is */ - FreeXid(xid); - return -EOPNOTSUPP; - } + rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */, length, pfLock, posix_lock_type, wait_flag); - } else - rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start, - numUnlock, numLock, lockType, wait_flag); + } else { + struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data; + + if (numLock) { + rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start, + 0, numLock, lockType, wait_flag); + + if (rc == 0) { + /* For Windows locks we must store them. */ + rc = store_file_lock(fid, length, + pfLock->fl_start, lockType); + } + } else if (numUnlock) { + /* For each stored lock that this unlock overlaps + completely, unlock it. 
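A stand-alone model of that bookkeeping, using a fixed array in place of the per-file llist and printing where the real code issues the server unlock; the containment test mirrors the hunk, everything else is simplified:

#include <stdint.h>
#include <stdio.h>

struct lock_rec { uint64_t offset, length; int used; };

static struct lock_rec locks[16];          /* stands in for the llist */

static int store_lock(uint64_t offset, uint64_t length)
{
        unsigned int i;

        for (i = 0; i < 16; i++) {
                if (!locks[i].used) {
                        locks[i] = (struct lock_rec){ offset, length, 1 };
                        return 0;
                }
        }
        return -1;                         /* the kernel path returns -ENOMEM */
}

static void unlock_range(uint64_t start, uint64_t length)
{
        unsigned int i;

        for (i = 0; i < 16; i++) {
                if (!locks[i].used)
                        continue;
                /* mirrors the hunk's test:
                 * fl_start <= li->offset && length >= li->length */
                if (start <= locks[i].offset && length >= locks[i].length) {
                        printf("server unlock %llu+%llu\n",
                               (unsigned long long)locks[i].offset,
                               (unsigned long long)locks[i].length);
                        locks[i].used = 0;
                }
        }
}

int main(void)
{
        store_lock(0, 100);
        store_lock(200, 50);
        unlock_range(0, 300);              /* covers both stored records */
        return 0;
}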
*/ + int stored_rc = 0; + struct cifsLockInfo *li, *tmp; + + down(&fid->lock_sem); + list_for_each_entry_safe(li, tmp, &fid->llist, llist) { + if (pfLock->fl_start <= li->offset && + length >= li->length) { + stored_rc = CIFSSMBLock(xid, pTcon, netfid, + li->length, li->offset, + 1, 0, li->type, FALSE); + if (stored_rc) + rc = stored_rc; + + list_del(&li->llist); + kfree(li); + } + } + up(&fid->lock_sem); + } + } + if (pfLock->fl_flags & FL_POSIX) posix_lock_file_wait(file, pfLock); FreeXid(xid); diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index b66eff5dc62..ce87550e918 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -72,6 +72,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = { {ERRinvlevel,-EOPNOTSUPP}, {ERRdirnotempty, -ENOTEMPTY}, {ERRnotlocked, -ENOLCK}, + {ERRcancelviolation, -ENOLCK}, {ERRalreadyexists, -EEXIST}, {ERRmoredata, -EOVERFLOW}, {ERReasnotsupported,-EOPNOTSUPP}, diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 03bbcb37791..105761e3ba0 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -556,7 +556,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile) FIND_FILE_STANDARD_INFO * pFindData = (FIND_FILE_STANDARD_INFO *)current_entry; filename = &pFindData->FileName[0]; - len = le32_to_cpu(pFindData->FileNameLength); + len = pFindData->FileNameLength; } else { cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level)); } diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 7202d534ef0..d1705ab8136 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -372,7 +372,7 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time, /* no capabilities flags in old lanman negotiation */ - pSMB->old_req.PasswordLength = CIFS_SESS_KEY_SIZE; + pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); /* BB calculate hash with password */ /* and copy into bcc */ diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h index cd41c67ff8d..212c3c29640 100644 --- a/fs/cifs/smberr.h +++ b/fs/cifs/smberr.h @@ -95,6 +95,7 @@ #define ERRinvlevel 124 #define ERRdirnotempty 145 #define ERRnotlocked 158 +#define ERRcancelviolation 173 #define ERRalreadyexists 183 #define ERRbadpipe 230 #define ERRpipebusy 231 diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 17ba329e2b3..48d47b46b1f 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -3,7 +3,8 @@ * * Copyright (C) International Business Machines Corp., 2002,2005 * Author(s): Steve French (sfrench@us.ibm.com) - * + * Jeremy Allison (jra@samba.org) 2006. + * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or @@ -36,7 +37,7 @@ extern mempool_t *cifs_mid_poolp; extern kmem_cache_t *cifs_oplock_cachep; static struct mid_q_entry * -AllocMidQEntry(struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) +AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) { struct mid_q_entry *temp; @@ -203,6 +204,10 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer, rc = 0; } + /* Don't want to modify the buffer as a + side effect of this call. 
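A toy illustration of that save/restore pattern, assuming the in-place change is a conversion of the length field to wire byte order (toy header type, not the real smb_hdr), so a retry does not convert an already-converted value:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct toy_hdr {
        uint32_t buf_length;               /* kept in host order by callers */
};

static int fake_wire_send(const void *buf, size_t len)
{
        (void)buf;
        return (int)len;                   /* pretend the bytes went out */
}

static int send_hdr(struct toy_hdr *h)
{
        uint32_t saved = h->buf_length;    /* remember the caller's value */
        int rc;

        h->buf_length = htonl(h->buf_length);  /* wire order, in place */
        rc = fake_wire_send(h, sizeof(*h));

        h->buf_length = saved;             /* no side effect left behind */
        return rc;
}

int main(void)
{
        struct toy_hdr h = { 72 };

        send_hdr(&h);
        printf("caller still sees length %u\n", (unsigned)h.buf_length);
        return 0;
}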
*/ + smb_buffer->smb_buf_length = smb_buf_length; + return rc; } @@ -217,6 +222,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec, unsigned int len = iov[0].iov_len; unsigned int total_len; int first_vec = 0; + unsigned int smb_buf_length = smb_buffer->smb_buf_length; if(ssocket == NULL) return -ENOTSOCK; /* BB eventually add reconnect code here */ @@ -293,36 +299,15 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec, } else rc = 0; + /* Don't want to modify the buffer as a + side effect of this call. */ + smb_buffer->smb_buf_length = smb_buf_length; + return rc; } -int -SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, - struct kvec *iov, int n_vec, int * pRespBufType /* ret */, - const int long_op) +static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op) { - int rc = 0; - unsigned int receive_len; - unsigned long timeout; - struct mid_q_entry *midQ; - struct smb_hdr *in_buf = iov[0].iov_base; - - *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */ - - if ((ses == NULL) || (ses->server == NULL)) { - cifs_small_buf_release(in_buf); - cERROR(1,("Null session")); - return -EIO; - } - - if(ses->server->tcpStatus == CifsExiting) { - cifs_small_buf_release(in_buf); - return -ENOENT; - } - - /* Ensure that we do not send more than 50 overlapping requests - to the same server. We may make this configurable later or - use ses->maxReq */ if(long_op == -1) { /* oplock breaks must not be held up */ atomic_inc(&ses->server->inFlight); @@ -345,53 +330,140 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, } else { if(ses->server->tcpStatus == CifsExiting) { spin_unlock(&GlobalMid_Lock); - cifs_small_buf_release(in_buf); return -ENOENT; } - /* can not count locking commands against total since - they are allowed to block on server */ + /* can not count locking commands against total since + they are allowed to block on server */ - if(long_op < 3) { /* update # of requests on the wire to server */ + if (long_op < 3) atomic_inc(&ses->server->inFlight); - } spin_unlock(&GlobalMid_Lock); break; } } } - /* make sure that we sign in the same order that we send on this socket - and avoid races inside tcp sendmsg code that could cause corruption - of smb data */ - - down(&ses->server->tcpSem); + return 0; +} +static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf, + struct mid_q_entry **ppmidQ) +{ if (ses->server->tcpStatus == CifsExiting) { - rc = -ENOENT; - goto out_unlock2; + return -ENOENT; } else if (ses->server->tcpStatus == CifsNeedReconnect) { cFYI(1,("tcp session dead - return to caller to retry")); - rc = -EAGAIN; - goto out_unlock2; + return -EAGAIN; } else if (ses->status != CifsGood) { /* check if SMB session is bad because we are setting it up */ if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && (in_buf->Command != SMB_COM_NEGOTIATE)) { - rc = -EAGAIN; - goto out_unlock2; + return -EAGAIN; } /* else ok - we are setting up session */ } - midQ = AllocMidQEntry(in_buf, ses); - if (midQ == NULL) { + *ppmidQ = AllocMidQEntry(in_buf, ses); + if (*ppmidQ == NULL) { + return -ENOMEM; + } + return 0; +} + +static int wait_for_response(struct cifsSesInfo *ses, + struct mid_q_entry *midQ, + unsigned long timeout, + unsigned long time_to_wait) +{ + unsigned long curr_timeout; + + for (;;) { + curr_timeout = timeout + jiffies; + wait_event(ses->server->response_q, + (!(midQ->midState == MID_REQUEST_SUBMITTED)) || + time_after(jiffies, curr_timeout) || + ((ses->server->tcpStatus != CifsGood) && + 
(ses->server->tcpStatus != CifsNew))); + + if (time_after(jiffies, curr_timeout) && + (midQ->midState == MID_REQUEST_SUBMITTED) && + ((ses->server->tcpStatus == CifsGood) || + (ses->server->tcpStatus == CifsNew))) { + + unsigned long lrt; + + /* We timed out. Is the server still + sending replies ? */ + spin_lock(&GlobalMid_Lock); + lrt = ses->server->lstrp; + spin_unlock(&GlobalMid_Lock); + + /* Calculate time_to_wait past last receive time. + Although we prefer not to time out if the + server is still responding - we will time + out if the server takes more than 15 (or 45 + or 180) seconds to respond to this request + and has not responded to any request from + other threads on the client within 10 seconds */ + lrt += time_to_wait; + if (time_after(jiffies, lrt)) { + /* No replies for time_to_wait. */ + cERROR(1,("server not responding")); + return -1; + } + } else { + return 0; + } + } +} + +int +SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, + struct kvec *iov, int n_vec, int * pRespBufType /* ret */, + const int long_op) +{ + int rc = 0; + unsigned int receive_len; + unsigned long timeout; + struct mid_q_entry *midQ; + struct smb_hdr *in_buf = iov[0].iov_base; + + *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */ + + if ((ses == NULL) || (ses->server == NULL)) { + cifs_small_buf_release(in_buf); + cERROR(1,("Null session")); + return -EIO; + } + + if(ses->server->tcpStatus == CifsExiting) { + cifs_small_buf_release(in_buf); + return -ENOENT; + } + + /* Ensure that we do not send more than 50 overlapping requests + to the same server. We may make this configurable later or + use ses->maxReq */ + + rc = wait_for_free_request(ses, long_op); + if (rc) { + cifs_small_buf_release(in_buf); + return rc; + } + + /* make sure that we sign in the same order that we send on this socket + and avoid races inside tcp sendmsg code that could cause corruption + of smb data */ + + down(&ses->server->tcpSem); + + rc = allocate_mid(ses, in_buf, &midQ); + if (rc) { up(&ses->server->tcpSem); cifs_small_buf_release(in_buf); - /* If not lock req, update # of requests on wire to server */ - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - } - return -ENOMEM; + /* Update # of requests on wire to server */ + atomic_dec(&ses->server->inFlight); + wake_up(&ses->server->request_q); + return rc; } rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number); @@ -406,32 +478,23 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, atomic_dec(&ses->server->inSend); midQ->when_sent = jiffies; #endif - if(rc < 0) { - DeleteMidQEntry(midQ); - up(&ses->server->tcpSem); - cifs_small_buf_release(in_buf); - /* If not lock req, update # of requests on wire to server */ - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - } - return rc; - } else { - up(&ses->server->tcpSem); - cifs_small_buf_release(in_buf); - } + + up(&ses->server->tcpSem); + cifs_small_buf_release(in_buf); + + if(rc < 0) + goto out; if (long_op == -1) - goto cifs_no_response_exit2; + goto out; else if (long_op == 2) /* writes past end of file can take loong time */ timeout = 180 * HZ; else if (long_op == 1) timeout = 45 * HZ; /* should be greater than servers oplock break timeout (about 43 seconds) */ - else if (long_op > 2) { - timeout = MAX_SCHEDULE_TIMEOUT; - } else + else timeout = 15 * HZ; + /* wait for 15 seconds or until woken up due to response arriving or due to last connection to this server being unmounted */ if 
(signal_pending(current)) { @@ -441,19 +504,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, } /* No user interrupts in wait - wreaks havoc with performance */ - if(timeout != MAX_SCHEDULE_TIMEOUT) { - timeout += jiffies; - wait_event(ses->server->response_q, - (!(midQ->midState & MID_REQUEST_SUBMITTED)) || - time_after(jiffies, timeout) || - ((ses->server->tcpStatus != CifsGood) && - (ses->server->tcpStatus != CifsNew))); - } else { - wait_event(ses->server->response_q, - (!(midQ->midState & MID_REQUEST_SUBMITTED)) || - ((ses->server->tcpStatus != CifsGood) && - (ses->server->tcpStatus != CifsNew))); - } + wait_for_response(ses, midQ, timeout, 10 * HZ); spin_lock(&GlobalMid_Lock); if (midQ->resp_buf) { @@ -481,11 +532,9 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, } spin_unlock(&GlobalMid_Lock); DeleteMidQEntry(midQ); - /* If not lock req, update # of requests on wire to server */ - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - } + /* Update # of requests on wire to server */ + atomic_dec(&ses->server->inFlight); + wake_up(&ses->server->request_q); return rc; } @@ -536,24 +585,12 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, cFYI(1,("Bad MID state?")); } } -cifs_no_response_exit2: - DeleteMidQEntry(midQ); - - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - } - return rc; +out: -out_unlock2: - up(&ses->server->tcpSem); - cifs_small_buf_release(in_buf); - /* If not lock req, update # of requests on wire to server */ - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - } + DeleteMidQEntry(midQ); + atomic_dec(&ses->server->inFlight); + wake_up(&ses->server->request_q); return rc; } @@ -583,85 +620,34 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, /* Ensure that we do not send more than 50 overlapping requests to the same server. 
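A simplified user-space analogue of that throttle, ignoring the oplock-break and blocking-lock special cases; the names and the pthread primitives are stand-ins for inFlight and request_q:

#include <pthread.h>
#include <stdio.h>

#define MAX_PENDING 50

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  slot_free = PTHREAD_COND_INITIALIZER;
static int in_flight;

static void get_request_slot(void)
{
        pthread_mutex_lock(&lock);
        while (in_flight >= MAX_PENDING)        /* wait for a free slot */
                pthread_cond_wait(&slot_free, &lock);
        in_flight++;                            /* counted under the lock */
        pthread_mutex_unlock(&lock);
}

static void put_request_slot(void)
{
        pthread_mutex_lock(&lock);
        in_flight--;
        pthread_cond_signal(&slot_free);        /* let a sleeper re-check */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        get_request_slot();                     /* would sleep if 50 in flight */
        printf("requests in flight: %d\n", in_flight);
        put_request_slot();
        return 0;
}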
We may make this configurable later or use ses->maxReq */ - if(long_op == -1) { - /* oplock breaks must not be held up */ - atomic_inc(&ses->server->inFlight); - } else { - spin_lock(&GlobalMid_Lock); - while(1) { - if(atomic_read(&ses->server->inFlight) >= - cifs_max_pending){ - spin_unlock(&GlobalMid_Lock); -#ifdef CONFIG_CIFS_STATS2 - atomic_inc(&ses->server->num_waiters); -#endif - wait_event(ses->server->request_q, - atomic_read(&ses->server->inFlight) - < cifs_max_pending); -#ifdef CONFIG_CIFS_STATS2 - atomic_dec(&ses->server->num_waiters); -#endif - spin_lock(&GlobalMid_Lock); - } else { - if(ses->server->tcpStatus == CifsExiting) { - spin_unlock(&GlobalMid_Lock); - return -ENOENT; - } - /* can not count locking commands against total since - they are allowed to block on server */ - - if(long_op < 3) { - /* update # of requests on the wire to server */ - atomic_inc(&ses->server->inFlight); - } - spin_unlock(&GlobalMid_Lock); - break; - } - } - } + rc = wait_for_free_request(ses, long_op); + if (rc) + return rc; + /* make sure that we sign in the same order that we send on this socket and avoid races inside tcp sendmsg code that could cause corruption of smb data */ down(&ses->server->tcpSem); - if (ses->server->tcpStatus == CifsExiting) { - rc = -ENOENT; - goto out_unlock; - } else if (ses->server->tcpStatus == CifsNeedReconnect) { - cFYI(1,("tcp session dead - return to caller to retry")); - rc = -EAGAIN; - goto out_unlock; - } else if (ses->status != CifsGood) { - /* check if SMB session is bad because we are setting it up */ - if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && - (in_buf->Command != SMB_COM_NEGOTIATE)) { - rc = -EAGAIN; - goto out_unlock; - } /* else ok - we are setting up session */ - } - midQ = AllocMidQEntry(in_buf, ses); - if (midQ == NULL) { + rc = allocate_mid(ses, in_buf, &midQ); + if (rc) { up(&ses->server->tcpSem); - /* If not lock req, update # of requests on wire to server */ - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - } - return -ENOMEM; + /* Update # of requests on wire to server */ + atomic_dec(&ses->server->inFlight); + wake_up(&ses->server->request_q); + return rc; } if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { - up(&ses->server->tcpSem); cERROR(1, ("Illegal length, greater than maximum frame, %d", in_buf->smb_buf_length)); DeleteMidQEntry(midQ); - /* If not lock req, update # of requests on wire to server */ - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - } + up(&ses->server->tcpSem); + /* Update # of requests on wire to server */ + atomic_dec(&ses->server->inFlight); + wake_up(&ses->server->request_q); return -EIO; } @@ -677,27 +663,19 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, atomic_dec(&ses->server->inSend); midQ->when_sent = jiffies; #endif - if(rc < 0) { - DeleteMidQEntry(midQ); - up(&ses->server->tcpSem); - /* If not lock req, update # of requests on wire to server */ - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - } - return rc; - } else - up(&ses->server->tcpSem); + up(&ses->server->tcpSem); + + if(rc < 0) + goto out; + if (long_op == -1) - goto cifs_no_response_exit; + goto out; else if (long_op == 2) /* writes past end of file can take loong time */ timeout = 180 * HZ; else if (long_op == 1) timeout = 45 * HZ; /* should be greater than servers oplock break timeout (about 43 seconds) */ - else if (long_op > 2) { - timeout = 
MAX_SCHEDULE_TIMEOUT; - } else + else timeout = 15 * HZ; /* wait for 15 seconds or until woken up due to response arriving or due to last connection to this server being unmounted */ @@ -708,19 +686,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, } /* No user interrupts in wait - wreaks havoc with performance */ - if(timeout != MAX_SCHEDULE_TIMEOUT) { - timeout += jiffies; - wait_event(ses->server->response_q, - (!(midQ->midState & MID_REQUEST_SUBMITTED)) || - time_after(jiffies, timeout) || - ((ses->server->tcpStatus != CifsGood) && - (ses->server->tcpStatus != CifsNew))); - } else { - wait_event(ses->server->response_q, - (!(midQ->midState & MID_REQUEST_SUBMITTED)) || - ((ses->server->tcpStatus != CifsGood) && - (ses->server->tcpStatus != CifsNew))); - } + wait_for_response(ses, midQ, timeout, 10 * HZ); spin_lock(&GlobalMid_Lock); if (midQ->resp_buf) { @@ -748,11 +714,9 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, } spin_unlock(&GlobalMid_Lock); DeleteMidQEntry(midQ); - /* If not lock req, update # of requests on wire to server */ - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - } + /* Update # of requests on wire to server */ + atomic_dec(&ses->server->inFlight); + wake_up(&ses->server->request_q); return rc; } @@ -799,23 +763,253 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, cERROR(1,("Bad MID state?")); } } -cifs_no_response_exit: + +out: + DeleteMidQEntry(midQ); + atomic_dec(&ses->server->inFlight); + wake_up(&ses->server->request_q); - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - } + return rc; +} + +/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */ + +static int +send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf, + struct mid_q_entry *midQ) +{ + int rc = 0; + struct cifsSesInfo *ses = tcon->ses; + __u16 mid = in_buf->Mid; + header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0); + in_buf->Mid = mid; + down(&ses->server->tcpSem); + rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); + if (rc) { + up(&ses->server->tcpSem); + return rc; + } + rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, + (struct sockaddr *) &(ses->server->addr.sockAddr)); + up(&ses->server->tcpSem); return rc; +} + +/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows + blocking lock to return. */ + +static int +send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon, + struct smb_hdr *in_buf, + struct smb_hdr *out_buf) +{ + int bytes_returned; + struct cifsSesInfo *ses = tcon->ses; + LOCK_REQ *pSMB = (LOCK_REQ *)in_buf; + + /* We just modify the current in_buf to change + the type of lock from LOCKING_ANDX_SHARED_LOCK + or LOCKING_ANDX_EXCLUSIVE_LOCK to + LOCKING_ANDX_CANCEL_LOCK. 
*/ + + pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; + pSMB->Timeout = 0; + pSMB->hdr.Mid = GetNextMid(ses->server); + + return SendReceive(xid, ses, in_buf, out_buf, + &bytes_returned, 0); +} -out_unlock: +int +SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, + struct smb_hdr *in_buf, struct smb_hdr *out_buf, + int *pbytes_returned) +{ + int rc = 0; + int rstart = 0; + unsigned int receive_len; + struct mid_q_entry *midQ; + struct cifsSesInfo *ses; + + if (tcon == NULL || tcon->ses == NULL) { + cERROR(1,("Null smb session")); + return -EIO; + } + ses = tcon->ses; + + if(ses->server == NULL) { + cERROR(1,("Null tcp session")); + return -EIO; + } + + if(ses->server->tcpStatus == CifsExiting) + return -ENOENT; + + /* Ensure that we do not send more than 50 overlapping requests + to the same server. We may make this configurable later or + use ses->maxReq */ + + rc = wait_for_free_request(ses, 3); + if (rc) + return rc; + + /* make sure that we sign in the same order that we send on this socket + and avoid races inside tcp sendmsg code that could cause corruption + of smb data */ + + down(&ses->server->tcpSem); + + rc = allocate_mid(ses, in_buf, &midQ); + if (rc) { + up(&ses->server->tcpSem); + return rc; + } + + if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { + up(&ses->server->tcpSem); + cERROR(1, ("Illegal length, greater than maximum frame, %d", + in_buf->smb_buf_length)); + DeleteMidQEntry(midQ); + return -EIO; + } + + rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); + + midQ->midState = MID_REQUEST_SUBMITTED; +#ifdef CONFIG_CIFS_STATS2 + atomic_inc(&ses->server->inSend); +#endif + rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, + (struct sockaddr *) &(ses->server->addr.sockAddr)); +#ifdef CONFIG_CIFS_STATS2 + atomic_dec(&ses->server->inSend); + midQ->when_sent = jiffies; +#endif up(&ses->server->tcpSem); - /* If not lock req, update # of requests on wire to server */ - if(long_op < 3) { - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); + + if(rc < 0) { + DeleteMidQEntry(midQ); + return rc; + } + + /* Wait for a reply - allow signals to interrupt. */ + rc = wait_event_interruptible(ses->server->response_q, + (!(midQ->midState == MID_REQUEST_SUBMITTED)) || + ((ses->server->tcpStatus != CifsGood) && + (ses->server->tcpStatus != CifsNew))); + + /* Were we interrupted by a signal ? */ + if ((rc == -ERESTARTSYS) && + (midQ->midState == MID_REQUEST_SUBMITTED) && + ((ses->server->tcpStatus == CifsGood) || + (ses->server->tcpStatus == CifsNew))) { + + if (in_buf->Command == SMB_COM_TRANSACTION2) { + /* POSIX lock. We send a NT_CANCEL SMB to cause the + blocking lock to return. */ + + rc = send_nt_cancel(tcon, in_buf, midQ); + if (rc) { + DeleteMidQEntry(midQ); + return rc; + } + } else { + /* Windows lock. We send a LOCKINGX_CANCEL_LOCK + to cause the blocking lock to return. */ + + rc = send_lock_cancel(xid, tcon, in_buf, out_buf); + + /* If we get -ENOLCK back the lock may have + already been removed. Don't exit in this case. */ + if (rc && rc != -ENOLCK) { + DeleteMidQEntry(midQ); + return rc; + } + } + + /* Wait 5 seconds for the response. */ + if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ)==0) { + /* We got the response - restart system call. 
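The user-visible effect is that a blocked lock request can now be knocked out of its wait by a signal rather than hanging until the conflicting lock goes away. A small demonstration using plain fcntl(2) record locks; it behaves the same on a local filesystem, and on a CIFS mount it would exercise the new cancel path (the scratch file name is arbitrary):

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void on_alarm(int sig) { (void)sig; }   /* just break the wait */

int main(void)
{
        struct sigaction sa;
        struct flock fl;
        int fd = open("lockfile.tmp", O_RDWR | O_CREAT, 0600);
        pid_t child;

        if (fd < 0) { perror("open"); return 1; }

        memset(&fl, 0, sizeof(fl));
        fl.l_type = F_WRLCK;            /* whole file: l_start = l_len = 0 */
        fl.l_whence = SEEK_SET;

        child = fork();
        if (child < 0) { perror("fork"); return 1; }
        if (child == 0) {               /* child grabs the lock and sits on it */
                fcntl(fd, F_SETLKW, &fl);
                pause();
                _exit(0);
        }

        sleep(1);                       /* let the child win the race */

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_alarm;       /* no SA_RESTART: we want EINTR back */
        sigaction(SIGALRM, &sa, NULL);
        alarm(2);

        if (fcntl(fd, F_SETLKW, &fl) == -1 && errno == EINTR)
                printf("blocking lock wait interrupted by the signal\n");

        kill(child, SIGKILL);
        wait(NULL);
        return 0;
}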
*/ + rstart = 1; + } + } + + spin_lock(&GlobalMid_Lock); + if (midQ->resp_buf) { + spin_unlock(&GlobalMid_Lock); + receive_len = midQ->resp_buf->smb_buf_length; + } else { + cERROR(1,("No response for cmd %d mid %d", + midQ->command, midQ->mid)); + if(midQ->midState == MID_REQUEST_SUBMITTED) { + if(ses->server->tcpStatus == CifsExiting) + rc = -EHOSTDOWN; + else { + ses->server->tcpStatus = CifsNeedReconnect; + midQ->midState = MID_RETRY_NEEDED; + } + } + + if (rc != -EHOSTDOWN) { + if(midQ->midState == MID_RETRY_NEEDED) { + rc = -EAGAIN; + cFYI(1,("marking request for retry")); + } else { + rc = -EIO; + } + } + spin_unlock(&GlobalMid_Lock); + DeleteMidQEntry(midQ); + return rc; } + + if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { + cERROR(1, ("Frame too large received. Length: %d Xid: %d", + receive_len, xid)); + rc = -EIO; + } else { /* rcvd frame is ok */ + + if (midQ->resp_buf && out_buf + && (midQ->midState == MID_RESPONSE_RECEIVED)) { + out_buf->smb_buf_length = receive_len; + memcpy((char *)out_buf + 4, + (char *)midQ->resp_buf + 4, + receive_len); + + dump_smb(out_buf, 92); + /* convert the length into a more usable form */ + if((receive_len > 24) && + (ses->server->secMode & (SECMODE_SIGN_REQUIRED | + SECMODE_SIGN_ENABLED))) { + rc = cifs_verify_signature(out_buf, + ses->server->mac_signing_key, + midQ->sequence_number+1); + if(rc) { + cERROR(1,("Unexpected SMB signature")); + /* BB FIXME add code to kill session */ + } + } + + *pbytes_returned = out_buf->smb_buf_length; + + /* BB special case reconnect tid and uid here? */ + rc = map_smb_to_linux_error(out_buf); + /* convert ByteCount if necessary */ + if (receive_len >= + sizeof (struct smb_hdr) - + 4 /* do not count RFC1001 header */ + + (2 * out_buf->WordCount) + 2 /* bcc */ ) + BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); + } else { + rc = -EIO; + cERROR(1,("Bad MID state?")); + } + } + DeleteMidQEntry(midQ); + if (rstart && rc == -EACCES) + return -ERESTARTSYS; return rc; } diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 7754d641775..067648b7179 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -330,11 +330,15 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size) sb = direntry->d_inode->i_sb; if(sb == NULL) return -EIO; - xid = GetXid(); cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; + if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) + return -EOPNOTSUPP; + + xid = GetXid(); + full_path = build_path_from_dentry(direntry); if(full_path == NULL) { FreeXid(xid); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 19ffb043abb..3a3567433b9 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1168,7 +1168,7 @@ static int ep_unlink(struct eventpoll *ep, struct epitem *epi) eexit_1: DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", - current, ep, epi->file, error)); + current, ep, epi->ffd.file, error)); return error; } @@ -1236,7 +1236,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k struct eventpoll *ep = epi->ep; DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", - current, epi->file, epi, ep)); + current, epi->ffd.file, epi, ep)); write_lock_irqsave(&ep->lock, flags); diff --git a/fs/exec.c b/fs/exec.c index 8344ba73a2a..54135df2a96 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -486,8 +486,6 @@ struct file *open_exec(const char *name) if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && S_ISREG(inode->i_mode)) { int err = vfs_permission(&nd, MAY_EXEC); - if (!err && !(inode->i_mode & 0111)) - err = -EACCES; file = 
ERR_PTR(err); if (!err) { file = nameidata_to_filp(&nd, O_RDONLY); @@ -753,7 +751,7 @@ no_thread_group: write_lock_irq(&tasklist_lock); spin_lock(&oldsighand->siglock); - spin_lock(&newsighand->siglock); + spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING); rcu_assign_pointer(current->sighand, newsighand); recalc_sigpending(); @@ -922,12 +920,6 @@ int prepare_binprm(struct linux_binprm *bprm) int retval; mode = inode->i_mode; - /* - * Check execute perms again - if the caller has CAP_DAC_OVERRIDE, - * generic_permission lets a non-executable through - */ - if (!(mode & 0111)) /* with at least _one_ execute bit set */ - return -EACCES; if (bprm->file->f_op == NULL) return -EACCES; diff --git a/fs/ext2/super.c b/fs/ext2/super.c index f2702cda977..681dea8f953 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -775,7 +775,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) if (EXT2_INODE_SIZE(sb) == 0) goto cantfind_ext2; sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); - if (sbi->s_inodes_per_block == 0) + if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0) goto cantfind_ext2; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index a504a40d6d2..063d994bda0 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -1269,12 +1269,12 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode, goal = le32_to_cpu(es->s_first_data_block); group_no = (goal - le32_to_cpu(es->s_first_data_block)) / EXT3_BLOCKS_PER_GROUP(sb); + goal_group = group_no; +retry_alloc: gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); if (!gdp) goto io_error; - goal_group = group_no; -retry: free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); /* * if there is not enough free blocks to make a new resevation @@ -1349,7 +1349,7 @@ retry: if (my_rsv) { my_rsv = NULL; group_no = goal_group; - goto retry; + goto retry_alloc; } /* No space left on the device */ *errp = -ENOSPC; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 63614ed1633..5c4fcd1dbf5 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -395,14 +395,16 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, struct fuse_readpages_data data; int err; + err = -EIO; if (is_bad_inode(inode)) - return -EIO; + goto clean_pages_up; data.file = file; data.inode = inode; data.req = fuse_get_req(fc); + err = PTR_ERR(data.req); if (IS_ERR(data.req)) - return PTR_ERR(data.req); + goto clean_pages_up; err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); if (!err) { @@ -412,6 +414,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, fuse_put_request(fc, data.req); } return err; + +clean_pages_up: + put_pages_list(pages); + return err; } static size_t fuse_send_write(struct fuse_req *req, struct file *file, diff --git a/fs/ioprio.c b/fs/ioprio.c index 93aa5715f22..78b1deae3fa 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c @@ -44,6 +44,9 @@ static int set_task_ioprio(struct task_struct *task, int ioprio) task->ioprio = ioprio; ioc = task->io_context; + /* see wmb() in current_io_context() */ + smp_read_barrier_depends(); + if (ioc && ioc->set_ioprio) ioc->set_ioprio(ioc, ioprio); @@ -111,9 +114,9 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio) continue; ret = set_task_ioprio(p, ioprio); if (ret) - break; + goto free_uid; } while_each_thread(g, p); - +free_uid: if (who) free_uid(user); break; @@ -137,6 +140,29 @@ out: return ret; } +int 
ioprio_best(unsigned short aprio, unsigned short bprio) +{ + unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); + unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); + + if (!ioprio_valid(aprio)) + return bprio; + if (!ioprio_valid(bprio)) + return aprio; + + if (aclass == IOPRIO_CLASS_NONE) + aclass = IOPRIO_CLASS_BE; + if (bclass == IOPRIO_CLASS_NONE) + bclass = IOPRIO_CLASS_BE; + + if (aclass == bclass) + return min(aprio, bprio); + if (aclass > bclass) + return bprio; + else + return aprio; +} + asmlinkage long sys_ioprio_get(int which, int who) { struct task_struct *g, *p; diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 0971814c38b..42da6078431 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c @@ -261,7 +261,7 @@ void journal_commit_transaction(journal_t *journal) struct buffer_head *bh = jh2bh(jh); jbd_lock_bh_state(bh); - kfree(jh->b_committed_data); + jbd_slab_free(jh->b_committed_data, bh->b_size); jh->b_committed_data = NULL; jbd_unlock_bh_state(bh); } @@ -745,14 +745,14 @@ restart_loop: * Otherwise, we can just throw away the frozen data now. */ if (jh->b_committed_data) { - kfree(jh->b_committed_data); + jbd_slab_free(jh->b_committed_data, bh->b_size); jh->b_committed_data = NULL; if (jh->b_frozen_data) { jh->b_committed_data = jh->b_frozen_data; jh->b_frozen_data = NULL; } } else if (jh->b_frozen_data) { - kfree(jh->b_frozen_data); + jbd_slab_free(jh->b_frozen_data, bh->b_size); jh->b_frozen_data = NULL; } diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 8c9b28dff11..f66724ce443 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -84,6 +84,7 @@ EXPORT_SYMBOL(journal_force_commit); static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); static void __journal_abort_soft (journal_t *journal, int errno); +static int journal_create_jbd_slab(size_t slab_size); /* * Helper function used to manage commit timeouts @@ -328,10 +329,10 @@ repeat: char *tmp; jbd_unlock_bh_state(bh_in); - tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS); + tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS); jbd_lock_bh_state(bh_in); if (jh_in->b_frozen_data) { - kfree(tmp); + jbd_slab_free(tmp, bh_in->b_size); goto repeat; } @@ -1069,17 +1070,17 @@ static int load_superblock(journal_t *journal) int journal_load(journal_t *journal) { int err; + journal_superblock_t *sb; err = load_superblock(journal); if (err) return err; + sb = journal->j_superblock; /* If this is a V2 superblock, then we have to check the * features flags on it. */ if (journal->j_format_version >= 2) { - journal_superblock_t *sb = journal->j_superblock; - if ((sb->s_feature_ro_compat & ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || (sb->s_feature_incompat & @@ -1090,6 +1091,13 @@ int journal_load(journal_t *journal) } } + /* + * Create a slab for this blocksize + */ + err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize)); + if (err) + return err; + /* Let the recovery code check whether it needs to recover any * data from the journal. */ if (journal_recover(journal)) @@ -1612,6 +1620,77 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) } /* + * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed + * and allocate frozen and commit buffers from these slabs. + * + * Reason for doing this is to avoid, SLAB_DEBUG - since it could + * cause bh to cross page boundary. 
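The index arithmetic can be checked in isolation: block sizes of 1k to 8k map to jbd_slab[] slots via size >> 11, which is also why the name table leaves a hole at index 3 (there is no 6k block size). A stand-alone sketch of that mapping:

#include <stdio.h>

#define JBD_MAX_SLABS 5
#define JBD_SLAB_INDEX(size) ((size) >> 11)

int main(void)
{
        static const char *names[JBD_MAX_SLABS] = {
                "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
        };
        const unsigned int sizes[] = { 1024, 2048, 4096, 8192 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("blocksize %5u -> index %d (%s)\n", sizes[i],
                       (int)JBD_SLAB_INDEX(sizes[i]),
                       names[JBD_SLAB_INDEX(sizes[i])]);
        return 0;
}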
+ */ + +#define JBD_MAX_SLABS 5 +#define JBD_SLAB_INDEX(size) (size >> 11) + +static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; +static const char *jbd_slab_names[JBD_MAX_SLABS] = { + "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" +}; + +static void journal_destroy_jbd_slabs(void) +{ + int i; + + for (i = 0; i < JBD_MAX_SLABS; i++) { + if (jbd_slab[i]) + kmem_cache_destroy(jbd_slab[i]); + jbd_slab[i] = NULL; + } +} + +static int journal_create_jbd_slab(size_t slab_size) +{ + int i = JBD_SLAB_INDEX(slab_size); + + BUG_ON(i >= JBD_MAX_SLABS); + + /* + * Check if we already have a slab created for this size + */ + if (jbd_slab[i]) + return 0; + + /* + * Create a slab and force alignment to be same as slabsize - + * this will make sure that allocations won't cross the page + * boundary. + */ + jbd_slab[i] = kmem_cache_create(jbd_slab_names[i], + slab_size, slab_size, 0, NULL, NULL); + if (!jbd_slab[i]) { + printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n"); + return -ENOMEM; + } + return 0; +} + +void * jbd_slab_alloc(size_t size, gfp_t flags) +{ + int idx; + + idx = JBD_SLAB_INDEX(size); + BUG_ON(jbd_slab[idx] == NULL); + return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL); +} + +void jbd_slab_free(void *ptr, size_t size) +{ + int idx; + + idx = JBD_SLAB_INDEX(size); + BUG_ON(jbd_slab[idx] == NULL); + kmem_cache_free(jbd_slab[idx], ptr); +} + +/* * Journal_head storage management */ static kmem_cache_t *journal_head_cache; @@ -1799,13 +1878,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh) printk(KERN_WARNING "%s: freeing " "b_frozen_data\n", __FUNCTION__); - kfree(jh->b_frozen_data); + jbd_slab_free(jh->b_frozen_data, bh->b_size); } if (jh->b_committed_data) { printk(KERN_WARNING "%s: freeing " "b_committed_data\n", __FUNCTION__); - kfree(jh->b_committed_data); + jbd_slab_free(jh->b_committed_data, bh->b_size); } bh->b_private = NULL; jh->b_bh = NULL; /* debug, really */ @@ -1961,6 +2040,7 @@ static void journal_destroy_caches(void) journal_destroy_revoke_caches(); journal_destroy_journal_head_cache(); journal_destroy_handle_cache(); + journal_destroy_jbd_slabs(); } static int __init journal_init(void) diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 508b2ea91f4..de2e4cbbf79 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -666,8 +666,9 @@ repeat: if (!frozen_buffer) { JBUFFER_TRACE(jh, "allocate memory for buffer"); jbd_unlock_bh_state(bh); - frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size, - GFP_NOFS); + frozen_buffer = + jbd_slab_alloc(jh2bh(jh)->b_size, + GFP_NOFS); if (!frozen_buffer) { printk(KERN_EMERG "%s: OOM for frozen_buffer\n", @@ -879,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh) repeat: if (!jh->b_committed_data) { - committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS); + committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS); if (!committed_data) { printk(KERN_EMERG "%s: No memory for committed data\n", __FUNCTION__); @@ -906,7 +907,7 @@ repeat: out: journal_put_journal_head(jh); if (unlikely(committed_data)) - kfree(committed_data); + jbd_slab_free(committed_data, bh->b_size); return err; } diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 43e3f566aad..a223cf4faa9 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -168,16 +168,15 @@ void jfs_dirty_inode(struct inode *inode) set_cflag(COMMIT_Dirty, inode); } -static int -jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, - struct buffer_head *bh_result, int create) +int jfs_get_block(struct inode 
*ip, sector_t lblock, + struct buffer_head *bh_result, int create) { s64 lblock64 = lblock; int rc = 0; xad_t xad; s64 xaddr; int xflag; - s32 xlen = max_blocks; + s32 xlen = bh_result->b_size >> ip->i_blkbits; /* * Take appropriate lock on inode @@ -188,7 +187,7 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, IREAD_LOCK(ip); if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && - (!xtLookup(ip, lblock64, max_blocks, &xflag, &xaddr, &xlen, 0)) && + (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) && xaddr) { if (xflag & XAD_NOTRECORDED) { if (!create) @@ -255,13 +254,6 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, return rc; } -static int jfs_get_block(struct inode *ip, sector_t lblock, - struct buffer_head *bh_result, int create) -{ - return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits, - bh_result, create); -} - static int jfs_writepage(struct page *page, struct writeback_control *wbc) { return nobh_writepage(page, jfs_get_block, wbc); diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index b5c7da6190d..1fc48df670c 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h @@ -32,6 +32,7 @@ extern void jfs_truncate_nolock(struct inode *, loff_t); extern void jfs_free_zero_link(struct inode *); extern struct dentry *jfs_get_parent(struct dentry *dentry); extern void jfs_set_inode_flags(struct inode *); +extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); extern const struct address_space_operations jfs_aops; extern struct inode_operations jfs_dir_inode_operations; diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 4f6cfebc82d..143bcd1d5ea 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -26,6 +26,7 @@ #include <linux/moduleparam.h> #include <linux/kthread.h> #include <linux/posix_acl.h> +#include <linux/buffer_head.h> #include <asm/uaccess.h> #include <linux/seq_file.h> @@ -298,7 +299,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize, break; } -#if defined(CONFIG_QUOTA) +#ifdef CONFIG_QUOTA case Opt_quota: case Opt_usrquota: *flag |= JFS_USRQUOTA; @@ -597,7 +598,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs) if (sbi->flag & JFS_NOINTEGRITY) seq_puts(seq, ",nointegrity"); -#if defined(CONFIG_QUOTA) +#ifdef CONFIG_QUOTA if (sbi->flag & JFS_USRQUOTA) seq_puts(seq, ",usrquota"); @@ -608,6 +609,113 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs) return 0; } +#ifdef CONFIG_QUOTA + +/* Read data from quotafile - avoid pagecache and such because we cannot afford + * acquiring the locks... As quota files are never truncated and quota code + * itself serializes the operations (and noone else should touch the files) + * we don't have to be afraid of races */ +static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, + size_t len, loff_t off) +{ + struct inode *inode = sb_dqopt(sb)->files[type]; + sector_t blk = off >> sb->s_blocksize_bits; + int err = 0; + int offset = off & (sb->s_blocksize - 1); + int tocopy; + size_t toread; + struct buffer_head tmp_bh; + struct buffer_head *bh; + loff_t i_size = i_size_read(inode); + + if (off > i_size) + return 0; + if (off+len > i_size) + len = i_size-off; + toread = len; + while (toread > 0) { + tocopy = sb->s_blocksize - offset < toread ? 
+ sb->s_blocksize - offset : toread; + + tmp_bh.b_state = 0; + tmp_bh.b_size = 1 << inode->i_blkbits; + err = jfs_get_block(inode, blk, &tmp_bh, 0); + if (err) + return err; + if (!buffer_mapped(&tmp_bh)) /* A hole? */ + memset(data, 0, tocopy); + else { + bh = sb_bread(sb, tmp_bh.b_blocknr); + if (!bh) + return -EIO; + memcpy(data, bh->b_data+offset, tocopy); + brelse(bh); + } + offset = 0; + toread -= tocopy; + data += tocopy; + blk++; + } + return len; +} + +/* Write to quotafile */ +static ssize_t jfs_quota_write(struct super_block *sb, int type, + const char *data, size_t len, loff_t off) +{ + struct inode *inode = sb_dqopt(sb)->files[type]; + sector_t blk = off >> sb->s_blocksize_bits; + int err = 0; + int offset = off & (sb->s_blocksize - 1); + int tocopy; + size_t towrite = len; + struct buffer_head tmp_bh; + struct buffer_head *bh; + + mutex_lock(&inode->i_mutex); + while (towrite > 0) { + tocopy = sb->s_blocksize - offset < towrite ? + sb->s_blocksize - offset : towrite; + + tmp_bh.b_state = 0; + tmp_bh.b_size = 1 << inode->i_blkbits; + err = jfs_get_block(inode, blk, &tmp_bh, 1); + if (err) + goto out; + if (offset || tocopy != sb->s_blocksize) + bh = sb_bread(sb, tmp_bh.b_blocknr); + else + bh = sb_getblk(sb, tmp_bh.b_blocknr); + if (!bh) { + err = -EIO; + goto out; + } + lock_buffer(bh); + memcpy(bh->b_data+offset, data, tocopy); + flush_dcache_page(bh->b_page); + set_buffer_uptodate(bh); + mark_buffer_dirty(bh); + unlock_buffer(bh); + brelse(bh); + offset = 0; + towrite -= tocopy; + data += tocopy; + blk++; + } +out: + if (len == towrite) + return err; + if (inode->i_size < off+len-towrite) + i_size_write(inode, off+len-towrite); + inode->i_version++; + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(inode); + mutex_unlock(&inode->i_mutex); + return len - towrite; +} + +#endif + static struct super_operations jfs_super_operations = { .alloc_inode = jfs_alloc_inode, .destroy_inode = jfs_destroy_inode, @@ -621,7 +729,11 @@ static struct super_operations jfs_super_operations = { .unlockfs = jfs_unlockfs, .statfs = jfs_statfs, .remount_fs = jfs_remount, - .show_options = jfs_show_options + .show_options = jfs_show_options, +#ifdef CONFIG_QUOTA + .quota_read = jfs_quota_read, + .quota_write = jfs_quota_write, +#endif }; static struct export_operations jfs_export_operations = { diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 2a4df9b3779..01b4db9e546 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -237,19 +237,22 @@ static int nlm_traverse_files(struct nlm_host *host, int action) { struct nlm_file *file, **fp; - int i; + int i, ret = 0; mutex_lock(&nlm_file_mutex); for (i = 0; i < FILE_NRHASH; i++) { fp = nlm_files + i; while ((file = *fp) != NULL) { + file->f_count++; + mutex_unlock(&nlm_file_mutex); + /* Traverse locks, blocks and shares of this file * and update file->f_locks count */ - if (nlm_inspect_file(host, file, action)) { - mutex_unlock(&nlm_file_mutex); - return 1; - } + if (nlm_inspect_file(host, file, action)) + ret = 1; + mutex_lock(&nlm_file_mutex); + file->f_count--; /* No more references to this file. Let go of it. 
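The jfs_quota_read()/jfs_quota_write() helpers added above follow the usual shape of a quota-file accessor: walk the byte range [off, off+len) one filesystem block at a time, map each logical block with jfs_get_block() into a temporary buffer_head, then copy through sb_bread()/sb_getblk(). The per-iteration size is min(blocksize - offset, remaining), and offset drops to zero after the first block. A self-contained userspace sketch of that splitting loop; block_read() is a hypothetical stand-in for the map-and-read step:

#include <stddef.h>
#include <string.h>

#define BLKSIZE 4096

/* stand-in for jfs_get_block() + sb_bread(): fill buf with block blk */
static void block_read(unsigned long blk, char buf[BLKSIZE])
{
    memset(buf, (int)(blk & 0xff), BLKSIZE);
}

size_t range_read(char *data, size_t len, unsigned long long off)
{
    unsigned long blk = (unsigned long)(off / BLKSIZE);
    size_t offset = (size_t)(off % BLKSIZE);
    size_t toread = len;
    char buf[BLKSIZE];

    while (toread > 0) {
        /* never copy past the end of the current block */
        size_t tocopy = BLKSIZE - offset < toread ?
                        BLKSIZE - offset : toread;

        block_read(blk, buf);
        memcpy(data, buf + offset, tocopy);

        offset = 0;     /* later blocks are copied from their start */
        toread -= tocopy;
        data += tocopy;
        blk++;
    }
    return len;
}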
*/ if (!file->f_blocks && !file->f_locks && !file->f_shares && !file->f_count) { @@ -262,7 +265,7 @@ nlm_traverse_files(struct nlm_host *host, int action) } } mutex_unlock(&nlm_file_mutex); - return 0; + return ret; } /* diff --git a/fs/locks.c b/fs/locks.c index b0b41a64e10..d7c53392cac 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1421,8 +1421,9 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp) if (!leases_enable) goto out; - error = lease_alloc(filp, arg, &fl); - if (error) + error = -ENOMEM; + fl = locks_alloc_lock(); + if (fl == NULL) goto out; locks_copy_lock(fl, lease); @@ -1430,6 +1431,7 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp) locks_insert_lock(before, fl); *flp = fl; + error = 0; out: return error; } diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 9ea91c5eeb7..330ff9fc7cf 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -204,6 +204,8 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) /* * Allocate the buffer map to keep the superblock small. */ + if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) + goto out_illegal_sb; i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); map = kmalloc(i, GFP_KERNEL); if (!map) @@ -263,7 +265,7 @@ out_no_root: out_no_bitmap: printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); - out_freemap: +out_freemap: for (i = 0; i < sbi->s_imap_blocks; i++) brelse(sbi->s_imap[i]); for (i = 0; i < sbi->s_zmap_blocks; i++) @@ -276,11 +278,16 @@ out_no_map: printk("MINIX-fs: can't allocate map\n"); goto out_release; +out_illegal_sb: + if (!silent) + printk("MINIX-fs: bad superblock\n"); + goto out_release; + out_no_fs: if (!silent) printk("VFS: Can't find a Minix or Minix V2 filesystem " "on device %s\n", s->s_id); - out_release: +out_release: brelse(bh); goto out; @@ -290,7 +297,7 @@ out_bad_hblock: out_bad_sb: printk("MINIX-fs: unable to read superblock\n"); - out: +out: s->s_fs_info = NULL; kfree(sbi); return -EINVAL; diff --git a/fs/namei.c b/fs/namei.c index 55a131230f9..432d6bc6fab 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -227,10 +227,10 @@ int generic_permission(struct inode *inode, int mask, int permission(struct inode *inode, int mask, struct nameidata *nd) { + umode_t mode = inode->i_mode; int retval, submask; if (mask & MAY_WRITE) { - umode_t mode = inode->i_mode; /* * Nobody gets write access to a read-only fs. @@ -247,6 +247,13 @@ int permission(struct inode *inode, int mask, struct nameidata *nd) } + /* + * MAY_EXEC on regular files requires special handling: We override + * filesystem execute permissions if the mode bits aren't set. + */ + if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO)) + return -EACCES; + /* Ordinary permission routines do not understand MAY_APPEND. */ submask = mask & ~MAY_APPEND; if (inode->i_op && inode->i_op->permission) @@ -1767,6 +1774,8 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir) if (nd->last_type != LAST_NORM) goto fail; nd->flags &= ~LOOKUP_PARENT; + nd->flags |= LOOKUP_CREATE; + nd->intent.open.flags = O_EXCL; /* * Do the final lookup. 
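In the fs/lockd/svcsubs.c hunk above, nlm_traverse_files() now bumps file->f_count before dropping nlm_file_mutex, calls the potentially-blocking per-file inspection without the mutex, then retakes the mutex, drops the reference and keeps walking instead of returning on the first match. The underlying pattern, pin an object with a reference so it cannot disappear while the list lock is released, shown as a small pthreads sketch with made-up types:

#include <pthread.h>
#include <stddef.h>

struct item {
    int refs;               /* pin count, protected by list_lock */
    struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *head;

/* stand-in for nlm_inspect_file(): may sleep, must not hold list_lock */
static void inspect(struct item *it)
{
    (void)it;
}

void traverse(void)
{
    pthread_mutex_lock(&list_lock);
    for (struct item *it = head; it != NULL; it = it->next) {
        it->refs++;                         /* pin: *it stays valid */
        pthread_mutex_unlock(&list_lock);

        inspect(it);                        /* safe without the lock */

        pthread_mutex_lock(&list_lock);
        it->refs--;                         /* unpin under the lock */
    }
    pthread_mutex_unlock(&list_lock);
}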
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index cc2b874ad5a..48e892880d5 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -312,7 +312,13 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset) static int nfs_release_page(struct page *page, gfp_t gfp) { - return !nfs_wb_page(page->mapping->host, page); + if (gfp & __GFP_FS) + return !nfs_wb_page(page->mapping->host, page); + else + /* + * Avoid deadlock on nfs_wait_on_request(). + */ + return 0; } const struct address_space_operations nfs_file_aops = { diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index b81e7ed3c90..07a5dd57646 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -130,9 +130,7 @@ nfs_idmap_delete(struct nfs4_client *clp) if (!idmap) return; - dput(idmap->idmap_dentry); - idmap->idmap_dentry = NULL; - rpc_unlink(idmap->idmap_path); + rpc_unlink(idmap->idmap_dentry); clp->cl_idmap = NULL; kfree(idmap); } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e6ee97f19d8..153898e1331 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2668,7 +2668,7 @@ out: nfs4_set_cached_acl(inode, acl); } -static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) +static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) { struct page *pages[NFS4ACL_MAXPAGES]; struct nfs_getaclargs args = { @@ -2721,6 +2721,19 @@ out_free: return ret; } +static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) +{ + struct nfs4_exception exception = { }; + ssize_t ret; + do { + ret = __nfs4_get_acl_uncached(inode, buf, buflen); + if (ret >= 0) + break; + ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); + } while (exception.retry); + return ret; +} + static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) { struct nfs_server *server = NFS_SERVER(inode); @@ -2737,7 +2750,7 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) return nfs4_get_acl_uncached(inode, buf, buflen); } -static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) +static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) { struct nfs_server *server = NFS_SERVER(inode); struct page *pages[NFS4ACL_MAXPAGES]; @@ -2763,6 +2776,18 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen return ret; } +static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(inode), + __nfs4_proc_set_acl(inode, buf, buflen), + &exception); + } while (exception.retry); + return err; +} + static int nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) { diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 1750d996f49..730ec8fb31c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3355,7 +3355,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n struct kvec *iov = rcvbuf->head; unsigned int nr, pglen = rcvbuf->page_len; uint32_t *end, *entry, *p, *kaddr; - uint32_t len, attrlen; + uint32_t len, attrlen, xlen; int hdrlen, recvd, status; status = decode_op_hdr(xdr, OP_READDIR); @@ -3377,10 +3377,10 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE); kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0); - end = (uint32_t *) ((char *)p + pglen + readdir->pgbase); + end = p + ((pglen + 
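The nfs4proc.c changes above split the one-shot operations (__nfs4_get_acl_uncached(), __nfs4_proc_set_acl()) from thin wrappers that feed every error through nfs4_handle_exception() and loop while it requests a retry, the same pattern the other NFSv4 procedures use for transient server states. A minimal sketch of that wrapper shape, with hypothetical do_once()/handle_exception() stand-ins:

#include <stdbool.h>

struct nfs_exception { bool retry; };

/* stand-in for one RPC attempt: pretend the first two calls are transient */
static int do_once(void)
{
    static int calls;
    return ++calls < 3 ? -11 /* EAGAIN-ish */ : 0;
}

/* stand-in for nfs4_handle_exception(): decide whether to retry */
static int handle_exception(int err, struct nfs_exception *exc)
{
    exc->retry = (err == -11);
    return exc->retry ? 0 : err;
}

int do_with_retry(void)
{
    struct nfs_exception exc = { false };
    int err;

    do {
        err = handle_exception(do_once(), &exc);
    } while (exc.retry);

    return err;
}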
readdir->pgbase) >> 2); entry = p; for (nr = 0; *p++; nr++) { - if (p + 3 > end) + if (end - p < 3) goto short_pkt; dprintk("cookie = %Lu, ", *((unsigned long long *)p)); p += 2; /* cookie */ @@ -3389,18 +3389,19 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); goto err_unmap; } - dprintk("filename = %*s\n", len, (char *)p); - p += XDR_QUADLEN(len); - if (p + 1 > end) + xlen = XDR_QUADLEN(len); + if (end - p < xlen + 1) goto short_pkt; + dprintk("filename = %*s\n", len, (char *)p); + p += xlen; len = ntohl(*p++); /* bitmap length */ - p += len; - if (p + 1 > end) + if (end - p < len + 1) goto short_pkt; + p += len; attrlen = XDR_QUADLEN(ntohl(*p++)); - p += attrlen; /* attributes */ - if (p + 2 > end) + if (end - p < attrlen + 2) goto short_pkt; + p += attrlen; /* attributes */ entry = p; } if (!nr && (entry[0] != 0 || entry[1] == 0)) diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 65c0c5b3235..da9cf11c326 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -116,10 +116,17 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data) pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; base &= ~PAGE_CACHE_MASK; pglen = PAGE_CACHE_SIZE - base; - if (pglen < remainder) + for (;;) { + if (remainder <= pglen) { + memclear_highpage_flush(*pages, base, remainder); + break; + } memclear_highpage_flush(*pages, base, pglen); - else - memclear_highpage_flush(*pages, base, remainder); + pages++; + remainder -= pglen; + pglen = PAGE_CACHE_SIZE; + base = 0; + } } /* @@ -476,6 +483,8 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) unsigned int base = data->args.pgbase; struct page **pages; + if (data->res.eof) + count = data->args.count; if (unlikely(count == 0)) return; pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; @@ -483,11 +492,7 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) count += base; for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) SetPageUptodate(*pages); - /* - * Was this an eof or a short read? If the latter, don't mark the page - * as uptodate yet. - */ - if (count > 0 && (data->res.eof || data->args.count == data->res.count)) + if (count != 0) SetPageUptodate(*pages); } @@ -502,6 +507,8 @@ static void nfs_readpage_set_pages_error(struct nfs_read_data *data) count += base; for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) SetPageError(*pages); + if (count != 0) + SetPageError(*pages); } /* diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 1b8346dd057..9503240ef0e 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2375,7 +2375,6 @@ leave: mlog(0, "returning %d\n", ret); return ret; } -EXPORT_SYMBOL_GPL(dlm_migrate_lockres); int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) { diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index b0c3134f4f7..37be4b2e0d4 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -155,7 +155,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, else status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); - if (status != DLM_NORMAL) + if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node)) goto leave; /* By now this has been masked out of cancel requests. 
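The decode_readdir() rework above swaps checks of the form "p + n > end" for "end - p < n" and converts XDR lengths to word counts (XDR_QUADLEN) before comparing, because adding a large, server-supplied length to p can wrap the pointer and defeat the old test. The safe form compares the remaining room instead. A tiny illustration with a hypothetical helper:

#include <stddef.h>
#include <stdint.h>

/* nonzero if n 32-bit words can still be read at p without passing end */
int words_available(const uint32_t *p, const uint32_t *end, size_t n)
{
    /*
     * (end - p) cannot overflow once p <= end, whereas p + n may wrap
     * around for an untrusted n and make "p + n > end" pass wrongly.
     */
    return p <= end && (size_t)(end - p) >= n;
}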
*/ @@ -183,8 +183,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, spin_lock(&lock->spinlock); /* if the master told us the lock was already granted, * let the ast handle all of these actions */ - if (status == DLM_NORMAL && - lksb->status == DLM_CANCELGRANT) { + if (status == DLM_CANCELGRANT) { actions &= ~(DLM_UNLOCK_REMOVE_LOCK| DLM_UNLOCK_REGRANT_LOCK| DLM_UNLOCK_CLEAR_CONVERT_TYPE); @@ -349,14 +348,9 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, vec, veclen, owner, &status); if (tmpret >= 0) { // successfully sent and received - if (status == DLM_CANCELGRANT) - ret = DLM_NORMAL; - else if (status == DLM_FORWARD) { + if (status == DLM_FORWARD) mlog(0, "master was in-progress. retry\n"); - ret = DLM_FORWARD; - } else - ret = status; - lksb->status = status; + ret = status; } else { mlog_errno(tmpret); if (dlm_is_host_down(tmpret)) { @@ -372,7 +366,6 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, /* something bad. this will BUG in ocfs2 */ ret = dlm_err_to_dlm_status(tmpret); } - lksb->status = ret; } return ret; @@ -483,6 +476,10 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data) /* lock was found on queue */ lksb = lock->lksb; + if (flags & (LKM_VALBLK|LKM_PUT_LVB) && + lock->ml.type != LKM_EXMODE) + flags &= ~(LKM_VALBLK|LKM_PUT_LVB); + /* unlockast only called on originating node */ if (flags & LKM_PUT_LVB) { lksb->flags |= DLM_LKSB_PUT_LVB; @@ -507,11 +504,8 @@ not_found: "cookie=%u:%llu\n", dlm_get_lock_cookie_node(unlock->cookie), dlm_get_lock_cookie_seq(unlock->cookie)); - else { - /* send the lksb->status back to the other node */ - status = lksb->status; + else dlm_lock_put(lock); - } leave: if (res) @@ -533,26 +527,22 @@ static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, if (dlm_lock_on_list(&res->blocked, lock)) { /* cancel this outright */ - lksb->status = DLM_NORMAL; status = DLM_NORMAL; *actions = (DLM_UNLOCK_CALL_AST | DLM_UNLOCK_REMOVE_LOCK); } else if (dlm_lock_on_list(&res->converting, lock)) { /* cancel the request, put back on granted */ - lksb->status = DLM_NORMAL; status = DLM_NORMAL; *actions = (DLM_UNLOCK_CALL_AST | DLM_UNLOCK_REMOVE_LOCK | DLM_UNLOCK_REGRANT_LOCK | DLM_UNLOCK_CLEAR_CONVERT_TYPE); } else if (dlm_lock_on_list(&res->granted, lock)) { - /* too late, already granted. DLM_CANCELGRANT */ - lksb->status = DLM_CANCELGRANT; - status = DLM_NORMAL; + /* too late, already granted. */ + status = DLM_CANCELGRANT; *actions = DLM_UNLOCK_CALL_AST; } else { mlog(ML_ERROR, "lock to cancel is not on any list!\n"); - lksb->status = DLM_IVLOCKID; status = DLM_IVLOCKID; *actions = 0; } @@ -569,13 +559,11 @@ static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, /* unlock request */ if (!dlm_lock_on_list(&res->granted, lock)) { - lksb->status = DLM_DENIED; status = DLM_DENIED; dlm_error(status); *actions = 0; } else { /* unlock granted lock */ - lksb->status = DLM_NORMAL; status = DLM_NORMAL; *actions = (DLM_UNLOCK_FREE_LOCK | DLM_UNLOCK_CALL_AST | @@ -632,6 +620,8 @@ retry: spin_lock(&res->spinlock); is_master = (res->owner == dlm->node_num); + if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE) + flags &= ~LKM_VALBLK; spin_unlock(&res->spinlock); if (is_master) { @@ -665,7 +655,7 @@ retry: } if (call_ast) { - mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status); + mlog(0, "calling unlockast(%p, %d)\n", data, status); if (is_master) { /* it is possible that there is one last bast * pending. 
make sure it is flushed, then @@ -677,9 +667,12 @@ retry: wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock)); } - (*unlockast)(data, lksb->status); + (*unlockast)(data, status); } + if (status == DLM_CANCELGRANT) + status = DLM_NORMAL; + if (status == DLM_NORMAL) { mlog(0, "kicking the thread\n"); dlm_kick_thread(dlm, res); diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 0d1973ea32b..1f17a4d0828 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -840,6 +840,12 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, mlog(0, "Allocating %u clusters for a new window.\n", ocfs2_local_alloc_window_bits(osb)); + + /* Instruct the allocation code to try the most recently used + * cluster group. We'll re-record the group used this pass + * below. */ + ac->ac_last_group = osb->la_last_gd; + /* we used the generic suballoc reserve function, but we set * everything up nicely, so there's no reason why we can't use * the more specific cluster api to claim bits. */ @@ -852,6 +858,8 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, goto bail; } + osb->la_last_gd = ac->ac_last_group; + la->la_bm_off = cpu_to_le32(cluster_off); alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count); /* just in case... In the future when we find space ourselves, diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index cd4a6f253d1..0462a7f4e21 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -197,7 +197,6 @@ struct ocfs2_super struct ocfs2_node_map recovery_map; struct ocfs2_node_map umount_map; - u32 num_clusters; u64 root_blkno; u64 system_dir_blkno; u64 bitmap_blkno; @@ -237,6 +236,7 @@ struct ocfs2_super enum ocfs2_local_alloc_state local_alloc_state; struct buffer_head *local_alloc_bh; + u64 la_last_gd; /* Next two fields are for local node slot recovery during * mount. */ diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 195523090c8..9d91e66f51a 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -70,12 +70,6 @@ static int ocfs2_block_group_search(struct inode *inode, struct buffer_head *group_bh, u32 bits_wanted, u32 min_bits, u16 *bit_off, u16 *bits_found); -static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, - u32 bits_wanted, - u32 min_bits, - u16 *bit_off, - unsigned int *num_bits, - u64 *bg_blkno); static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, struct ocfs2_alloc_context *ac, u32 bits_wanted, @@ -85,11 +79,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, u64 *bg_blkno); static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, int nr); -static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, - struct buffer_head *bg_bh, - unsigned int bits_wanted, - u16 *bit_off, - u16 *bits_found); static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle, struct inode *alloc_inode, struct ocfs2_group_desc *bg, @@ -143,6 +132,64 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc); } +/* somewhat more expensive than our other checks, so use sparingly. 
*/ +static int ocfs2_check_group_descriptor(struct super_block *sb, + struct ocfs2_dinode *di, + struct ocfs2_group_desc *gd) +{ + unsigned int max_bits; + + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(sb, gd); + return -EIO; + } + + if (di->i_blkno != gd->bg_parent_dinode) { + ocfs2_error(sb, "Group descriptor # %llu has bad parent " + "pointer (%llu, expected %llu)", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + (unsigned long long)le64_to_cpu(gd->bg_parent_dinode), + (unsigned long long)le64_to_cpu(di->i_blkno)); + return -EIO; + } + + max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc); + if (le16_to_cpu(gd->bg_bits) > max_bits) { + ocfs2_error(sb, "Group descriptor # %llu has bit count of %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_chain) >= + le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) { + ocfs2_error(sb, "Group descriptor # %llu has bad chain %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_chain)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) { + ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " + "claims that %u are free", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + le16_to_cpu(gd->bg_free_bits_count)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) { + ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " + "max bitmap bits of %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + 8 * le16_to_cpu(gd->bg_size)); + return -EIO; + } + + return 0; +} + static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle, struct inode *alloc_inode, struct buffer_head *bg_bh, @@ -663,6 +710,7 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, struct buffer_head *bg_bh, unsigned int bits_wanted, + unsigned int total_bits, u16 *bit_off, u16 *bits_found) { @@ -679,10 +727,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, found = start = best_offset = best_size = 0; bitmap = bg->bg_bitmap; - while((offset = ocfs2_find_next_zero_bit(bitmap, - le16_to_cpu(bg->bg_bits), - start)) != -1) { - if (offset == le16_to_cpu(bg->bg_bits)) + while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) { + if (offset == total_bits) break; if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) { @@ -911,14 +957,35 @@ static int ocfs2_cluster_group_search(struct inode *inode, { int search = -ENOSPC; int ret; - struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data; + struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data; u16 tmp_off, tmp_found; + unsigned int max_bits, gd_cluster_off; BUG_ON(!ocfs2_is_cluster_bitmap(inode)); - if (bg->bg_free_bits_count) { + if (gd->bg_free_bits_count) { + max_bits = le16_to_cpu(gd->bg_bits); + + /* Tail groups in cluster bitmaps which aren't cpg + * aligned are prone to partial extention by a failed + * fs resize. If the file system resize never got to + * update the dinode cluster count, then we don't want + * to trust any clusters past it, regardless of what + * the group descriptor says. 
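ocfs2_check_group_descriptor() above validates on-disk metadata before the allocator trusts it: signature, parent dinode pointer, bit count against the chain's cl_cpg * cl_bpc, chain index against cl_next_free_rec, free count against total bits, and total bits against the bitmap's byte size. The same checks reduced to a standalone sketch over a simplified, hypothetical descriptor:

#include <errno.h>
#include <stdint.h>

/* simplified stand-in for struct ocfs2_group_desc */
struct gd {
    uint16_t bg_bits;               /* bits covered by this group */
    uint16_t bg_free_bits_count;    /* bits currently free */
    uint16_t bg_size;               /* bitmap size in bytes */
    uint16_t bg_chain;              /* chain this group claims to be on */
};

int check_gd(const struct gd *gd, unsigned int max_bits, unsigned int chains)
{
    if (gd->bg_bits > max_bits)                 /* larger than the chain allows */
        return -EIO;
    if (gd->bg_chain >= chains)                 /* points at a nonexistent chain */
        return -EIO;
    if (gd->bg_free_bits_count > gd->bg_bits)   /* free count exceeds total */
        return -EIO;
    if (gd->bg_bits > 8u * gd->bg_size)         /* bits don't fit the bitmap */
        return -EIO;
    return 0;                                   /* looks sane; safe to use */
}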
*/ + gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb, + le64_to_cpu(gd->bg_blkno)); + if ((gd_cluster_off + max_bits) > + OCFS2_I(inode)->ip_clusters) { + max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off; + mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + OCFS2_I(inode)->ip_clusters, max_bits); + } + ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), group_bh, bits_wanted, + max_bits, &tmp_off, &tmp_found); if (ret) return ret; @@ -951,17 +1018,109 @@ static int ocfs2_block_group_search(struct inode *inode, if (bg->bg_free_bits_count) ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), group_bh, bits_wanted, + le16_to_cpu(bg->bg_bits), bit_off, bits_found); return ret; } +static int ocfs2_alloc_dinode_update_counts(struct inode *inode, + struct ocfs2_journal_handle *handle, + struct buffer_head *di_bh, + u32 num_bits, + u16 chain) +{ + int ret; + u32 tmp_used; + struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; + struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain; + + ret = ocfs2_journal_access(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + tmp_used = le32_to_cpu(di->id1.bitmap1.i_used); + di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used); + le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits); + + ret = ocfs2_journal_dirty(handle, di_bh); + if (ret < 0) + mlog_errno(ret); + +out: + return ret; +} + +static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, + u32 bits_wanted, + u32 min_bits, + u16 *bit_off, + unsigned int *num_bits, + u64 gd_blkno, + u16 *bits_left) +{ + int ret; + u16 found; + struct buffer_head *group_bh = NULL; + struct ocfs2_group_desc *gd; + struct inode *alloc_inode = ac->ac_inode; + struct ocfs2_journal_handle *handle = ac->ac_handle; + + ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno, + &group_bh, OCFS2_BH_CACHED, alloc_inode); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + gd = (struct ocfs2_group_desc *) group_bh->b_data; + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd); + ret = -EIO; + goto out; + } + + ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits, + bit_off, &found); + if (ret < 0) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + + *num_bits = found; + + ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, + *num_bits, + le16_to_cpu(gd->bg_chain)); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh, + *bit_off, *num_bits); + if (ret < 0) + mlog_errno(ret); + + *bits_left = le16_to_cpu(gd->bg_free_bits_count); + +out: + brelse(group_bh); + + return ret; +} + static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, u32 bits_wanted, u32 min_bits, u16 *bit_off, unsigned int *num_bits, - u64 *bg_blkno) + u64 *bg_blkno, + u16 *bits_left) { int status; u16 chain, tmp_bits; @@ -988,9 +1147,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, goto bail; } bg = (struct ocfs2_group_desc *) group_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); - status = -EIO; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); + if (status) { + mlog_errno(status); goto bail; } @@ -1018,9 +1177,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, goto 
bail; } bg = (struct ocfs2_group_desc *) group_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); - status = -EIO; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); + if (status) { + mlog_errno(status); goto bail; } } @@ -1099,6 +1258,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, (unsigned long long)fe->i_blkno); *bg_blkno = le64_to_cpu(bg->bg_blkno); + *bits_left = le16_to_cpu(bg->bg_free_bits_count); bail: if (group_bh) brelse(group_bh); @@ -1120,6 +1280,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, { int status; u16 victim, i; + u16 bits_left = 0; + u64 hint_blkno = ac->ac_last_group; struct ocfs2_chain_list *cl; struct ocfs2_dinode *fe; @@ -1146,6 +1308,28 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, goto bail; } + if (hint_blkno) { + /* Attempt to short-circuit the usual search mechanism + * by jumping straight to the most recently used + * allocation group. This helps us mantain some + * contiguousness across allocations. */ + status = ocfs2_search_one_group(ac, bits_wanted, min_bits, + bit_off, num_bits, + hint_blkno, &bits_left); + if (!status) { + /* Be careful to update *bg_blkno here as the + * caller is expecting it to be filled in, and + * ocfs2_search_one_group() won't do that for + * us. */ + *bg_blkno = hint_blkno; + goto set_hint; + } + if (status < 0 && status != -ENOSPC) { + mlog_errno(status); + goto bail; + } + } + cl = (struct ocfs2_chain_list *) &fe->id2.i_chain; victim = ocfs2_find_victim_chain(cl); @@ -1153,9 +1337,9 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, ac->ac_allow_chain_relink = 1; status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off, - num_bits, bg_blkno); + num_bits, bg_blkno, &bits_left); if (!status) - goto bail; + goto set_hint; if (status < 0 && status != -ENOSPC) { mlog_errno(status); goto bail; @@ -1177,8 +1361,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, ac->ac_chain = i; status = ocfs2_search_chain(ac, bits_wanted, min_bits, - bit_off, num_bits, - bg_blkno); + bit_off, num_bits, bg_blkno, + &bits_left); if (!status) break; if (status < 0 && status != -ENOSPC) { @@ -1186,8 +1370,19 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, goto bail; } } -bail: +set_hint: + if (status != -ENOSPC) { + /* If the next search of this group is not likely to + * yield a suitable extent, then we reset the last + * group hint so as to not waste a disk read */ + if (bits_left < min_bits) + ac->ac_last_group = 0; + else + ac->ac_last_group = *bg_blkno; + } + +bail: mlog_exit(status); return status; } @@ -1341,7 +1536,7 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb, { int status; unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; - u64 bg_blkno; + u64 bg_blkno = 0; u16 bg_bit_off; mlog_entry_void(); @@ -1494,9 +1689,9 @@ static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle, } group = (struct ocfs2_group_desc *) group_bh->b_data; - if (!OCFS2_IS_VALID_GROUP_DESC(group)) { - OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, group); - status = -EIO; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group); + if (status) { + mlog_errno(status); goto bail; } BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits)); diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index a76c82a7cea..c787838d105 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -49,6 +49,8 @@ struct ocfs2_alloc_context { u16 
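The ocfs2_claim_suballoc_bits() changes above cache the most recently used allocation group in ac->ac_last_group (and, for the local alloc window, in osb->la_last_gd) and try it first via ocfs2_search_one_group(), falling back to the normal chain search only if the hint fails; the hint is also cleared when the group has fewer than min_bits left so the next pass does not waste a disk read on it. The hint-first idea in miniature, with invented names and without the nearly-full check:

#include <stddef.h>

#define NGROUPS 8

static unsigned int free_bits[NGROUPS];  /* free space per allocation group */
static size_t last_group;                /* hint: group used by the last allocation */

/* pick a group with at least `want` free bits, or (size_t)-1 if none */
size_t pick_group(unsigned int want)
{
    /* try the hint first: keeps successive allocations close together */
    if (free_bits[last_group] >= want)
        return last_group;

    /* fall back to scanning every group */
    for (size_t i = 0; i < NGROUPS; i++) {
        if (free_bits[i] >= want) {
            last_group = i;              /* remember for next time */
            return i;
        }
    }
    return (size_t)-1;                   /* no space anywhere */
}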
ac_chain; int ac_allow_chain_relink; group_search_t *ac_group_search; + + u64 ac_last_group; }; void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 382706a67ff..d17e33e66a1 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1442,8 +1442,13 @@ static int ocfs2_initialize_super(struct super_block *sb, osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno; + /* We don't have a cluster lock on the bitmap here because + * we're only interested in static information and the extra + * complexity at mount time isn't worht it. Don't pass the + * inode in to the read function though as we don't want it to + * be put in the cache. */ status = ocfs2_read_block(osb, osb->bitmap_blkno, &bitmap_bh, 0, - inode); + NULL); iput(inode); if (status < 0) { mlog_errno(status); @@ -1452,7 +1457,6 @@ static int ocfs2_initialize_super(struct super_block *sb, di = (struct ocfs2_dinode *) bitmap_bh->b_data; osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg); - osb->num_clusters = le32_to_cpu(di->id1.bitmap1.i_total); brelse(bitmap_bh); mlog(0, "cluster bitmap inode: %llu, clusters per group: %u\n", (unsigned long long)osb->bitmap_blkno, osb->bitmap_cpg); diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c index abe91ca03ed..0a5927c806c 100644 --- a/fs/partitions/sun.c +++ b/fs/partitions/sun.c @@ -74,7 +74,7 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev) spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); for (i = 0; i < 8; i++, p++) { unsigned long st_sector; - int num_sectors; + unsigned int num_sectors; st_sector = be32_to_cpu(p->start_cylinder) * spc; num_sectors = be32_to_cpu(p->num_sectors); diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 9f2cfc30f9c..94215622544 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c @@ -169,7 +169,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off, "Mapped: %8lu kB\n" "Slab: %8lu kB\n" "PageTables: %8lu kB\n" - "NFS Unstable: %8lu kB\n" + "NFS_Unstable: %8lu kB\n" "Bounce: %8lu kB\n" "CommitLimit: %8lu kB\n" "Committed_AS: %8lu kB\n" diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 39fedaa88a0..d935fb9394e 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -424,7 +424,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf) int res = -ENOTDIR; if (!file->f_op || !file->f_op->readdir) goto out; - mutex_lock(&inode->i_mutex); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR); // down(&inode->i_zombie); res = -ENOENT; if (!IS_DEADDIR(inode)) { diff --git a/fs/udf/super.c b/fs/udf/super.c index 4df822c881b..fcce1a21a51 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -115,6 +115,13 @@ static struct inode *udf_alloc_inode(struct super_block *sb) ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL); if (!ei) return NULL; + + ei->i_unique = 0; + ei->i_lenExtents = 0; + ei->i_next_alloc_block = 0; + ei->i_next_alloc_goal = 0; + ei->i_strat4096 = 0; + return &ei->vfs_inode; } @@ -1652,7 +1659,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) iput(inode); goto error_out; } - sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_maxbytes = 1<<30; return 0; error_out: diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c index e1b0e8cfecb..0abd66ce36e 100644 --- a/fs/udf/truncate.c +++ b/fs/udf/truncate.c @@ -239,37 +239,51 @@ void udf_truncate_extents(struct inode * inode) { if (offset) { - extoffset -= adsize; - etype = udf_next_aext(inode, &bloc, 
&extoffset, &eloc, &elen, &bh, 1); - if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) - { - extoffset -= adsize; - elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); - udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); + /* + * OK, there is not extent covering inode->i_size and + * no extent above inode->i_size => truncate is + * extending the file by 'offset'. + */ + if ((!bh && extoffset == udf_file_entry_alloc_offset(inode)) || + (bh && extoffset == sizeof(struct allocExtDesc))) { + /* File has no extents at all! */ + memset(&eloc, 0x00, sizeof(kernel_lb_addr)); + elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; + udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); } - else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) - { - kernel_lb_addr neloc = { 0, 0 }; + else { extoffset -= adsize; - nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | - ((elen + offset + inode->i_sb->s_blocksize - 1) & - ~(inode->i_sb->s_blocksize - 1)); - udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); - udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); - } - else - { - if (elen & (inode->i_sb->s_blocksize - 1)) + etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); + if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) + { + extoffset -= adsize; + elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); + udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); + } + else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { + kernel_lb_addr neloc = { 0, 0 }; extoffset -= adsize; - elen = EXT_RECORDED_ALLOCATED | - ((elen + inode->i_sb->s_blocksize - 1) & + nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | + ((elen + offset + inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1)); - udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); + udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); + udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); + } + else + { + if (elen & (inode->i_sb->s_blocksize - 1)) + { + extoffset -= adsize; + elen = EXT_RECORDED_ALLOCATED | + ((elen + inode->i_sb->s_blocksize - 1) & + ~(inode->i_sb->s_blocksize - 1)); + udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); + } + memset(&eloc, 0x00, sizeof(kernel_lb_addr)); + elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; + udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); } - memset(&eloc, 0x00, sizeof(kernel_lb_addr)); - elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset; - udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1); } } } diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index e7c8615beb6..30c6e8a9446 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -169,18 +169,20 @@ static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh) static struct buffer_head * ufs_clear_frags(struct inode *inode, sector_t beg, - unsigned int n) + unsigned int n, sector_t want) { - struct buffer_head *res, *bh; + struct buffer_head *res = NULL, *bh; sector_t end = beg + n; - res = sb_getblk(inode->i_sb, beg); - ufs_clear_frag(inode, res); - for (++beg; beg < end; ++beg) { + for (; beg < end; ++beg) { bh = sb_getblk(inode->i_sb, beg); ufs_clear_frag(inode, bh); - brelse(bh); + if (want != beg) + brelse(bh); + else + res = bh; } + BUG_ON(!res); return res; } @@ -265,7 +267,9 @@ repeat: lastfrag = ufsi->i_lastfrag; } - goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; + tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]); + if (tmp) + goal = tmp + uspi->s_fpb; tmp = ufs_new_fragments (inode, p, 
fragment - blockoff, goal, required + blockoff, err, locked_page); @@ -277,13 +281,15 @@ repeat: tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), err, locked_page); - } + } else /* (lastblock > block) */ { /* * We will allocate new block before last allocated block */ - else /* (lastblock > block) */ { - if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) - goal = tmp + uspi->s_fpb; + if (block) { + tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[block-1]); + if (tmp) + goal = tmp + uspi->s_fpb; + } tmp = ufs_new_fragments(inode, p, fragment - blockoff, goal, uspi->s_fpb, err, locked_page); } @@ -296,7 +302,7 @@ repeat: } if (!phys) { - result = ufs_clear_frags(inode, tmp + blockoff, required); + result = ufs_clear_frags(inode, tmp, required, tmp + blockoff); } else { *phys = tmp + blockoff; result = NULL; @@ -383,7 +389,7 @@ repeat: } } - if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb)) + if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]))) goal = tmp + uspi->s_fpb; else goal = bh->b_blocknr + uspi->s_fpb; @@ -397,7 +403,8 @@ repeat: if (!phys) { - result = ufs_clear_frags(inode, tmp + blockoff, uspi->s_fpb); + result = ufs_clear_frags(inode, tmp, uspi->s_fpb, + tmp + blockoff); } else { *phys = tmp + blockoff; *new = 1; diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index c9b55872079..ea11d04c41a 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c @@ -375,17 +375,15 @@ static int ufs_alloc_lastblock(struct inode *inode) int err = 0; struct address_space *mapping = inode->i_mapping; struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; - struct ufs_inode_info *ufsi = UFS_I(inode); unsigned lastfrag, i, end; struct page *lastpage; struct buffer_head *bh; lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift; - if (!lastfrag) { - ufsi->i_lastfrag = 0; + if (!lastfrag) goto out; - } + lastfrag--; lastpage = ufs_get_locked_page(mapping, lastfrag >> @@ -400,25 +398,25 @@ static int ufs_alloc_lastblock(struct inode *inode) for (i = 0; i < end; ++i) bh = bh->b_this_page; - if (!buffer_mapped(bh)) { - err = ufs_getfrag_block(inode, lastfrag, bh, 1); - - if (unlikely(err)) - goto out_unlock; - - if (buffer_new(bh)) { - clear_buffer_new(bh); - unmap_underlying_metadata(bh->b_bdev, - bh->b_blocknr); - /* - * we do not zeroize fragment, because of - * if it maped to hole, it already contains zeroes - */ - set_buffer_uptodate(bh); - mark_buffer_dirty(bh); - set_page_dirty(lastpage); - } + + err = ufs_getfrag_block(inode, lastfrag, bh, 1); + + if (unlikely(err)) + goto out_unlock; + + if (buffer_new(bh)) { + clear_buffer_new(bh); + unmap_underlying_metadata(bh->b_bdev, + bh->b_blocknr); + /* + * we do not zeroize fragment, because of + * if it maped to hole, it already contains zeroes + */ + set_buffer_uptodate(bh); + mark_buffer_dirty(bh); + set_page_dirty(lastpage); } + out_unlock: ufs_put_locked_page(lastpage); out: @@ -440,23 +438,11 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; - if (inode->i_size > old_i_size) { - /* - * if we expand file we should care about - * allocation of block for last byte first of all - */ - err = ufs_alloc_lastblock(inode); + err = ufs_alloc_lastblock(inode); - if (err) { - i_size_write(inode, old_i_size); - goto out; - } - /* - * go away, because of we expand file, and we do not - * need free blocks, and zeroizes page - 
*/ - lock_kernel(); - goto almost_end; + if (err) { + i_size_write(inode, old_i_size); + goto out; } block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); @@ -477,21 +463,8 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size) yield(); } - if (inode->i_size < old_i_size) { - /* - * now we should have enough space - * to allocate block for last byte - */ - err = ufs_alloc_lastblock(inode); - if (err) - /* - * looks like all the same - we have no space, - * but we truncate file already - */ - inode->i_size = (ufsi->i_lastfrag - 1) * uspi->s_fsize; - } -almost_end: inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; + ufsi->i_lastfrag = DIRECT_FRAGMENT; unlock_kernel(); mark_inode_dirty(inode); out: diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index eef6763f3a6..d2bbcd882a6 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -1835,40 +1835,47 @@ xfs_alloc_fix_freelist( &agbp))) return error; if (!pag->pagf_init) { + ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK); + ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); args->agbp = NULL; return 0; } } else agbp = NULL; - /* If this is a metadata preferred pag and we are user data + /* + * If this is a metadata preferred pag and we are user data * then try somewhere else if we are not being asked to * try harder at this point */ - if (pag->pagf_metadata && args->userdata && flags) { + if (pag->pagf_metadata && args->userdata && + (flags & XFS_ALLOC_FLAG_TRYLOCK)) { + ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); args->agbp = NULL; return 0; } - need = XFS_MIN_FREELIST_PAG(pag, mp); - delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0; - /* - * If it looks like there isn't a long enough extent, or enough - * total blocks, reject it. - */ - longest = (pag->pagf_longest > delta) ? - (pag->pagf_longest - delta) : - (pag->pagf_flcount > 0 || pag->pagf_longest > 0); - if (args->minlen + args->alignment + args->minalignslop - 1 > longest || - (!(flags & XFS_ALLOC_FLAG_FREEING) && - (int)(pag->pagf_freeblks + pag->pagf_flcount - - need - args->total) < - (int)args->minleft)) { - if (agbp) - xfs_trans_brelse(tp, agbp); - args->agbp = NULL; - return 0; + if (!(flags & XFS_ALLOC_FLAG_FREEING)) { + need = XFS_MIN_FREELIST_PAG(pag, mp); + delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0; + /* + * If it looks like there isn't a long enough extent, or enough + * total blocks, reject it. + */ + longest = (pag->pagf_longest > delta) ? + (pag->pagf_longest - delta) : + (pag->pagf_flcount > 0 || pag->pagf_longest > 0); + if ((args->minlen + args->alignment + args->minalignslop - 1) > + longest || + ((int)(pag->pagf_freeblks + pag->pagf_flcount - + need - args->total) < (int)args->minleft)) { + if (agbp) + xfs_trans_brelse(tp, agbp); + args->agbp = NULL; + return 0; + } } + /* * Get the a.g. freespace buffer. * Can fail if we're not blocking on locks, and it's held. @@ -1878,6 +1885,8 @@ xfs_alloc_fix_freelist( &agbp))) return error; if (agbp == NULL) { + ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK); + ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING)); args->agbp = NULL; return 0; } @@ -1887,22 +1896,24 @@ xfs_alloc_fix_freelist( */ agf = XFS_BUF_TO_AGF(agbp); need = XFS_MIN_FREELIST(agf, mp); - delta = need > be32_to_cpu(agf->agf_flcount) ? - (need - be32_to_cpu(agf->agf_flcount)) : 0; /* * If there isn't enough total or single-extent, reject it. */ - longest = be32_to_cpu(agf->agf_longest); - longest = (longest > delta) ? 
(longest - delta) : - (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); - if (args->minlen + args->alignment + args->minalignslop - 1 > longest || - (!(flags & XFS_ALLOC_FLAG_FREEING) && - (int)(be32_to_cpu(agf->agf_freeblks) + - be32_to_cpu(agf->agf_flcount) - need - args->total) < - (int)args->minleft)) { - xfs_trans_brelse(tp, agbp); - args->agbp = NULL; - return 0; + if (!(flags & XFS_ALLOC_FLAG_FREEING)) { + delta = need > be32_to_cpu(agf->agf_flcount) ? + (need - be32_to_cpu(agf->agf_flcount)) : 0; + longest = be32_to_cpu(agf->agf_longest); + longest = (longest > delta) ? (longest - delta) : + (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0); + if ((args->minlen + args->alignment + args->minalignslop - 1) > + longest || + ((int)(be32_to_cpu(agf->agf_freeblks) + + be32_to_cpu(agf->agf_flcount) - need - args->total) < + (int)args->minleft)) { + xfs_trans_brelse(tp, agbp); + args->agbp = NULL; + return 0; + } } /* * Make the freelist shorter if it's too long. @@ -1950,12 +1961,11 @@ xfs_alloc_fix_freelist( * on a completely full ag. */ if (targs.agbno == NULLAGBLOCK) { - if (!(flags & XFS_ALLOC_FLAG_FREEING)) { - xfs_trans_brelse(tp, agflbp); - args->agbp = NULL; - return 0; - } - break; + if (flags & XFS_ALLOC_FLAG_FREEING) + break; + xfs_trans_brelse(tp, agflbp); + args->agbp = NULL; + return 0; } /* * Put each allocated block on the list. @@ -2442,31 +2452,26 @@ xfs_free_extent( xfs_fsblock_t bno, /* starting block number of extent */ xfs_extlen_t len) /* length of extent */ { -#ifdef DEBUG - xfs_agf_t *agf; /* a.g. freespace header */ -#endif - xfs_alloc_arg_t args; /* allocation argument structure */ + xfs_alloc_arg_t args; int error; ASSERT(len != 0); + memset(&args, 0, sizeof(xfs_alloc_arg_t)); args.tp = tp; args.mp = tp->t_mountp; args.agno = XFS_FSB_TO_AGNO(args.mp, bno); ASSERT(args.agno < args.mp->m_sb.sb_agcount); args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); - args.alignment = 1; - args.minlen = args.minleft = args.minalignslop = 0; down_read(&args.mp->m_peraglock); args.pag = &args.mp->m_perag[args.agno]; if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING))) goto error0; #ifdef DEBUG ASSERT(args.agbp != NULL); - agf = XFS_BUF_TO_AGF(args.agbp); - ASSERT(args.agbno + len <= be32_to_cpu(agf->agf_length)); + ASSERT((args.agbno + len) <= + be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)); #endif - error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, - len, 0); + error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); error0: up_read(&args.mp->m_peraglock); return error; diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 3a613753906..bf46fae303a 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -4993,7 +4993,7 @@ xfs_bmapi( bma.firstblock = *firstblock; bma.alen = alen; bma.off = aoff; - bma.conv = (flags & XFS_BMAPI_CONVERT); + bma.conv = !!(flags & XFS_BMAPI_CONVERT); bma.wasdel = wasdelay; bma.minlen = minlen; bma.low = flist->xbf_low; diff --git a/include/asm-arm/arch-pxa/ssp.h b/include/asm-arm/arch-pxa/ssp.h index 949878c0d90..ea200551a75 100644 --- a/include/asm-arm/arch-pxa/ssp.h +++ b/include/asm-arm/arch-pxa/ssp.h @@ -40,8 +40,8 @@ struct ssp_dev { }; int ssp_write_word(struct ssp_dev *dev, u32 data); -int ssp_read_word(struct ssp_dev *dev); -void ssp_flush(struct ssp_dev *dev); +int ssp_read_word(struct ssp_dev *dev, u32 *data); +int ssp_flush(struct ssp_dev *dev); void ssp_enable(struct ssp_dev *dev); void ssp_disable(struct ssp_dev *dev); void ssp_save_state(struct ssp_dev *dev, struct ssp_state 
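The one-liner in fs/xfs/xfs_bmap.c above changes bma.conv from (flags & XFS_BMAPI_CONVERT) to !!(flags & XFS_BMAPI_CONVERT). The double negation collapses any non-zero mask result to exactly 1; without it, a flag bit lying above the width of a narrow destination field would silently truncate to 0 and the flag would be lost. A tiny self-contained demonstration of the idiom (the field width and flag value here are made up):

#include <assert.h>

#define FLAG_CONVERT 0x100              /* a bit above the low byte */

struct args {
    unsigned char conv;                 /* narrow flag field */
};

int main(void)
{
    struct args a;
    int flags = FLAG_CONVERT;

    a.conv = !!(flags & FLAG_CONVERT);                  /* any non-zero -> 1 */
    assert(a.conv == 1);

    a.conv = (unsigned char)(flags & FLAG_CONVERT);     /* 0x100 truncates to 0 */
    assert(a.conv == 0);

    return 0;
}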
*ssp); diff --git a/include/asm-arm/arch-s3c2410/dma.h b/include/asm-arm/arch-s3c2410/dma.h index 72964f9b841..7463fd5252c 100644 --- a/include/asm-arm/arch-s3c2410/dma.h +++ b/include/asm-arm/arch-s3c2410/dma.h @@ -104,6 +104,7 @@ enum s3c2410_chan_op_e { S3C2410_DMAOP_RESUME, S3C2410_DMAOP_FLUSH, S3C2410_DMAOP_TIMEOUT, /* internal signal to handler */ + S3C2410_DMAOP_STARTED, /* indicate channel started */ }; typedef enum s3c2410_chan_op_e s3c2410_chan_op_t; diff --git a/include/asm-arm/arch-s3c2410/regs-rtc.h b/include/asm-arm/arch-s3c2410/regs-rtc.h index 228983f89bc..0fbec07bb6b 100644 --- a/include/asm-arm/arch-s3c2410/regs-rtc.h +++ b/include/asm-arm/arch-s3c2410/regs-rtc.h @@ -18,7 +18,7 @@ #ifndef __ASM_ARCH_REGS_RTC_H #define __ASM_ARCH_REGS_RTC_H __FILE__ -#define S3C2410_RTCREG(x) ((x) + S3C24XX_VA_RTC) +#define S3C2410_RTCREG(x) (x) #define S3C2410_RTCCON S3C2410_RTCREG(0x40) #define S3C2410_RTCCON_RTCEN (1<<0) diff --git a/include/asm-arm/hardware/ssp.h b/include/asm-arm/hardware/ssp.h index 28aa11b769c..3b42e181997 100644 --- a/include/asm-arm/hardware/ssp.h +++ b/include/asm-arm/hardware/ssp.h @@ -16,8 +16,8 @@ struct ssp_state { }; int ssp_write_word(u16 data); -int ssp_read_word(void); -void ssp_flush(void); +int ssp_read_word(u16 *data); +int ssp_flush(void); void ssp_enable(void); void ssp_disable(void); void ssp_save_state(struct ssp_state *ssp); diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h index b3479fc1cc8..bf7b9dea30f 100644 --- a/include/asm-arm/io.h +++ b/include/asm-arm/io.h @@ -291,5 +291,12 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); */ #define xlate_dev_kmem_ptr(p) p +/* + * Register ISA memory and port locations for glibc iopl/inb/outb + * emulation. + */ +extern void register_isa_ports(unsigned int mmio, unsigned int io, + unsigned int io_shift); + #endif /* __KERNEL__ */ #endif /* __ASM_ARM_IO_H */ diff --git a/include/asm-arm/procinfo.h b/include/asm-arm/procinfo.h index edb7b6502fc..91a31adfa8a 100644 --- a/include/asm-arm/procinfo.h +++ b/include/asm-arm/procinfo.h @@ -55,5 +55,6 @@ extern unsigned int elf_hwcap; #define HWCAP_VFP 64 #define HWCAP_EDSP 128 #define HWCAP_JAVA 256 +#define HWCAP_IWMMXT 512 #endif diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h index 96adbabec74..b01a7ec409c 100644 --- a/include/asm-i386/alternative.h +++ b/include/asm-i386/alternative.h @@ -88,9 +88,6 @@ static inline void alternatives_smp_switch(int smp) {} /* * Alternative inline assembly for SMP. * - * alternative_smp() takes two versions (SMP first, UP second) and is - * for more complex stuff such as spinlocks. - * * The LOCK_PREFIX macro defined here replaces the LOCK and * LOCK_PREFIX macros used everywhere in the source tree. * @@ -110,21 +107,6 @@ static inline void alternatives_smp_switch(int smp) {} */ #ifdef CONFIG_SMP -#define alternative_smp(smpinstr, upinstr, args...) 
\ - asm volatile ("661:\n\t" smpinstr "\n662:\n" \ - ".section .smp_altinstructions,\"a\"\n" \ - " .align 4\n" \ - " .long 661b\n" /* label */ \ - " .long 663f\n" /* new instruction */ \ - " .byte 0x68\n" /* X86_FEATURE_UP */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .smp_altinstr_replacement,\"awx\"\n" \ - "663:\n\t" upinstr "\n" /* replacement */ \ - "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \ - ".previous" : args) - #define LOCK_PREFIX \ ".section .smp_locks,\"a\"\n" \ " .align 4\n" \ @@ -133,8 +115,6 @@ static inline void alternatives_smp_switch(int smp) {} "661:\n\tlock; " #else /* ! CONFIG_SMP */ -#define alternative_smp(smpinstr, upinstr, args...) \ - asm volatile (upinstr : args) #define LOCK_PREFIX "" #endif diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h index e33e9f9e4c6..22cb07cc8f3 100644 --- a/include/asm-i386/mmzone.h +++ b/include/asm-i386/mmzone.h @@ -14,7 +14,7 @@ extern struct pglist_data *node_data[]; #ifdef CONFIG_X86_NUMAQ #include <asm/numaq.h> -#else /* summit or generic arch */ +#elif defined(CONFIG_ACPI_SRAT)/* summit or generic arch */ #include <asm/srat.h> #endif diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h index 96b0bef2ea5..3ac1ba98b1b 100644 --- a/include/asm-i386/rwlock.h +++ b/include/asm-i386/rwlock.h @@ -21,22 +21,20 @@ #define RW_LOCK_BIAS_STR "0x01000000" #define __build_read_lock_ptr(rw, helper) \ - alternative_smp("lock; subl $1,(%0)\n\t" \ + asm volatile(LOCK_PREFIX " ; subl $1,(%0)\n\t" \ "jns 1f\n" \ "call " helper "\n\t" \ - "1:\n", \ - "subl $1,(%0)\n\t", \ + "1:\n" \ :"a" (rw) : "memory") #define __build_read_lock_const(rw, helper) \ - alternative_smp("lock; subl $1,%0\n\t" \ + asm volatile(LOCK_PREFIX " ; subl $1,%0\n\t" \ "jns 1f\n" \ "pushl %%eax\n\t" \ "leal %0,%%eax\n\t" \ "call " helper "\n\t" \ "popl %%eax\n\t" \ - "1:\n", \ - "subl $1,%0\n\t", \ + "1:\n" : \ "+m" (*(volatile int *)rw) : : "memory") #define __build_read_lock(rw, helper) do { \ @@ -47,7 +45,7 @@ } while (0) #define __build_write_lock_ptr(rw, helper) \ - alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ + asm volatile(LOCK_PREFIX " ; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ "jz 1f\n" \ "call " helper "\n\t" \ "1:\n", \ @@ -55,7 +53,7 @@ :"a" (rw) : "memory") #define __build_write_lock_const(rw, helper) \ - alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ + asm volatile(LOCK_PREFIX " ; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ "jz 1f\n" \ "pushl %%eax\n\t" \ "leal %0,%%eax\n\t" \ diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index d816c62a7a1..d1020363c41 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h @@ -22,7 +22,7 @@ #define __raw_spin_lock_string \ "\n1:\t" \ - "lock ; decb %0\n\t" \ + LOCK_PREFIX " ; decb %0\n\t" \ "jns 3f\n" \ "2:\t" \ "rep;nop\n\t" \ @@ -38,7 +38,7 @@ */ #define __raw_spin_lock_string_flags \ "\n1:\t" \ - "lock ; decb %0\n\t" \ + LOCK_PREFIX " ; decb %0\n\t" \ "jns 5f\n" \ "2:\t" \ "testl $0x200, %1\n\t" \ @@ -57,15 +57,9 @@ "jmp 4b\n" \ "5:\n\t" -#define __raw_spin_lock_string_up \ - "\n\tdecb %0" - static inline void __raw_spin_lock(raw_spinlock_t *lock) { - alternative_smp( - __raw_spin_lock_string, - __raw_spin_lock_string_up, - "+m" (lock->slock) : : "memory"); + asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory"); } /* @@ -76,10 +70,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) #ifndef CONFIG_PROVE_LOCKING static inline 
void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { - alternative_smp( - __raw_spin_lock_string_flags, - __raw_spin_lock_string_up, - "+m" (lock->slock) : "r" (flags) : "memory"); + asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory"); } #endif diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index fc1c8ddae14..d983b74e4d9 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h @@ -324,8 +324,6 @@ #define __NR_vmsplice 316 #define __NR_move_pages 317 -#ifdef __KERNEL__ - #define NR_syscalls 318 /* @@ -425,6 +423,8 @@ __asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \ __syscall_return(type,__res); \ } +#ifdef __KERNEL__ + #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h index 69f0f1df672..4c1a0b96856 100644 --- a/include/asm-i386/unwind.h +++ b/include/asm-i386/unwind.h @@ -87,6 +87,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info) #else #define UNW_PC(frame) ((void)(frame), 0) +#define UNW_SP(frame) ((void)(frame), 0) static inline int arch_unw_user_mode(const void *info) { diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index bd4452bda35..ba826b3f75b 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h @@ -706,12 +706,9 @@ static inline int sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array) { struct ia64_sal_retval ret_stuff; - unsigned long irq_flags; - local_irq_save(irq_flags); ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len, (u64)nasid_array, perms, 0, 0, 0); - local_irq_restore(irq_flags); return ret_stuff.status; } #define SN_MEMPROT_ACCESS_CLASS_0 0x14a080 @@ -1143,12 +1140,9 @@ static inline int sn_inject_error(u64 paddr, u64 *data, u64 *ecc) { struct ia64_sal_retval ret_stuff; - unsigned long irq_flags; - local_irq_save(irq_flags); ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data, (u64)ecc, 0, 0, 0, 0); - local_irq_restore(irq_flags); return ret_stuff.status; } diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h index 9bd2f9bf329..6f807e0193b 100644 --- a/include/asm-ia64/sn/xp.h +++ b/include/asm-ia64/sn/xp.h @@ -60,23 +60,37 @@ * the bte_copy() once in the hope that the failure was due to a temporary * aberration (i.e., the link going down temporarily). * - * See bte_copy for definition of the input parameters. + * src - physical address of the source of the transfer. + * vdst - virtual address of the destination of the transfer. + * len - number of bytes to transfer from source to destination. + * mode - see bte_copy() for definition. + * notification - see bte_copy() for definition. * * Note: xp_bte_copy() should never be called while holding a spinlock. */ static inline bte_result_t -xp_bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification) +xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification) { bte_result_t ret; + u64 pdst = ia64_tpa(vdst); - ret = bte_copy(src, dest, len, mode, notification); + /* + * Ensure that the physically mapped memory is contiguous. + * + * We do this by ensuring that the memory is from region 7 only. + * If the need should arise to use memory from one of the other + * regions, then modify the BUG_ON() statement to ensure that the + * memory from that region is always physically contiguous. 
+ */ + BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL); + ret = bte_copy(src, pdst, len, mode, notification); if (ret != BTE_SUCCESS) { if (!in_interrupt()) { cond_resched(); } - ret = bte_copy(src, dest, len, mode, notification); + ret = bte_copy(src, pdst, len, mode, notification); } return ret; diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h index b72af597878..35e1386f37a 100644 --- a/include/asm-ia64/sn/xpc.h +++ b/include/asm-ia64/sn/xpc.h @@ -683,7 +683,9 @@ extern struct xpc_vars *xpc_vars; extern struct xpc_rsvd_page *xpc_rsvd_page; extern struct xpc_vars_part *xpc_vars_part; extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; -extern char xpc_remote_copy_buffer[]; +extern char *xpc_remote_copy_buffer; +extern void *xpc_remote_copy_buffer_base; +extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); extern void xpc_allow_IPI_ops(void); extern void xpc_restrict_IPI_ops(void); diff --git a/include/asm-ppc/cpm2.h b/include/asm-ppc/cpm2.h index c70344b9104..f6a7ff04ffe 100644 --- a/include/asm-ppc/cpm2.h +++ b/include/asm-ppc/cpm2.h @@ -1093,5 +1093,100 @@ typedef struct im_idma { #define FCC_PSMR_RMII ((uint)0x00020000) /* Use RMII interface */ +/* FCC iop & clock configuration. BSP code is responsible to define Fx_RXCLK & Fx_TXCLK + * in order to use clock-computing stuff below for the FCC x + */ + +/* Automatically generates register configurations */ +#define PC_CLK(x) ((uint)(1<<(x-1))) /* FCC CLK I/O ports */ + +#define CMXFCR_RF1CS(x) ((uint)((x-5)<<27)) /* FCC1 Receive Clock Source */ +#define CMXFCR_TF1CS(x) ((uint)((x-5)<<24)) /* FCC1 Transmit Clock Source */ +#define CMXFCR_RF2CS(x) ((uint)((x-9)<<19)) /* FCC2 Receive Clock Source */ +#define CMXFCR_TF2CS(x) ((uint)((x-9)<<16)) /* FCC2 Transmit Clock Source */ +#define CMXFCR_RF3CS(x) ((uint)((x-9)<<11)) /* FCC3 Receive Clock Source */ +#define CMXFCR_TF3CS(x) ((uint)((x-9)<<8)) /* FCC3 Transmit Clock Source */ + +#define PC_F1RXCLK PC_CLK(F1_RXCLK) +#define PC_F1TXCLK PC_CLK(F1_TXCLK) +#define CMX1_CLK_ROUTE (CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK)) +#define CMX1_CLK_MASK ((uint)0xff000000) + +#define PC_F2RXCLK PC_CLK(F2_RXCLK) +#define PC_F2TXCLK PC_CLK(F2_TXCLK) +#define CMX2_CLK_ROUTE (CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK)) +#define CMX2_CLK_MASK ((uint)0x00ff0000) + +#define PC_F3RXCLK PC_CLK(F3_RXCLK) +#define PC_F3TXCLK PC_CLK(F3_TXCLK) +#define CMX3_CLK_ROUTE (CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK)) +#define CMX3_CLK_MASK ((uint)0x0000ff00) + +#define CPMUX_CLK_MASK (CMX3_CLK_MASK | CMX2_CLK_MASK) +#define CPMUX_CLK_ROUTE (CMX3_CLK_ROUTE | CMX2_CLK_ROUTE) + +#define CLK_TRX (PC_F3TXCLK | PC_F3RXCLK | PC_F2TXCLK | PC_F2RXCLK) + +/* I/O Pin assignment for FCC1. I don't yet know the best way to do this, + * but there is little variation among the choices. + */ +#define PA1_COL 0x00000001U +#define PA1_CRS 0x00000002U +#define PA1_TXER 0x00000004U +#define PA1_TXEN 0x00000008U +#define PA1_RXDV 0x00000010U +#define PA1_RXER 0x00000020U +#define PA1_TXDAT 0x00003c00U +#define PA1_RXDAT 0x0003c000U +#define PA1_PSORA0 (PA1_RXDAT | PA1_TXDAT) +#define PA1_PSORA1 (PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \ + PA1_RXDV | PA1_RXER) +#define PA1_DIRA0 (PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV) +#define PA1_DIRA1 (PA1_TXDAT | PA1_TXEN | PA1_TXER) + + +/* I/O Pin assignment for FCC2. I don't yet know the best way to do this, + * but there is little variation among the choices. 
+ */ +#define PB2_TXER 0x00000001U +#define PB2_RXDV 0x00000002U +#define PB2_TXEN 0x00000004U +#define PB2_RXER 0x00000008U +#define PB2_COL 0x00000010U +#define PB2_CRS 0x00000020U +#define PB2_TXDAT 0x000003c0U +#define PB2_RXDAT 0x00003c00U +#define PB2_PSORB0 (PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \ + PB2_RXER | PB2_RXDV | PB2_TXER) +#define PB2_PSORB1 (PB2_TXEN) +#define PB2_DIRB0 (PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV) +#define PB2_DIRB1 (PB2_TXDAT | PB2_TXEN | PB2_TXER) + + +/* I/O Pin assignment for FCC3. I don't yet know the best way to do this, + * but there is little variation among the choices. + */ +#define PB3_RXDV 0x00004000U +#define PB3_RXER 0x00008000U +#define PB3_TXER 0x00010000U +#define PB3_TXEN 0x00020000U +#define PB3_COL 0x00040000U +#define PB3_CRS 0x00080000U +#define PB3_TXDAT 0x0f000000U +#define PC3_TXDAT 0x00000010U +#define PB3_RXDAT 0x00f00000U +#define PB3_PSORB0 (PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \ + PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN) +#define PB3_PSORB1 0 +#define PB3_DIRB0 (PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV) +#define PB3_DIRB1 (PB3_TXDAT | PB3_TXEN | PB3_TXER) +#define PC3_DIRC1 (PC3_TXDAT) + +/* Handy macro to specify mem for FCCs*/ +#define FCC_MEM_OFFSET(x) (CPM_FCC_SPECIAL_BASE + (x*128)) +#define FCC1_MEM_OFFSET FCC_MEM_OFFSET(0) +#define FCC2_MEM_OFFSET FCC_MEM_OFFSET(1) +#define FCC2_MEM_OFFSET FCC_MEM_OFFSET(2) + #endif /* __CPM2__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-ppc/mpc8260.h b/include/asm-ppc/mpc8260.h index 4b93481e767..23579d4afae 100644 --- a/include/asm-ppc/mpc8260.h +++ b/include/asm-ppc/mpc8260.h @@ -82,6 +82,7 @@ enum ppc_sys_devices { MPC82xx_CPM_SMC2, MPC82xx_CPM_USB, MPC82xx_SEC1, + MPC82xx_MDIO_BB, NUM_PPC_SYS_DEVS, }; diff --git a/include/asm-ppc/mpc8xx.h b/include/asm-ppc/mpc8xx.h index adcce33f20a..d3a2f2fe230 100644 --- a/include/asm-ppc/mpc8xx.h +++ b/include/asm-ppc/mpc8xx.h @@ -110,6 +110,7 @@ enum ppc_sys_devices { MPC8xx_CPM_SMC1, MPC8xx_CPM_SMC2, MPC8xx_CPM_USB, + MPC8xx_MDIO_FEC, NUM_PPC_SYS_DEVS, }; diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 1ba19eb34ce..ebfe395cfb8 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -234,7 +234,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) sz_bits = 0UL; if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) { __asm__ __volatile__( - "\n661: sethi %uhi(%1), %0\n" + "\n661: sethi %%uhi(%1), %0\n" " sllx %0, 32, %0\n" " .section .sun4v_2insn_patch, \"ax\"\n" " .word 661b\n" diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h index aa67bfd1b3c..a584826cc57 100644 --- a/include/asm-x86_64/alternative.h +++ b/include/asm-x86_64/alternative.h @@ -4,6 +4,7 @@ #ifdef __KERNEL__ #include <linux/types.h> +#include <asm/cpufeature.h> struct alt_instr { u8 *instr; /* original instruction */ @@ -102,9 +103,6 @@ static inline void alternatives_smp_switch(int smp) {} /* * Alternative inline assembly for SMP. * - * alternative_smp() takes two versions (SMP first, UP second) and is - * for more complex stuff such as spinlocks. - * * The LOCK_PREFIX macro defined here replaces the LOCK and * LOCK_PREFIX macros used everywhere in the source tree. * @@ -124,21 +122,6 @@ static inline void alternatives_smp_switch(int smp) {} */ #ifdef CONFIG_SMP -#define alternative_smp(smpinstr, upinstr, args...) 
\ - asm volatile ("661:\n\t" smpinstr "\n662:\n" \ - ".section .smp_altinstructions,\"a\"\n" \ - " .align 8\n" \ - " .quad 661b\n" /* label */ \ - " .quad 663f\n" /* new instruction */ \ - " .byte 0x66\n" /* X86_FEATURE_UP */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .smp_altinstr_replacement,\"awx\"\n" \ - "663:\n\t" upinstr "\n" /* replacement */ \ - "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \ - ".previous" : args) - #define LOCK_PREFIX \ ".section .smp_locks,\"a\"\n" \ " .align 8\n" \ @@ -147,8 +130,6 @@ static inline void alternatives_smp_switch(int smp) {} "661:\n\tlock; " #else /* ! CONFIG_SMP */ -#define alternative_smp(smpinstr, upinstr, args...) \ - asm volatile (upinstr : args) #define LOCK_PREFIX "" #endif diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index 3b3c1217fe6..de9c3147ee4 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h @@ -232,8 +232,14 @@ struct tss_struct { unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; } __attribute__((packed)) ____cacheline_aligned; + extern struct cpuinfo_x86 boot_cpu_data; DECLARE_PER_CPU(struct tss_struct,init_tss); +/* Save the original ist values for checking stack pointers during debugging */ +struct orig_ist { + unsigned long ist[7]; +}; +DECLARE_PER_CPU(struct orig_ist, orig_ist); #ifdef CONFIG_X86_VSMP #define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 8d3421996f9..248a79f0eaf 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h @@ -21,7 +21,7 @@ #define __raw_spin_lock_string \ "\n1:\t" \ - "lock ; decl %0\n\t" \ + LOCK_PREFIX " ; decl %0\n\t" \ "js 2f\n" \ LOCK_SECTION_START("") \ "2:\t" \ @@ -40,10 +40,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) { - alternative_smp( - __raw_spin_lock_string, - __raw_spin_lock_string_up, - "=m" (lock->slock) : : "memory"); + asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory"); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) @@ -125,12 +122,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) static inline void __raw_read_unlock(raw_rwlock_t *rw) { - asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); + asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory"); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { - asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0" + asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0" : "=m" (rw->lock) : : "memory"); } diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 94387c915e5..2d89d309a2a 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -620,8 +620,6 @@ __SYSCALL(__NR_vmsplice, sys_vmsplice) #define __NR_move_pages 279 __SYSCALL(__NR_move_pages, sys_move_pages) -#ifdef __KERNEL__ - #define __NR_syscall_max __NR_move_pages #ifndef __NO_STUBS @@ -746,6 +744,8 @@ __syscall_return(type,__res); \ #else /* __KERNEL_SYSCALLS__ */ +#ifdef __KERNEL__ + #include <linux/syscalls.h> #include <asm/ptrace.h> @@ -838,9 +838,9 @@ asmlinkage long sys_rt_sigaction(int sig, struct sigaction __user *oact, size_t sigsetsize); -#endif /* __ASSEMBLY__ */ +#endif -#endif /* __NO_STUBS */ +#endif /* * "Conditional" syscalls @@ -850,5 +850,6 @@ asmlinkage long sys_rt_sigaction(int sig, */ #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") -#endif 
/* __KERNEL__ */ +#endif + #endif diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h index f3e7124effe..1f6e9bfb569 100644 --- a/include/asm-x86_64/unwind.h +++ b/include/asm-x86_64/unwind.h @@ -95,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info) #else #define UNW_PC(frame) ((void)(frame), 0) +#define UNW_SP(frame) ((void)(frame), 0) static inline int arch_unw_user_mode(const void *info) { diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h index 269d000bb2a..bea0255196c 100644 --- a/include/linux/compat_ioctl.h +++ b/include/linux/compat_ioctl.h @@ -216,6 +216,7 @@ COMPATIBLE_IOCTL(VT_RESIZE) COMPATIBLE_IOCTL(VT_RESIZEX) COMPATIBLE_IOCTL(VT_LOCKSWITCH) COMPATIBLE_IOCTL(VT_UNLOCKSWITCH) +COMPATIBLE_IOCTL(VT_GETHIFONTMASK) /* Little p (/dev/rtc, /dev/envctrl, etc.) */ COMPATIBLE_IOCTL(RTC_AIE_ON) COMPATIBLE_IOCTL(RTC_AIE_OFF) diff --git a/include/linux/fb.h b/include/linux/fb.h index 4ad0673b199..2f335e96601 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -1,7 +1,6 @@ #ifndef _LINUX_FB_H #define _LINUX_FB_H -#include <linux/backlight.h> #include <asm/types.h> /* Definitions of frame buffers */ @@ -381,6 +380,7 @@ struct fb_cursor { #include <linux/workqueue.h> #include <linux/notifier.h> #include <linux/list.h> +#include <linux/backlight.h> #include <asm/io.h> struct vm_area_struct; diff --git a/include/linux/fs.h b/include/linux/fs.h index 25610205c90..555bc195c42 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -570,13 +570,14 @@ struct inode { * 3: quota file * * The locking order between these classes is - * parent -> child -> normal -> quota + * parent -> child -> normal -> xattr -> quota */ enum inode_i_mutex_lock_class { I_MUTEX_NORMAL, I_MUTEX_PARENT, I_MUTEX_CHILD, + I_MUTEX_XATTR, I_MUTEX_QUOTA }; diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h index 783c476b867..74ed35a00a9 100644 --- a/include/linux/fs_enet_pd.h +++ b/include/linux/fs_enet_pd.h @@ -69,34 +69,21 @@ enum fs_ioport { fsiop_porte, }; -struct fs_mii_bus_info { - int method; /* mii method */ - int id; /* the id of the mii_bus */ - int disable_aneg; /* if the controller needs to negothiate speed & duplex */ - int lpa; /* the default board-specific vallues will be applied otherwise */ - - union { - struct { - int duplex; - int speed; - } fixed; - - struct { - /* nothing */ - } fec; - - struct { - /* nothing */ - } scc; - - struct { - int mdio_port; /* port & bit for MDIO */ - int mdio_bit; - int mdc_port; /* port & bit for MDC */ - int mdc_bit; - int delay; /* delay in us */ - } bitbang; - } i; +struct fs_mii_bit { + u32 offset; + u8 bit; + u8 polarity; +}; +struct fs_mii_bb_platform_info { + struct fs_mii_bit mdio_dir; + struct fs_mii_bit mdio_dat; + struct fs_mii_bit mdc_dat; + int mdio_port; /* port & bit for MDIO */ + int mdio_bit; + int mdc_port; /* port & bit for MDC */ + int mdc_bit; + int delay; /* delay in us */ + int irq[32]; /* irqs per phy's */ }; struct fs_platform_info { @@ -119,6 +106,7 @@ struct fs_platform_info { u32 device_flags; int phy_addr; /* the phy address (-1 no phy) */ + const char* bus_id; int phy_irq; /* the phy irq (if it exists) */ const struct fs_mii_bus_info *bus_info; @@ -130,6 +118,10 @@ struct fs_platform_info { int napi_weight; /* NAPI weight */ int use_rmii; /* use RMII mode */ + int has_phy; /* if the network is phy container as well...*/ +}; +struct fs_mii_fec_platform_info { + u32 irq[32]; + u32 mii_speed; }; - #endif diff --git a/include/linux/if_vlan.h 
b/include/linux/if_vlan.h index 383627ad328..ab274083274 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -155,6 +155,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, { struct net_device_stats *stats; + if (skb_bond_should_drop(skb)) { + dev_kfree_skb_any(skb); + return NET_RX_DROP; + } + skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK]; if (skb->dev == NULL) { dev_kfree_skb_any(skb); diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index 88d5961f7a3..8e2042b9d47 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -59,27 +59,6 @@ static inline int task_nice_ioprio(struct task_struct *task) /* * For inheritance, return the highest of the two given priorities */ -static inline int ioprio_best(unsigned short aprio, unsigned short bprio) -{ - unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); - unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); - - if (!ioprio_valid(aprio)) - return bprio; - if (!ioprio_valid(bprio)) - return aprio; - - if (aclass == IOPRIO_CLASS_NONE) - aclass = IOPRIO_CLASS_BE; - if (bclass == IOPRIO_CLASS_NONE) - bclass = IOPRIO_CLASS_BE; - - if (aclass == bclass) - return min(aprio, bprio); - if (aclass > bclass) - return bprio; - else - return aprio; -} +extern int ioprio_best(unsigned short aprio, unsigned short bprio); #endif diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 20eb34403d0..a04c154c520 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -72,6 +72,9 @@ extern int journal_enable_debug; #endif extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry); +extern void * jbd_slab_alloc(size_t size, gfp_t flags); +extern void jbd_slab_free(void *ptr, size_t size); + #define jbd_kmalloc(size, flags) \ __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry) #define jbd_rep_kmalloc(size, flags) \ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 181c69cad4e..851aa1bcfc1 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -210,6 +210,7 @@ extern enum system_states { extern void dump_stack(void); #ifdef DEBUG +/* If you are writing a driver, please use dev_dbg instead */ #define pr_debug(fmt,arg...) \ printk(KERN_DEBUG fmt,##arg) #else diff --git a/include/linux/mm.h b/include/linux/mm.h index 990957e0929..f0b135cd86d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -336,6 +336,7 @@ static inline void init_page_count(struct page *page) } void put_page(struct page *page); +void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 75f02d8c6ed..50a4719512e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -320,6 +320,9 @@ struct net_device #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) + /* List of features with software fallbacks. */ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) + #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) #define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) @@ -1012,6 +1015,30 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) unlikely(skb->ip_summed != CHECKSUM_HW)); } +/* On bonding slaves other than the currently active slave, suppress + * duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast. 
+ */ +static inline int skb_bond_should_drop(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + struct net_device *master = dev->master; + + if (master && + (dev->priv_flags & IFF_SLAVE_INACTIVE)) { + if (master->priv_flags & IFF_MASTER_ALB) { + if (skb->pkt_type != PACKET_BROADCAST && + skb->pkt_type != PACKET_MULTICAST) + return 0; + } + if (master->priv_flags & IFF_MASTER_8023AD && + skb->protocol == __constant_htons(ETH_P_SLOW)) + return 0; + + return 1; + } + return 0; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_DEV_H */ diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 10c13dc4665..427c67ff89e 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -48,15 +48,25 @@ enum nf_br_hook_priorities { /* Only used in br_forward.c */ static inline -void nf_bridge_maybe_copy_header(struct sk_buff *skb) +int nf_bridge_maybe_copy_header(struct sk_buff *skb) { + int err; + if (skb->nf_bridge) { if (skb->protocol == __constant_htons(ETH_P_8021Q)) { + err = skb_cow(skb, 18); + if (err) + return err; memcpy(skb->data - 18, skb->nf_bridge->data, 18); skb_push(skb, 4); - } else + } else { + err = skb_cow(skb, 16); + if (err) + return err; memcpy(skb->data - 16, skb->nf_bridge->data, 16); + } } + return 0; } /* This is called by the IP fragmenting code and it ensures there is diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 2d3fb6416d9..db9cbf68e12 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -659,7 +659,7 @@ struct nfs4_rename_res { struct nfs4_setclientid { const nfs4_verifier * sc_verifier; /* request */ unsigned int sc_name_len; - char sc_name[32]; /* request */ + char sc_name[48]; /* request */ u32 sc_prog; /* request */ unsigned int sc_netid_len; char sc_netid[4]; /* request */ diff --git a/include/linux/node.h b/include/linux/node.h index 81dcec84cd8..bc001bc225c 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -30,12 +30,20 @@ extern struct node node_devices[]; extern int register_node(struct node *, int, struct node *); extern void unregister_node(struct node *node); +#ifdef CONFIG_NUMA extern int register_one_node(int nid); extern void unregister_one_node(int nid); -#ifdef CONFIG_NUMA extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); #else +static inline int register_one_node(int nid) +{ + return 0; +} +static inline int unregister_one_node(int nid) +{ + return 0; +} static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) { return 0; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4eae06b08cf..4c2839eab7f 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1726,6 +1726,9 @@ #define PCI_VENDOR_ID_DOMEX 0x134a #define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001 +#define PCI_VENDOR_ID_INTASHIELD 0x135a +#define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80 + #define PCI_VENDOR_ID_QUATECH 0x135C #define PCI_DEVICE_ID_QUATECH_QSC100 0x0010 #define PCI_DEVICE_ID_QUATECH_DSC100 0x0020 diff --git a/include/linux/phy.h b/include/linux/phy.h index 331521a10a2..9447a57ee8a 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -378,6 +378,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct mii_ioctl_data *mii_data, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); +struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id); 
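The skb_bond_should_drop() inline added to include/linux/netdevice.h above centralizes the bonding duplicate-suppression policy that net/core/dev.c previously open-coded: the VLAN hardware-acceleration receive path in include/linux/if_vlan.h (above) and the skb_bond() helper in net/core/dev.c (later in this diff) both call it before accepting a frame that arrived on an inactive slave. A minimal sketch of the call pattern, assuming a hypothetical receive hook named my_rx() used purely for illustration (everything it needs comes from <linux/netdevice.h>):

	static int my_rx(struct sk_buff *skb)
	{
		/* On an inactive bonding slave the frame is a duplicate and is
		 * dropped, except 802.3ad ETH_P_SLOW frames and ALB
		 * non-mcast/bcast traffic, which skb_bond_should_drop()
		 * deliberately lets through. */
		if (skb_bond_should_drop(skb)) {
			dev_kfree_skb_any(skb);
			return NET_RX_DROP;
		}
		/* ... normal receive processing continues here ... */
		return NET_RX_SUCCESS;
	}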
extern struct bus_type mdio_bus_type; #endif /* __PHY_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 19c96d498e2..755e9cddac4 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1040,6 +1040,21 @@ static inline int pskb_trim(struct sk_buff *skb, unsigned int len) } /** + * pskb_trim_unique - remove end from a paged unique (not cloned) buffer + * @skb: buffer to alter + * @len: new length + * + * This is identical to pskb_trim except that the caller knows that + * the skb is not cloned so we should never get an error due to out- + * of-memory. + */ +static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) +{ + int err = pskb_trim(skb, len); + BUG_ON(err); +} + +/** * skb_orphan - orphan a buffer * @skb: buffer to orphan * @@ -1081,7 +1096,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) * the headroom they think they need without accounting for the * built in space. The built in space is used for optimisations. * - * %NULL is returned in there is no free memory. + * %NULL is returned if there is no free memory. */ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask) @@ -1101,7 +1116,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, * the headroom they think they need without accounting for the * built in space. The built in space is used for optimisations. * - * %NULL is returned in there is no free memory. Although this function + * %NULL is returned if there is no free memory. Although this function * allocates memory it can be called from an interrupt. */ static inline struct sk_buff *dev_alloc_skb(unsigned int length) diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 2c2189cb30a..a481472c948 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -42,9 +42,9 @@ RPC_I(struct inode *inode) extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *); extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *); -extern int rpc_rmdir(char *); +extern int rpc_rmdir(struct dentry *); extern struct dentry *rpc_mkpipe(char *, void *, struct rpc_pipe_ops *, int flags); -extern int rpc_unlink(char *); +extern int rpc_unlink(struct dentry *); extern struct vfsmount *rpc_get_mount(void); extern void rpc_put_mount(void); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 840e47a4ccc..3a0cca255b7 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -37,7 +37,7 @@ extern unsigned int xprt_max_resvport; #define RPC_MIN_RESVPORT (1U) #define RPC_MAX_RESVPORT (65535U) -#define RPC_DEF_MIN_RESVPORT (650U) +#define RPC_DEF_MIN_RESVPORT (665U) #define RPC_DEF_MAX_RESVPORT (1023U) /* diff --git a/include/linux/tty.h b/include/linux/tty.h index e421d5e3481..04827ca6578 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -59,6 +59,7 @@ struct tty_bufhead { struct tty_buffer *head; /* Queue head */ struct tty_buffer *tail; /* Active buffer */ struct tty_buffer *free; /* Free queue head */ + int memory_used; /* Buffer space used excluding free queue */ }; /* * The pty uses char_buf and flag_buf as a contiguous buffer diff --git a/include/linux/vt.h b/include/linux/vt.h index 8ab334a4822..ba806e8711b 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -60,5 +60,6 @@ struct vt_consize { #define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */ #define VT_LOCKSWITCH 0x560B /* disallow vt switching */ #define VT_UNLOCKSWITCH 
0x560C /* allow vt switching */ +#define VT_GETHIFONTMASK 0x560D /* return hi font mask */ #endif /* _LINUX_VT_H */ diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index f8665326ed9..600d61d7d2a 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -16,7 +16,7 @@ #include <linux/device.h> #include <linux/mutex.h> #include <linux/compiler.h> /* need __user */ -#ifdef CONFIG_VIDEO_V4L1 +#ifdef CONFIG_VIDEO_V4L1_COMPAT #include <linux/videodev.h> #else #include <linux/videodev2.h> diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index a9663b49ea5..92eae0e0f3f 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -404,19 +404,6 @@ static inline int sctp_list_single_entry(struct list_head *head) return ((head->next != head) && (head->next == head->prev)); } -/* Calculate the size (in bytes) occupied by the data of an iovec. */ -static inline size_t get_user_iov_size(struct iovec *iov, int iovlen) -{ - size_t retval = 0; - - for (; iovlen > 0; --iovlen) { - retval += iov->iov_len; - iov++; - } - - return retval; -} - /* Generate a random jitter in the range of -50% ~ +50% of input RTO. */ static inline __s32 sctp_jitter(__u32 rto) { diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 1eac3d0eb7a..de313de4fef 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -221,8 +221,7 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *, const struct sctp_chunk *, __u32 tsn); struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, - const struct sctp_chunk *, - const struct msghdr *); + const struct msghdr *, size_t msg_len); struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, const struct sctp_chunk *, const __u8 *, diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index ba2760802de..41904f611d1 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -60,6 +60,7 @@ struct iscsi_nopin; #define TMABORT_SUCCESS 0x1 #define TMABORT_FAILED 0x2 #define TMABORT_TIMEDOUT 0x3 +#define TMABORT_NOT_FOUND 0x4 /* Connection suspend "bit" */ #define ISCSI_SUSPEND_BIT 1 @@ -83,6 +84,12 @@ struct iscsi_mgmt_task { struct list_head running; }; +enum { + ISCSI_TASK_COMPLETED, + ISCSI_TASK_PENDING, + ISCSI_TASK_RUNNING, +}; + struct iscsi_cmd_task { /* * Becuae LLDs allocate their hdr differently, this is a pointer to @@ -101,6 +108,8 @@ struct iscsi_cmd_task { struct iscsi_conn *conn; /* used connection */ struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */ + /* state set/tested under session->lock */ + int state; struct list_head running; /* running cmd list */ void *dd_data; /* driver/transport data */ }; @@ -126,6 +135,14 @@ struct iscsi_conn { int id; /* CID */ struct list_head item; /* maintains list of conns */ int c_stage; /* connection state */ + /* + * Preallocated buffer for pdus that have data but do not + * originate from scsi-ml. We never have two pdus using the + * buffer at the same time. 
It is only allocated to + * the default max recv size because the pdus we support + * should always fit in this buffer + */ + char *data; struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ @@ -134,7 +151,7 @@ struct iscsi_conn { struct kfifo *immqueue; /* immediate xmit queue */ struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */ struct list_head mgmt_run_list; /* list of control tasks */ - struct kfifo *xmitqueue; /* data-path cmd queue */ + struct list_head xmitqueue; /* data-path cmd queue */ struct list_head run_list; /* list of cmds in progress */ struct work_struct xmitwork; /* per-conn. xmit workqueue */ /* diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 5a3df1d7085..39e833260bd 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -57,8 +57,6 @@ struct sockaddr; * @stop_conn: suspend/recover/terminate connection * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text. * @session_recovery_timedout: notify LLD a block during recovery timed out - * @suspend_conn_recv: susepend the recv side of the connection - * @termincate_conn: destroy socket connection. Called with mutex lock. * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs. * Called from queuecommand with session lock held. * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs. @@ -112,8 +110,6 @@ struct iscsi_transport { char *data, uint32_t data_size); void (*get_stats) (struct iscsi_cls_conn *conn, struct iscsi_stats *stats); - void (*suspend_conn_recv) (struct iscsi_conn *conn); - void (*terminate_conn) (struct iscsi_conn *conn); void (*init_cmd_task) (struct iscsi_cmd_task *ctask); void (*init_mgmt_task) (struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 1a649f2bb9b..4ea6f0dc2fc 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -816,6 +816,10 @@ static int update_cpumask(struct cpuset *cs, char *buf) struct cpuset trialcs; int retval, cpus_unchanged; + /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ + if (cs == &top_cpuset) + return -EACCES; + trialcs = *cs; retval = cpulist_parse(buf, trialcs.cpus_allowed); if (retval < 0) @@ -2033,6 +2037,33 @@ out: return err; } +/* + * The top_cpuset tracks what CPUs and Memory Nodes are online, + * period. This is necessary in order to make cpusets transparent + * (of no affect) on systems that are actively using CPU hotplug + * but making no active use of cpusets. + * + * This handles CPU hotplug (cpuhp) events. If someday Memory + * Nodes can be hotplugged (dynamically changing node_online_map) + * then we should handle that too, perhaps in a similar way. 
+ */ + +#ifdef CONFIG_HOTPLUG_CPU +static int cpuset_handle_cpuhp(struct notifier_block *nb, + unsigned long phase, void *cpu) +{ + mutex_lock(&manage_mutex); + mutex_lock(&callback_mutex); + + top_cpuset.cpus_allowed = cpu_online_map; + + mutex_unlock(&callback_mutex); + mutex_unlock(&manage_mutex); + + return 0; +} +#endif + /** * cpuset_init_smp - initialize cpus_allowed * @@ -2043,6 +2074,8 @@ void __init cpuset_init_smp(void) { top_cpuset.cpus_allowed = cpu_online_map; top_cpuset.mems_allowed = node_online_map; + + hotcpu_notifier(cpuset_handle_cpuhp, 0); } /** @@ -2387,7 +2420,7 @@ EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); int cpuset_excl_nodes_overlap(const struct task_struct *p) { const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */ - int overlap = 0; /* do cpusets overlap? */ + int overlap = 1; /* do cpusets overlap? */ task_lock(current); if (current->flags & PF_EXITING) { diff --git a/kernel/futex.c b/kernel/futex.c index c2b2e0b83ab..b9b8aea5389 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -297,7 +297,7 @@ static int futex_handle_fault(unsigned long address, int attempt) struct vm_area_struct * vma; struct mm_struct *mm = current->mm; - if (attempt >= 2 || !(vma = find_vma(mm, address)) || + if (attempt > 2 || !(vma = find_vma(mm, address)) || vma->vm_start > address || !(vma->vm_flags & VM_WRITE)) return -EFAULT; @@ -397,7 +397,7 @@ static struct task_struct * futex_find_get_task(pid_t pid) p = NULL; goto out_unlock; } - if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) { + if (p->exit_state != 0) { p = NULL; goto out_unlock; } @@ -747,8 +747,10 @@ retry: */ if (attempt++) { if (futex_handle_fault((unsigned long)uaddr2, - attempt)) + attempt)) { + ret = -EFAULT; goto out; + } goto retry; } @@ -1322,9 +1324,10 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, * still holding the mmap_sem. */ if (attempt++) { - if (futex_handle_fault((unsigned long)uaddr, attempt)) + if (futex_handle_fault((unsigned long)uaddr, attempt)) { + ret = -EFAULT; goto out_unlock_release_sem; - + } goto retry_locked; } @@ -1506,9 +1509,10 @@ pi_faulted: * still holding the mmap_sem. 
*/ if (attempt++) { - if (futex_handle_fault((unsigned long)uaddr, attempt)) + if (futex_handle_fault((unsigned long)uaddr, attempt)) { + ret = -EFAULT; goto out_unlock; - + } goto retry_locked; } diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index be989efc785..21c38a7e666 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -187,7 +187,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base) { struct hrtimer_base *new_base; - new_base = &__get_cpu_var(hrtimer_bases[base->index]); + new_base = &__get_cpu_var(hrtimer_bases)[base->index]; if (base != new_base) { /* diff --git a/kernel/panic.c b/kernel/panic.c index d8a0bca2123..9b8dcfd1ca9 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -18,6 +18,7 @@ #include <linux/interrupt.h> #include <linux/nmi.h> #include <linux/kexec.h> +#include <linux/debug_locks.h> int panic_on_oops; int tainted; diff --git a/kernel/sched.c b/kernel/sched.c index a2be2d05529..a234fbee123 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4162,10 +4162,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) read_unlock_irq(&tasklist_lock); return -ESRCH; } - get_task_struct(p); - read_unlock_irq(&tasklist_lock); retval = sched_setscheduler(p, policy, &lparam); - put_task_struct(p); + read_unlock_irq(&tasklist_lock); return retval; } diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index dcfb5d73146..51cacd111db 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -111,7 +111,6 @@ static int stop_machine(void) /* If some failed, kill them all. */ if (ret < 0) { stopmachine_set_state(STOPMACHINE_EXIT); - up(&stopmachine_mutex); return ret; } diff --git a/kernel/timer.c b/kernel/timer.c index b650f04888e..1d7dd6267c2 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1324,46 +1324,19 @@ asmlinkage long sys_getpid(void) } /* - * Accessing ->group_leader->real_parent is not SMP-safe, it could - * change from under us. However, rather than getting any lock - * we can use an optimistic algorithm: get the parent - * pid, and go back and check that the parent is still - * the same. If it has changed (which is extremely unlikely - * indeed), we just try again.. - * - * NOTE! This depends on the fact that even if we _do_ - * get an old value of "parent", we can happily dereference - * the pointer (it was and remains a dereferencable kernel pointer - * no matter what): we just can't necessarily trust the result - * until we know that the parent pointer is valid. - * - * NOTE2: ->group_leader never changes from under us. + * Accessing ->real_parent is not SMP-safe, it could + * change from under us. However, we can use a stale + * value of ->real_parent under rcu_read_lock(), see + * release_task()->call_rcu(delayed_put_task_struct). 
*/ asmlinkage long sys_getppid(void) { int pid; - struct task_struct *me = current; - struct task_struct *parent; - parent = me->group_leader->real_parent; - for (;;) { - pid = parent->tgid; -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) -{ - struct task_struct *old = parent; + rcu_read_lock(); + pid = rcu_dereference(current->real_parent)->tgid; + rcu_read_unlock(); - /* - * Make sure we read the pid before re-reading the - * parent pointer: - */ - smp_rmb(); - parent = me->group_leader->real_parent; - if (old != parent) - continue; -} -#endif - break; - } return pid; } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 448e8f7b342..835fe28b87a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -68,7 +68,7 @@ struct workqueue_struct { /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove threads to each one as cpus come/go. */ -static DEFINE_SPINLOCK(workqueue_lock); +static DEFINE_MUTEX(workqueue_mutex); static LIST_HEAD(workqueues); static int singlethread_cpu; @@ -320,10 +320,10 @@ void fastcall flush_workqueue(struct workqueue_struct *wq) } else { int cpu; - lock_cpu_hotplug(); + mutex_lock(&workqueue_mutex); for_each_online_cpu(cpu) flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); - unlock_cpu_hotplug(); + mutex_unlock(&workqueue_mutex); } } EXPORT_SYMBOL_GPL(flush_workqueue); @@ -371,8 +371,7 @@ struct workqueue_struct *__create_workqueue(const char *name, } wq->name = name; - /* We don't need the distraction of CPUs appearing and vanishing. */ - lock_cpu_hotplug(); + mutex_lock(&workqueue_mutex); if (singlethread) { INIT_LIST_HEAD(&wq->list); p = create_workqueue_thread(wq, singlethread_cpu); @@ -381,9 +380,7 @@ struct workqueue_struct *__create_workqueue(const char *name, else wake_up_process(p); } else { - spin_lock(&workqueue_lock); list_add(&wq->list, &workqueues); - spin_unlock(&workqueue_lock); for_each_online_cpu(cpu) { p = create_workqueue_thread(wq, cpu); if (p) { @@ -393,7 +390,7 @@ struct workqueue_struct *__create_workqueue(const char *name, destroy = 1; } } - unlock_cpu_hotplug(); + mutex_unlock(&workqueue_mutex); /* * Was there any error during startup? If yes then clean up: @@ -434,17 +431,15 @@ void destroy_workqueue(struct workqueue_struct *wq) flush_workqueue(wq); /* We don't need the distraction of CPUs appearing and vanishing. */ - lock_cpu_hotplug(); + mutex_lock(&workqueue_mutex); if (is_single_threaded(wq)) cleanup_workqueue_thread(wq, singlethread_cpu); else { for_each_online_cpu(cpu) cleanup_workqueue_thread(wq, cpu); - spin_lock(&workqueue_lock); list_del(&wq->list); - spin_unlock(&workqueue_lock); } - unlock_cpu_hotplug(); + mutex_unlock(&workqueue_mutex); free_percpu(wq->cpu_wq); kfree(wq); } @@ -515,11 +510,13 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info) if (!works) return -ENOMEM; + mutex_lock(&workqueue_mutex); for_each_online_cpu(cpu) { INIT_WORK(per_cpu_ptr(works, cpu), func, info); __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), per_cpu_ptr(works, cpu)); } + mutex_unlock(&workqueue_mutex); flush_workqueue(keventd_wq); free_percpu(works); return 0; @@ -635,6 +632,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_UP_PREPARE: + mutex_lock(&workqueue_mutex); /* Create a new workqueue thread for it. 
*/ list_for_each_entry(wq, &workqueues, list) { if (!create_workqueue_thread(wq, hotcpu)) { @@ -653,6 +651,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, kthread_bind(cwq->thread, hotcpu); wake_up_process(cwq->thread); } + mutex_unlock(&workqueue_mutex); break; case CPU_UP_CANCELED: @@ -664,6 +663,15 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, any_online_cpu(cpu_online_map)); cleanup_workqueue_thread(wq, hotcpu); } + mutex_unlock(&workqueue_mutex); + break; + + case CPU_DOWN_PREPARE: + mutex_lock(&workqueue_mutex); + break; + + case CPU_DOWN_FAILED: + mutex_unlock(&workqueue_mutex); break; case CPU_DEAD: @@ -671,6 +679,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, cleanup_workqueue_thread(wq, hotcpu); list_for_each_entry(wq, &workqueues, list) take_over_work(wq, hotcpu); + mutex_unlock(&workqueue_mutex); break; } diff --git a/lib/ts_bm.c b/lib/ts_bm.c index 0110e441480..d90822c378a 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c @@ -111,15 +111,14 @@ static int subpattern(u8 *pattern, int i, int j, int g) return ret; } -static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern, - unsigned int len) +static void compute_prefix_tbl(struct ts_bm *bm) { int i, j, g; for (i = 0; i < ASIZE; i++) - bm->bad_shift[i] = len; - for (i = 0; i < len - 1; i++) - bm->bad_shift[pattern[i]] = len - 1 - i; + bm->bad_shift[i] = bm->patlen; + for (i = 0; i < bm->patlen - 1; i++) + bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i; /* Compute the good shift array, used to match reocurrences * of a subpattern */ @@ -150,8 +149,8 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len, bm = ts_config_priv(conf); bm->patlen = len; bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len; - compute_prefix_tbl(bm, pattern, len); memcpy(bm->pattern, pattern, len); + compute_prefix_tbl(bm); return conf; } diff --git a/mm/swap.c b/mm/swap.c index 8fd095c4ae5..687686a61f7 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -54,6 +54,26 @@ void put_page(struct page *page) } EXPORT_SYMBOL(put_page); +/** + * put_pages_list(): release a list of pages + * + * Release a list of pages which are strung together on page.lru. Currently + * used by read_cache_pages() and related error recovery code. + * + * @pages: list of pages threaded on page->lru + */ +void put_pages_list(struct list_head *pages) +{ + while (!list_empty(pages)) { + struct page *victim; + + victim = list_entry(pages->prev, struct page, lru); + list_del(&victim->lru); + page_cache_release(victim); + } +} +EXPORT_SYMBOL(put_pages_list); + /* * Writeback is about to end against a page which has been marked for immediate * reclaim. 
If it still appears to be reclaimable, move it to the tail of the diff --git a/mm/swapfile.c b/mm/swapfile.c index e70d6c6d6fe..f1f5ec78378 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -442,11 +442,12 @@ int swap_type_of(dev_t device) if (!(swap_info[i].flags & SWP_WRITEOK)) continue; + if (!device) { spin_unlock(&swap_lock); return i; } - inode = swap_info->swap_file->f_dentry->d_inode; + inode = swap_info[i].swap_file->f_dentry->d_inode; if (S_ISBLK(inode->i_mode) && device == MKDEV(imajor(inode), iminor(inode))) { spin_unlock(&swap_lock); diff --git a/net/atm/proc.c b/net/atm/proc.c index 3f95b0886a6..91fe5f53ff1 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c @@ -507,7 +507,7 @@ err_out: goto out; } -void __exit atm_proc_exit(void) +void atm_proc_exit(void) { atm_proc_dirs_remove(); } diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 6ccd32b3080..864fbbc7b24 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -40,11 +40,15 @@ int br_dev_queue_push_xmit(struct sk_buff *skb) else { #ifdef CONFIG_BRIDGE_NETFILTER /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */ - nf_bridge_maybe_copy_header(skb); + if (nf_bridge_maybe_copy_header(skb)) + kfree_skb(skb); + else #endif - skb_push(skb, ETH_HLEN); + { + skb_push(skb, ETH_HLEN); - dev_queue_xmit(skb); + dev_queue_xmit(skb); + } } return 0; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f55ef682ef8..b1211d5342f 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -386,12 +386,17 @@ void br_features_recompute(struct net_bridge *br) checksum = 0; if (feature & NETIF_F_GSO) - feature |= NETIF_F_TSO; + feature |= NETIF_F_GSO_SOFTWARE; feature |= NETIF_F_GSO; features &= feature; } + if (!(checksum & NETIF_F_ALL_CSUM)) + features &= ~NETIF_F_SG; + if (!(features & NETIF_F_SG)) + features &= ~NETIF_F_GSO_MASK; + br->dev->features = features | checksum | NETIF_F_LLTX | NETIF_F_GSO_ROBUST; } diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index 02693a230dc..9f950db3b76 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c @@ -74,6 +74,9 @@ static void ulog_send(unsigned int nlgroup) if (timer_pending(&ub->timer)) del_timer(&ub->timer); + if (!ub->skb) + return; + /* last nlmsg needs NLMSG_DONE */ if (ub->qlen > 1) ub->lastnlh->nlmsg_type = NLMSG_DONE; diff --git a/net/core/dev.c b/net/core/dev.c index d95e2626d94..d4a1ec3bded 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -116,6 +116,7 @@ #include <linux/audit.h> #include <linux/dmaengine.h> #include <linux/err.h> +#include <linux/ctype.h> /* * The list of packet types we will receive (as opposed to discard) @@ -632,14 +633,22 @@ struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mas * @name: name string * * Network device names need to be valid file names to - * to allow sysfs to work + * to allow sysfs to work. We also disallow any kind of + * whitespace. 
*/ int dev_valid_name(const char *name) { - return !(*name == '\0' - || !strcmp(name, ".") - || !strcmp(name, "..") - || strchr(name, '/')); + if (*name == '\0') + return 0; + if (!strcmp(name, ".") || !strcmp(name, "..")) + return 0; + + while (*name) { + if (*name == '/' || isspace(*name)) + return 0; + name++; + } + return 1; } /** @@ -1619,26 +1628,10 @@ static inline struct net_device *skb_bond(struct sk_buff *skb) struct net_device *dev = skb->dev; if (dev->master) { - /* - * On bonding slaves other than the currently active - * slave, suppress duplicates except for 802.3ad - * ETH_P_SLOW and alb non-mcast/bcast. - */ - if (dev->priv_flags & IFF_SLAVE_INACTIVE) { - if (dev->master->priv_flags & IFF_MASTER_ALB) { - if (skb->pkt_type != PACKET_BROADCAST && - skb->pkt_type != PACKET_MULTICAST) - goto keep; - } - - if (dev->master->priv_flags & IFF_MASTER_8023AD && - skb->protocol == __constant_htons(ETH_P_SLOW)) - goto keep; - + if (skb_bond_should_drop(skb)) { kfree_skb(skb); return NULL; } -keep: skb->dev = dev->master; } diff --git a/net/core/dst.c b/net/core/dst.c index 470c05bc4cb..1a5e49da0e7 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -95,12 +95,11 @@ static void dst_run_gc(unsigned long dummy) dst_gc_timer_inc = DST_GC_INC; dst_gc_timer_expires = DST_GC_MIN; } - dst_gc_timer.expires = jiffies + dst_gc_timer_expires; #if RT_CACHE_DEBUG >= 2 printk("dst_total: %d/%d %ld\n", atomic_read(&dst_total), delayed, dst_gc_timer_expires); #endif - add_timer(&dst_gc_timer); + mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires); out: spin_unlock(&dst_lock); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 67ed14ddabd..6a7320b39ed 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2149,6 +2149,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); skb->dev = odev; skb->pkt_type = PACKET_HOST; + skb->nh.iph = iph; + skb->h.uh = udph; if (pkt_dev->nfrags <= 0) pgh = (struct pktgen_hdr *)skb_put(skb, datalen); @@ -2460,6 +2462,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, skb->protocol = protocol; skb->dev = odev; skb->pkt_type = PACKET_HOST; + skb->nh.ipv6h = iph; + skb->h.uh = udph; if (pkt_dev->nfrags <= 0) pgh = (struct pktgen_hdr *)skb_put(skb, datalen); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 20e5bb73f14..30cc1ba6ed5 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -394,6 +394,9 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) } if (ida[IFLA_ADDRESS - 1]) { + struct sockaddr *sa; + int len; + if (!dev->set_mac_address) { err = -EOPNOTSUPP; goto out; @@ -405,7 +408,17 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len)) goto out; - err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1])); + len = sizeof(sa_family_t) + dev->addr_len; + sa = kmalloc(len, GFP_KERNEL); + if (!sa) { + err = -ENOMEM; + goto out; + } + sa->sa_family = dev->type; + memcpy(sa->sa_data, RTA_DATA(ida[IFLA_ADDRESS - 1]), + dev->addr_len); + err = dev->set_mac_address(dev, sa); + kfree(sa); if (err) goto out; send_addr_notify = 1; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 022d8894c11..c54f3664bce 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -268,8 +268,10 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, struct sk_buff *skb; skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); - if 
(likely(skb)) + if (likely(skb)) { skb_reserve(skb, NET_SKB_PAD); + skb->dev = dev; + } return skb; } diff --git a/net/core/utils.c b/net/core/utils.c index 4f96f389243..e31c90e0559 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -130,12 +130,13 @@ void __init net_random_init(void) static int net_random_reseed(void) { int i; - unsigned long seed[NR_CPUS]; + unsigned long seed; - get_random_bytes(seed, sizeof(seed)); for_each_possible_cpu(i) { struct nrnd_state *state = &per_cpu(net_rand_state,i); - __net_srandom(state, seed[i]); + + get_random_bytes(&seed, sizeof(seed)); + __net_srandom(state, seed); } return 0; } diff --git a/net/core/wireless.c b/net/core/wireless.c index d2bc72d318f..de0bde4b51d 100644 --- a/net/core/wireless.c +++ b/net/core/wireless.c @@ -82,6 +82,7 @@ #include <linux/init.h> /* for __init */ #include <linux/if_arp.h> /* ARPHRD_ETHER */ #include <linux/etherdevice.h> /* compare_ether_addr */ +#include <linux/interrupt.h> #include <linux/wireless.h> /* Pretty obvious */ #include <net/iw_handler.h> /* New driver API */ @@ -1842,6 +1843,18 @@ int wireless_rtnetlink_set(struct net_device * dev, */ #ifdef WE_EVENT_RTNETLINK +static struct sk_buff_head wireless_nlevent_queue; + +static void wireless_nlevent_process(unsigned long data) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(&wireless_nlevent_queue))) + netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC); +} + +static DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0); + /* ---------------------------------------------------------------- */ /* * Fill a rtnetlink message with our event data. @@ -1904,8 +1917,17 @@ static inline void rtmsg_iwinfo(struct net_device * dev, return; } NETLINK_CB(skb).dst_group = RTNLGRP_LINK; - netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_ATOMIC); + skb_queue_tail(&wireless_nlevent_queue, skb); + tasklet_schedule(&wireless_nlevent_tasklet); +} + +static int __init wireless_nlevent_init(void) +{ + skb_queue_head_init(&wireless_nlevent_queue); + return 0; } + +subsys_initcall(wireless_nlevent_init); #endif /* WE_EVENT_RTNETLINK */ /* ---------------------------------------------------------------- */ diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index c39bff706cf..090bc39e819 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -2,7 +2,7 @@ * net/dccp/ccids/ccid3.c * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 
- * Copyright (c) 2005-6 Ian McDonald <imcdnzl@gmail.com> + * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz> * * An implementation of the DCCP protocol * @@ -342,6 +342,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count; + timeval_add_usecs(&hctx->ccid3hctx_t_nom, + hctx->ccid3hctx_t_ipi); } out: return rc; @@ -413,7 +415,8 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len) case TFRC_SSTATE_NO_FBACK: case TFRC_SSTATE_FBACK: if (len > 0) { - hctx->ccid3hctx_t_nom = now; + timeval_sub_usecs(&hctx->ccid3hctx_t_nom, + hctx->ccid3hctx_t_ipi); ccid3_calc_new_t_ipi(hctx); ccid3_calc_new_delta(hctx); timeval_add_usecs(&hctx->ccid3hctx_t_nom, @@ -757,8 +760,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk) } hcrx->ccid3hcrx_tstamp_last_feedback = now; - hcrx->ccid3hcrx_last_counter = packet->dccphrx_ccval; - hcrx->ccid3hcrx_seqno_last_counter = packet->dccphrx_seqno; + hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval; hcrx->ccid3hcrx_bytes_recv = 0; /* Convert to multiples of 10us */ @@ -782,7 +784,7 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) return 0; - DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter; + DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter; if (dccp_packet_without_ack(skb)) return 0; @@ -854,6 +856,11 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk) interval = 1; } found: + if (!tail) { + LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n", + __FUNCTION__); + return ~0; + } rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval; ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n", dccp_role(sk), sk, rtt); @@ -864,9 +871,20 @@ found: delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta); + if (x_recv == 0) + x_recv = hcrx->ccid3hcrx_x_recv; + tmp1 = (u64)x_recv * (u64)rtt; do_div(tmp1,10000000); tmp2 = (u32)tmp1; + + if (!tmp2) { + LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 " + "%s: x_recv = %u, rtt =%u\n", + __FUNCTION__, x_recv, rtt); + return ~0; + } + fval = (hcrx->ccid3hcrx_s * 100000) / tmp2; /* do not alter order above or you will get overflow on 32 bit */ p = tfrc_calc_x_reverse_lookup(fval); @@ -882,31 +900,101 @@ found: static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss) { struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); + struct dccp_li_hist_entry *next, *head; + u64 seq_temp; - if (seq_loss != DCCP_MAX_SEQNO + 1 && - list_empty(&hcrx->ccid3hcrx_li_hist)) { - struct dccp_li_hist_entry *li_tail; + if (list_empty(&hcrx->ccid3hcrx_li_hist)) { + if (!dccp_li_hist_interval_new(ccid3_li_hist, + &hcrx->ccid3hcrx_li_hist, seq_loss, win_loss)) + return; - li_tail = dccp_li_hist_interval_new(ccid3_li_hist, - &hcrx->ccid3hcrx_li_hist, - seq_loss, win_loss); - if (li_tail == NULL) + next = (struct dccp_li_hist_entry *) + hcrx->ccid3hcrx_li_hist.next; + next->dccplih_interval = ccid3_hc_rx_calc_first_li(sk); + } else { + struct dccp_li_hist_entry *entry; + struct list_head *tail; + + head = (struct dccp_li_hist_entry *) + hcrx->ccid3hcrx_li_hist.next; + /* FIXME win count check removed as was wrong */ + /* should make this check with receive history */ + /* and compare there as per section 10.2 of RFC4342 */ + + /* new loss event detected */ + /* calculate last interval length */ + seq_temp = 
dccp_delta_seqno(head->dccplih_seqno, seq_loss); + entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC); + + if (entry == NULL) { + printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__); + dump_stack(); return; - li_tail->dccplih_interval = ccid3_hc_rx_calc_first_li(sk); - } else - LIMIT_NETDEBUG(KERN_WARNING "%s: FIXME: find end of " - "interval\n", __FUNCTION__); + } + + list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist); + + tail = hcrx->ccid3hcrx_li_hist.prev; + list_del(tail); + kmem_cache_free(ccid3_li_hist->dccplih_slab, tail); + + /* Create the newest interval */ + entry->dccplih_seqno = seq_loss; + entry->dccplih_interval = seq_temp; + entry->dccplih_win_count = win_loss; + } } -static void ccid3_hc_rx_detect_loss(struct sock *sk) +static int ccid3_hc_rx_detect_loss(struct sock *sk, + struct dccp_rx_hist_entry *packet) { struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); - u8 win_loss; - const u64 seq_loss = dccp_rx_hist_detect_loss(&hcrx->ccid3hcrx_hist, - &hcrx->ccid3hcrx_li_hist, - &win_loss); + struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist); + u64 seqno = packet->dccphrx_seqno; + u64 tmp_seqno; + int loss = 0; + u8 ccval; + + + tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss; + + if (!rx_hist || + follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) { + hcrx->ccid3hcrx_seqno_nonloss = seqno; + hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval; + goto detect_out; + } + - ccid3_hc_rx_update_li(sk, seq_loss, win_loss); + while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno) + > TFRC_RECV_NUM_LATE_LOSS) { + loss = 1; + ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss, + hcrx->ccid3hcrx_ccval_nonloss); + tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss; + dccp_inc_seqno(&tmp_seqno); + hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno; + dccp_inc_seqno(&tmp_seqno); + while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist, + tmp_seqno, &ccval)) { + hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno; + hcrx->ccid3hcrx_ccval_nonloss = ccval; + dccp_inc_seqno(&tmp_seqno); + } + } + + /* FIXME - this code could be simplified with above while */ + /* but works at moment */ + if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) { + hcrx->ccid3hcrx_seqno_nonloss = seqno; + hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval; + } + +detect_out: + dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist, + &hcrx->ccid3hcrx_li_hist, packet, + hcrx->ccid3hcrx_seqno_nonloss); + return loss; } static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) @@ -916,8 +1004,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) struct dccp_rx_hist_entry *packet; struct timeval now; u8 win_count; - u32 p_prev, r_sample, t_elapsed; - int ins; + u32 p_prev, rtt_prev, r_sample, t_elapsed; + int loss; BUG_ON(hcrx == NULL || !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA || @@ -932,7 +1020,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) case DCCP_PKT_DATAACK: if (opt_recv->dccpor_timestamp_echo == 0) break; - p_prev = hcrx->ccid3hcrx_rtt; + rtt_prev = hcrx->ccid3hcrx_rtt; dccp_timestamp(sk, &now); timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10); r_sample = timeval_usecs(&now); @@ -951,8 +1039,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 + r_sample / 10; - if (p_prev != hcrx->ccid3hcrx_rtt) - ccid3_pr_debug("%s, New RTT=%luus, elapsed time=%u\n", + if (rtt_prev != 
hcrx->ccid3hcrx_rtt) + ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n", dccp_role(sk), hcrx->ccid3hcrx_rtt, opt_recv->dccpor_elapsed_time); break; @@ -973,8 +1061,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) win_count = packet->dccphrx_ccval; - ins = dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist, - &hcrx->ccid3hcrx_li_hist, packet); + loss = ccid3_hc_rx_detect_loss(sk, packet); if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK) return; @@ -991,7 +1078,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) case TFRC_RSTATE_DATA: hcrx->ccid3hcrx_bytes_recv += skb->len - dccp_hdr(skb)->dccph_doff * 4; - if (ins != 0) + if (loss) break; dccp_timestamp(sk, &now); @@ -1012,7 +1099,6 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n", dccp_role(sk), sk, dccp_state_name(sk->sk_state)); - ccid3_hc_rx_detect_loss(sk); p_prev = hcrx->ccid3hcrx_p; /* Calculate loss event rate */ @@ -1022,6 +1108,9 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) /* Scaling up by 1000000 as fixed decimal */ if (i_mean != 0) hcrx->ccid3hcrx_p = 1000000 / i_mean; + } else { + printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__); + dump_stack(); } if (hcrx->ccid3hcrx_p > p_prev) { @@ -1230,7 +1319,7 @@ static __exit void ccid3_module_exit(void) } module_exit(ccid3_module_exit); -MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, " +MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, " "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>"); MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID"); MODULE_LICENSE("GPL"); diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h index 5ade4f668b2..0a2cb7536d2 100644 --- a/net/dccp/ccids/ccid3.h +++ b/net/dccp/ccids/ccid3.h @@ -1,13 +1,13 @@ /* * net/dccp/ccids/ccid3.h * - * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand. * * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND * research group. For further information please see http://www.wand.net.nz/ - * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz + * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz * * This code also uses code from Lulea University, rereleased as GPL by its * authors: @@ -120,9 +120,10 @@ struct ccid3_hc_rx_sock { #define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv #define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt #define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p - u64 ccid3hcrx_seqno_last_counter:48, + u64 ccid3hcrx_seqno_nonloss:48, + ccid3hcrx_ccval_nonloss:4, ccid3hcrx_state:8, - ccid3hcrx_last_counter:4; + ccid3hcrx_ccval_last_counter:4; u32 ccid3hcrx_bytes_recv; struct timeval ccid3hcrx_tstamp_last_feedback; struct timeval ccid3hcrx_tstamp_last_ack; diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index 5d7b7d86438..906c81ab9d4 100644 --- a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c @@ -2,7 +2,7 @@ * net/dccp/ccids/lib/loss_interval.c * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 
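The receiver RTT update in the ccid3_hc_rx_packet_recv() hunk above smooths each new sample into the running estimate with plain integer arithmetic (nine parts old value, one part new sample). A minimal userspace sketch of that step, with made-up sample values, just to show the arithmetic in isolation:

#include <stdio.h>
#include <stdint.h>

/* Smooth an RTT estimate the way the hunk above does:
 * new = old * 9/10 + sample/10, everything in microseconds.
 * With no estimate yet, adopt the first sample as-is.
 */
static uint32_t rtt_smooth(uint32_t rtt, uint32_t r_sample)
{
        if (rtt == 0)
                return r_sample;
        return (rtt * 9) / 10 + r_sample / 10;
}

int main(void)
{
        /* Hypothetical samples in microseconds. */
        uint32_t samples[] = { 40000, 42000, 39000, 120000, 41000 };
        uint32_t rtt = 0;
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                rtt = rtt_smooth(rtt, samples[i]);
                printf("sample=%u -> rtt=%u us\n",
                       (unsigned)samples[i], (unsigned)rtt);
        }
        return 0;
}

The same hunk stores the loss event rate in a similar fixed-point style: once an average loss interval i_mean is known, p is kept as 1000000 / i_mean, i.e. a fraction scaled by one million.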
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> + * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz> * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or modify @@ -12,6 +12,7 @@ */ #include <linux/module.h> +#include <net/sock.h> #include "loss_interval.h" @@ -90,13 +91,13 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list) u32 w_tot = 0; list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) { - if (i < DCCP_LI_HIST_IVAL_F_LENGTH) { + if (li_entry->dccplih_interval != ~0) { i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i]; w_tot += dccp_li_hist_w[i]; + if (i != 0) + i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1]; } - if (i != 0) - i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1]; if (++i > DCCP_LI_HIST_IVAL_F_LENGTH) break; @@ -107,37 +108,36 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list) i_tot = max(i_tot0, i_tot1); - /* FIXME: Why do we do this? -Ian McDonald */ - if (i_tot * 4 < w_tot) - i_tot = w_tot * 4; + if (!w_tot) { + LIMIT_NETDEBUG(KERN_WARNING "%s: w_tot = 0\n", __FUNCTION__); + return 1; + } - return i_tot * 4 / w_tot; + return i_tot / w_tot; } EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean); -struct dccp_li_hist_entry *dccp_li_hist_interval_new(struct dccp_li_hist *hist, - struct list_head *list, - const u64 seq_loss, - const u8 win_loss) +int dccp_li_hist_interval_new(struct dccp_li_hist *hist, + struct list_head *list, const u64 seq_loss, const u8 win_loss) { - struct dccp_li_hist_entry *tail = NULL, *entry; + struct dccp_li_hist_entry *entry; int i; - for (i = 0; i <= DCCP_LI_HIST_IVAL_F_LENGTH; ++i) { + for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) { entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); if (entry == NULL) { dccp_li_hist_purge(hist, list); - return NULL; + dump_stack(); + return 0; } - if (tail == NULL) - tail = entry; + entry->dccplih_interval = ~0; list_add(&entry->dccplih_node, list); } entry->dccplih_seqno = seq_loss; entry->dccplih_win_count = win_loss; - return tail; + return 1; } EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new); diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h index 43bf78269d1..0ae85f0340b 100644 --- a/net/dccp/ccids/lib/loss_interval.h +++ b/net/dccp/ccids/lib/loss_interval.h @@ -4,7 +4,7 @@ * net/dccp/ccids/lib/loss_interval.h * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 
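The reworked dccp_li_hist_calc_i_mean() above computes the TFRC average loss interval: slots still holding the ~0 placeholder are skipped, every real interval is weighted, the newest (still open) interval is summed both with (i_tot0) and without (i_tot1) its own weight, and the larger of the two sums is divided by the total weight. A standalone sketch of that calculation; the weight table here is an illustrative integer scaling of the RFC 3448 section 5.4 weights, not the kernel's dccp_li_hist_w table, which is defined outside this patch:

#include <stdio.h>
#include <stdint.h>

#define LI_LEN 8

/* Illustrative weights only (roughly 1,1,1,1,0.8,0.6,0.4,0.2 scaled
 * by 5); the real table lives in loss_interval.c and is not shown in
 * this patch.
 */
static const uint32_t w[LI_LEN] = { 5, 5, 5, 5, 4, 3, 2, 1 };

/* intervals[0] is the newest (still open) loss interval; UINT32_MAX
 * marks an unfilled slot and is skipped, matching the
 * "interval != ~0" test in the hunk above.
 */
static uint32_t calc_i_mean(const uint32_t *intervals, int n)
{
        uint64_t i_tot0 = 0, i_tot1 = 0, w_tot = 0;
        int i;

        for (i = 0; i < n && i < LI_LEN; i++) {
                if (intervals[i] == UINT32_MAX)
                        continue;
                i_tot0 += (uint64_t)intervals[i] * w[i];
                w_tot += w[i];
                if (i != 0)
                        i_tot1 += (uint64_t)intervals[i] * w[i - 1];
        }
        if (w_tot == 0)
                return 1;
        return (uint32_t)((i_tot0 > i_tot1 ? i_tot0 : i_tot1) / w_tot);
}

int main(void)
{
        /* Hypothetical interval lengths in packets, newest first. */
        uint32_t intervals[LI_LEN] = {
                5, 200, 180, 220, 190, UINT32_MAX, UINT32_MAX, UINT32_MAX
        };

        printf("i_mean = %u packets\n",
               (unsigned)calc_i_mean(intervals, LI_LEN));
        return 0;
}

Taking max(i_tot0, i_tot1) means the still-open interval only influences the average once it is long enough to raise it, so a freshly started (short) interval does not inflate the reported loss rate.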
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> + * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz> * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or modify it @@ -52,9 +52,6 @@ extern void dccp_li_hist_purge(struct dccp_li_hist *hist, extern u32 dccp_li_hist_calc_i_mean(struct list_head *list); -extern struct dccp_li_hist_entry * - dccp_li_hist_interval_new(struct dccp_li_hist *hist, - struct list_head *list, - const u64 seq_loss, - const u8 win_loss); +extern int dccp_li_hist_interval_new(struct dccp_li_hist *hist, + struct list_head *list, const u64 seq_loss, const u8 win_loss); #endif /* _DCCP_LI_HIST_ */ diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c index ad98d6a322e..b876c9c81c6 100644 --- a/net/dccp/ccids/lib/packet_history.c +++ b/net/dccp/ccids/lib/packet_history.c @@ -1,13 +1,13 @@ /* - * net/dccp/packet_history.h + * net/dccp/packet_history.c * - * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand. * * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND * research group. For further information please see http://www.wand.net.nz/ - * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz + * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz * * This code also uses code from Lulea University, rereleased as GPL by its * authors: @@ -112,64 +112,27 @@ struct dccp_rx_hist_entry * EXPORT_SYMBOL_GPL(dccp_rx_hist_find_data_packet); -int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, +void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, struct list_head *rx_list, struct list_head *li_list, - struct dccp_rx_hist_entry *packet) + struct dccp_rx_hist_entry *packet, + u64 nonloss_seqno) { - struct dccp_rx_hist_entry *entry, *next, *iter; + struct dccp_rx_hist_entry *entry, *next; u8 num_later = 0; - iter = dccp_rx_hist_head(rx_list); - if (iter == NULL) - dccp_rx_hist_add_entry(rx_list, packet); - else { - const u64 seqno = packet->dccphrx_seqno; - - if (after48(seqno, iter->dccphrx_seqno)) - dccp_rx_hist_add_entry(rx_list, packet); - else { - if (dccp_rx_hist_entry_data_packet(iter)) - num_later = 1; - - list_for_each_entry_continue(iter, rx_list, - dccphrx_node) { - if (after48(seqno, iter->dccphrx_seqno)) { - dccp_rx_hist_add_entry(&iter->dccphrx_node, - packet); - goto trim_history; - } - - if (dccp_rx_hist_entry_data_packet(iter)) - num_later++; + list_add(&packet->dccphrx_node, rx_list); - if (num_later == TFRC_RECV_NUM_LATE_LOSS) { - dccp_rx_hist_entry_delete(hist, packet); - return 1; - } - } - - if (num_later < TFRC_RECV_NUM_LATE_LOSS) - dccp_rx_hist_add_entry(rx_list, packet); - /* - * FIXME: else what? should we destroy the packet - * like above? 
- */ - } - } - -trim_history: - /* - * Trim history (remove all packets after the NUM_LATE_LOSS + 1 - * data packets) - */ num_later = TFRC_RECV_NUM_LATE_LOSS + 1; if (!list_empty(li_list)) { list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) { if (num_later == 0) { - list_del_init(&entry->dccphrx_node); - dccp_rx_hist_entry_delete(hist, entry); + if (after48(nonloss_seqno, + entry->dccphrx_seqno)) { + list_del_init(&entry->dccphrx_node); + dccp_rx_hist_entry_delete(hist, entry); + } } else if (dccp_rx_hist_entry_data_packet(entry)) --num_later; } @@ -217,94 +180,10 @@ trim_history: --num_later; } } - - return 0; } EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet); -u64 dccp_rx_hist_detect_loss(struct list_head *rx_list, - struct list_head *li_list, u8 *win_loss) -{ - struct dccp_rx_hist_entry *entry, *next, *packet; - struct dccp_rx_hist_entry *a_loss = NULL; - struct dccp_rx_hist_entry *b_loss = NULL; - u64 seq_loss = DCCP_MAX_SEQNO + 1; - u8 num_later = TFRC_RECV_NUM_LATE_LOSS; - - list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) { - if (num_later == 0) { - b_loss = entry; - break; - } else if (dccp_rx_hist_entry_data_packet(entry)) - --num_later; - } - - if (b_loss == NULL) - goto out; - - num_later = 1; - list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) { - if (num_later == 0) { - a_loss = entry; - break; - } else if (dccp_rx_hist_entry_data_packet(entry)) - --num_later; - } - - if (a_loss == NULL) { - if (list_empty(li_list)) { - /* no loss event have occured yet */ - LIMIT_NETDEBUG("%s: TODO: find a lost data packet by " - "comparing to initial seqno\n", - __FUNCTION__); - goto out; - } else { - LIMIT_NETDEBUG("%s: Less than 4 data pkts in history!", - __FUNCTION__); - goto out; - } - } - - /* Locate a lost data packet */ - entry = packet = b_loss; - list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) { - u64 delta = dccp_delta_seqno(entry->dccphrx_seqno, - packet->dccphrx_seqno); - - if (delta != 0) { - if (dccp_rx_hist_entry_data_packet(packet)) - --delta; - /* - * FIXME: check this, probably this % usage is because - * in earlier drafts the ndp count was just 8 bits - * long, but now it cam be up to 24 bits long. 
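The loss detection in these hunks leans on modulo-2^48 sequence arithmetic: dccp_delta_seqno() and dccp_inc_seqno() in the code above, plus the follows48() helper this patch adds to net/dccp/dccp.h further below. A small userspace sketch of that arithmetic; seq48_inc() and seq48_delta() are plausible stand-ins (their exact kernel definitions are not shown in this patch), while follows48() mirrors the added helper, which deliberately compares only the low 16 bits:

#include <stdio.h>
#include <stdint.h>

#define SEQ48_MASK ((1ULL << 48) - 1)

static uint64_t seq48_inc(uint64_t seq)
{
        return (seq + 1) & SEQ48_MASK;
}

/* Distance from 'from' forward to 'to', modulo 2^48. */
static uint64_t seq48_delta(uint64_t from, uint64_t to)
{
        return (to - from) & SEQ48_MASK;
}

/* is seq1 the next seqno after seq2 (low 16 bits only, as in the
 * helper added by this patch) */
static int follows48(uint64_t seq1, uint64_t seq2)
{
        int diff = (int)(seq1 & 0xFFFF) - (int)(seq2 & 0xFFFF);

        return diff == 1;
}

int main(void)
{
        uint64_t last = SEQ48_MASK;             /* 2^48 - 1 */
        uint64_t next = seq48_inc(last);        /* wraps to 0 */

        printf("inc(2^48-1)           = %llu\n", (unsigned long long)next);
        printf("delta(2^48-1 -> 0)    = %llu\n",
               (unsigned long long)seq48_delta(last, next));
        printf("follows48(1001, 1000) = %d\n", follows48(1001, 1000));
        printf("follows48(1002, 1000) = %d\n", follows48(1002, 1000));
        return 0;
}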
- */ -#if 0 - if (delta % DCCP_NDP_LIMIT != - (packet->dccphrx_ndp - - entry->dccphrx_ndp) % DCCP_NDP_LIMIT) -#endif - if (delta != packet->dccphrx_ndp - entry->dccphrx_ndp) { - seq_loss = entry->dccphrx_seqno; - dccp_inc_seqno(&seq_loss); - } - } - packet = entry; - if (packet == a_loss) - break; - } -out: - if (seq_loss != DCCP_MAX_SEQNO + 1) - *win_loss = a_loss->dccphrx_ccval; - else - *win_loss = 0; /* Paranoia */ - - return seq_loss; -} - -EXPORT_SYMBOL_GPL(dccp_rx_hist_detect_loss); - struct dccp_tx_hist *dccp_tx_hist_new(const char *name) { struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC); @@ -365,6 +244,25 @@ struct dccp_tx_hist_entry * EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry); +int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq, + u8 *ccval) +{ + struct dccp_rx_hist_entry *packet = NULL, *entry; + + list_for_each_entry(entry, list, dccphrx_node) + if (entry->dccphrx_seqno == seq) { + packet = entry; + break; + } + + if (packet) + *ccval = packet->dccphrx_ccval; + + return packet != NULL; +} + +EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry); + void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist, struct list_head *list, struct dccp_tx_hist_entry *packet) @@ -391,7 +289,7 @@ void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list) EXPORT_SYMBOL_GPL(dccp_tx_hist_purge); -MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, " +MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, " "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>"); MODULE_DESCRIPTION("DCCP TFRC library"); MODULE_LICENSE("GPL"); diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h index 673c209e4e8..067cf1c85a3 100644 --- a/net/dccp/ccids/lib/packet_history.h +++ b/net/dccp/ccids/lib/packet_history.h @@ -1,13 +1,13 @@ /* * net/dccp/packet_history.h * - * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. + * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand. * * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND * research group. 
For further information please see http://www.wand.net.nz/ - * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz + * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz * * This code also uses code from Lulea University, rereleased as GPL by its * authors: @@ -106,6 +106,8 @@ static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist, extern struct dccp_tx_hist_entry * dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq); +extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq, + u8 *ccval); static inline void dccp_tx_hist_add_entry(struct list_head *list, struct dccp_tx_hist_entry *entry) @@ -164,12 +166,6 @@ static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist, extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list); -static inline void dccp_rx_hist_add_entry(struct list_head *list, - struct dccp_rx_hist_entry *entry) -{ - list_add(&entry->dccphrx_node, list); -} - static inline struct dccp_rx_hist_entry * dccp_rx_hist_head(struct list_head *list) { @@ -188,10 +184,11 @@ static inline int entry->dccphrx_type == DCCP_PKT_DATAACK; } -extern int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, +extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, struct list_head *rx_list, struct list_head *li_list, - struct dccp_rx_hist_entry *packet); + struct dccp_rx_hist_entry *packet, + u64 nonloss_seqno); extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list, struct list_head *li_list, u8 *win_loss); diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h index 130c4c40cfe..45f30f59ea2 100644 --- a/net/dccp/ccids/lib/tfrc.h +++ b/net/dccp/ccids/lib/tfrc.h @@ -4,7 +4,7 @@ * net/dccp/ccids/lib/tfrc.h * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. - * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> + * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz> * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon * diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c index 4fd2ebebf5a..44076e0c659 100644 --- a/net/dccp/ccids/lib/tfrc_equation.c +++ b/net/dccp/ccids/lib/tfrc_equation.c @@ -2,7 +2,7 @@ * net/dccp/ccids/lib/tfrc_equation.c * * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. - * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> + * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz> * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon * diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index d00a2f4ee5d..a5c5475724c 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -5,7 +5,7 @@ * * An implementation of the DCCP protocol * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> - * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> + * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as @@ -81,6 +81,14 @@ static inline u64 max48(const u64 seq1, const u64 seq2) return after48(seq1, seq2) ? 
seq1 : seq2; } +/* is seq1 next seqno after seq2 */ +static inline int follows48(const u64 seq1, const u64 seq2) +{ + int diff = (seq1 & 0xFFFF) - (seq2 & 0xFFFF); + + return diff==1; +} + enum { DCCP_MIB_NUM = 0, DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ diff --git a/net/dccp/options.c b/net/dccp/options.c index daf72bb671f..07a34696ac9 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -4,7 +4,7 @@ * An implementation of the DCCP protocol * Copyright (c) 2005 Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org> * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net> - * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> + * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 9be53a8e72c..51738000f3d 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -159,7 +159,7 @@ void free_fib_info(struct fib_info *fi) void fib_release_info(struct fib_info *fi) { - write_lock(&fib_info_lock); + write_lock_bh(&fib_info_lock); if (fi && --fi->fib_treeref == 0) { hlist_del(&fi->fib_hash); if (fi->fib_prefsrc) @@ -172,7 +172,7 @@ void fib_release_info(struct fib_info *fi) fi->fib_dead = 1; fib_info_put(fi); } - write_unlock(&fib_info_lock); + write_unlock_bh(&fib_info_lock); } static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi) @@ -598,7 +598,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash, unsigned int old_size = fib_hash_size; unsigned int i, bytes; - write_lock(&fib_info_lock); + write_lock_bh(&fib_info_lock); old_info_hash = fib_info_hash; old_laddrhash = fib_info_laddrhash; fib_hash_size = new_size; @@ -639,7 +639,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash, } fib_info_laddrhash = new_laddrhash; - write_unlock(&fib_info_lock); + write_unlock_bh(&fib_info_lock); bytes = old_size * sizeof(struct hlist_head *); fib_hash_free(old_info_hash, bytes); @@ -820,7 +820,7 @@ link_it: fi->fib_treeref++; atomic_inc(&fi->fib_clntref); - write_lock(&fib_info_lock); + write_lock_bh(&fib_info_lock); hlist_add_head(&fi->fib_hash, &fib_info_hash[fib_info_hashfn(fi)]); if (fi->fib_prefsrc) { @@ -839,7 +839,7 @@ link_it: head = &fib_info_devhash[hash]; hlist_add_head(&nh->nh_hash, head); } endfor_nexthops(fi) - write_unlock(&fib_info_lock); + write_unlock_bh(&fib_info_lock); return fi; err_inval: diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 9f4b752f5a3..8e8117c19e4 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1793,29 +1793,35 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) struct in_device *in_dev; u32 group = imr->imr_multiaddr.s_addr; u32 ifindex; + int ret = -EADDRNOTAVAIL; rtnl_lock(); in_dev = ip_mc_find_dev(imr); - if (!in_dev) { - rtnl_unlock(); - return -ENODEV; - } ifindex = imr->imr_ifindex; for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) { - if (iml->multi.imr_multiaddr.s_addr == group && - iml->multi.imr_ifindex == ifindex) { - (void) ip_mc_leave_src(sk, iml, in_dev); + if (iml->multi.imr_multiaddr.s_addr != group) + continue; + if (ifindex) { + if (iml->multi.imr_ifindex != ifindex) + continue; + } else if (imr->imr_address.s_addr && imr->imr_address.s_addr != + iml->multi.imr_address.s_addr) + continue; + + (void) ip_mc_leave_src(sk, iml, in_dev); - *imlp = iml->next; + *imlp = iml->next; + if (in_dev) ip_mc_dec_group(in_dev, 
group); - rtnl_unlock(); - sock_kfree_s(sk, iml, sizeof(*iml)); - return 0; - } + rtnl_unlock(); + sock_kfree_s(sk, iml, sizeof(*iml)); + return 0; } + if (!in_dev) + ret = -ENODEV; rtnl_unlock(); - return -EADDRNOTAVAIL; + return ret; } int ip_mc_source(int add, int omode, struct sock *sk, struct @@ -2199,13 +2205,13 @@ void ip_mc_drop_socket(struct sock *sk) struct in_device *in_dev; inet->mc_list = iml->next; - if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL) { - (void) ip_mc_leave_src(sk, iml, in_dev); + in_dev = inetdev_by_index(iml->multi.imr_ifindex); + (void) ip_mc_leave_src(sk, iml, in_dev); + if (in_dev != NULL) { ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); in_dev_put(in_dev); } sock_kfree_s(sk, iml, sizeof(*iml)); - } rtnl_unlock(); } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 9bf307a2978..4c20f554689 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -947,7 +947,7 @@ alloc_new_skb: skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); data += fraggap; - skb_trim(skb_prev, maxfraglen); + pskb_trim_unique(skb_prev, maxfraglen); } copy = datalen - transhdrlen - fraggap; @@ -1142,7 +1142,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, data, fraggap, 0); skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); - skb_trim(skb_prev, maxfraglen); + pskb_trim_unique(skb_prev, maxfraglen); } /* diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 80c73ca9011..8d1d7a6e72a 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -236,7 +236,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb, struct arpt_entry *e, *back; const char *indev, *outdev; void *table_base; - struct xt_table_info *private = table->private; + struct xt_table_info *private; /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) + @@ -248,6 +248,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb, outdev = out ? out->name : nulldevname; read_lock_bh(&table->lock); + private = table->private; table_base = (void *)private->entries[smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); back = get_entry(table_base, private->underflow[hook]); @@ -1170,21 +1171,34 @@ static int __init arp_tables_init(void) { int ret; - xt_proto_init(NF_ARP); + ret = xt_proto_init(NF_ARP); + if (ret < 0) + goto err1; /* Noone else will be downing sem now, so we won't sleep */ - xt_register_target(&arpt_standard_target); - xt_register_target(&arpt_error_target); + ret = xt_register_target(&arpt_standard_target); + if (ret < 0) + goto err2; + ret = xt_register_target(&arpt_error_target); + if (ret < 0) + goto err3; /* Register setsockopt */ ret = nf_register_sockopt(&arpt_sockopts); - if (ret < 0) { - duprintf("Unable to register sockopts.\n"); - return ret; - } + if (ret < 0) + goto err4; printk("arp_tables: (C) 2002 David S. 
Miller\n"); return 0; + +err4: + xt_unregister_target(&arpt_error_target); +err3: + xt_unregister_target(&arpt_standard_target); +err2: + xt_proto_fini(NF_ARP); +err1: + return ret; } static void __exit arp_tables_fini(void) diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c index 33891bb1fde..0d4cc92391f 100644 --- a/net/ipv4/netfilter/ip_conntrack_netlink.c +++ b/net/ipv4/netfilter/ip_conntrack_netlink.c @@ -415,21 +415,18 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) cb->args[0], *id); read_lock_bh(&ip_conntrack_lock); + last = (struct ip_conntrack *)cb->args[1]; for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) { restart: - last = (struct ip_conntrack *)cb->args[1]; list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) { h = (struct ip_conntrack_tuple_hash *) i; if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; ct = tuplehash_to_ctrack(h); - if (last != NULL) { - if (ct == last) { - ip_conntrack_put(last); - cb->args[1] = 0; - last = NULL; - } else + if (cb->args[1]) { + if (ct != last) continue; + cb->args[1] = 0; } if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, @@ -440,17 +437,17 @@ restart: goto out; } } - if (last != NULL) { - ip_conntrack_put(last); + if (cb->args[1]) { cb->args[1] = 0; goto restart; } } out: read_unlock_bh(&ip_conntrack_lock); + if (last) + ip_conntrack_put(last); DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id); - return skb->len; } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index fc5bdd5eb7d..048514f15f2 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -230,7 +230,7 @@ ipt_do_table(struct sk_buff **pskb, const char *indev, *outdev; void *table_base; struct ipt_entry *e, *back; - struct xt_table_info *private = table->private; + struct xt_table_info *private; /* Initialization */ ip = (*pskb)->nh.iph; @@ -247,6 +247,7 @@ ipt_do_table(struct sk_buff **pskb, read_lock_bh(&table->lock); IP_NF_ASSERT(table->valid_hooks & (1 << hook)); + private = table->private; table_base = (void *)private->entries[smp_processor_id()]; e = get_entry(table_base, private->hook_entry[hook]); @@ -2239,22 +2240,39 @@ static int __init ip_tables_init(void) { int ret; - xt_proto_init(AF_INET); + ret = xt_proto_init(AF_INET); + if (ret < 0) + goto err1; /* Noone else will be downing sem now, so we won't sleep */ - xt_register_target(&ipt_standard_target); - xt_register_target(&ipt_error_target); - xt_register_match(&icmp_matchstruct); + ret = xt_register_target(&ipt_standard_target); + if (ret < 0) + goto err2; + ret = xt_register_target(&ipt_error_target); + if (ret < 0) + goto err3; + ret = xt_register_match(&icmp_matchstruct); + if (ret < 0) + goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ipt_sockopts); - if (ret < 0) { - duprintf("Unable to register sockopts.\n"); - return ret; - } + if (ret < 0) + goto err5; printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n"); return 0; + +err5: + xt_unregister_match(&icmp_matchstruct); +err4: + xt_unregister_target(&ipt_error_target); +err3: + xt_unregister_target(&ipt_standard_target); +err2: + xt_proto_fini(AF_INET); +err1: + return ret; } static void __exit ip_tables_fini(void) diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index d7dd7fe7051..d46fd677fa1 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -115,6 +115,11 @@ static void ulog_send(unsigned int nlgroupnum) 
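The arp_tables_init() and ip_tables_init() hunks above replace unchecked registration calls with a checked sequence that unwinds in reverse order through numbered error labels. A compact sketch of that pattern, with hypothetical register/unregister pairs standing in for xt_proto_init(), xt_register_target() and nf_register_sockopt(), and with fewer steps than the real init functions:

#include <stdio.h>

/* Stand-ins for the real registration hooks: each returns 0 on
 * success or a negative error, and each has a matching teardown.
 */
static int register_proto(void)    { puts("proto registered");   return 0; }
static void unregister_proto(void) { puts("proto unregistered"); }
static int register_target(void)   { puts("target registered");  return 0; }
static void unregister_target(void){ puts("target unregistered"); }
static int register_sockopt(void)  { return -1; /* simulate failure */ }

static int module_init_demo(void)
{
        int ret;

        ret = register_proto();
        if (ret < 0)
                goto err1;
        ret = register_target();
        if (ret < 0)
                goto err2;
        ret = register_sockopt();
        if (ret < 0)
                goto err3;
        return 0;

        /* Unwind in reverse registration order: each label undoes
         * exactly the steps that had already succeeded.
         */
err3:
        unregister_target();
err2:
        unregister_proto();
err1:
        return ret;
}

int main(void)
{
        printf("init returned %d\n", module_init_demo());
        return 0;
}

The point of the ladder is that a failing step never leaves earlier registrations behind: a partial init is rolled back completely before the error is returned.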
del_timer(&ub->timer); } + if (!ub->skb) { + DEBUGP("ipt_ULOG: ulog_send: nothing to send\n"); + return; + } + /* last nlmsg needs NLMSG_DONE */ if (ub->qlen > 1) ub->lastnlh->nlmsg_type = NLMSG_DONE; diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c index 6b662449e82..3bd2368e1fc 100644 --- a/net/ipv4/netfilter/ipt_hashlimit.c +++ b/net/ipv4/netfilter/ipt_hashlimit.c @@ -454,15 +454,12 @@ hashlimit_match(const struct sk_buff *skb, dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * hinfo->cfg.burst); dh->rateinfo.cost = user2credits(hinfo->cfg.avg); - - spin_unlock_bh(&hinfo->lock); - return 1; + } else { + /* update expiration timeout */ + dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); + rateinfo_recalc(dh, now); } - /* update expiration timeout */ - dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); - - rateinfo_recalc(dh, now); if (dh->rateinfo.credit >= dh->rateinfo.cost) { /* We're underlimit. */ dh->rateinfo.credit -= dh->rateinfo.cost; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 19bd49d69d9..b873cbcdd0b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3157,7 +3157,7 @@ int __init ip_rt_init(void) rhash_entries, (num_physpages >= 128 * 1024) ? 15 : 17, - HASH_HIGHMEM, + 0, &rt_hash_log, &rt_hash_mask, 0); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 5765f9d0317..7ff2e4273a7 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -189,7 +189,7 @@ void tcp_slow_start(struct tcp_sock *tp) return; /* We MAY increase by 2 if discovered delayed ack */ - if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) { + if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) { if (tp->snd_cwnd < tp->snd_cwnd_clamp) tp->snd_cwnd++; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 104af5d5bcb..111ff39a08c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2505,8 +2505,13 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) if (before(ack, prior_snd_una)) goto old_ack; - if (sysctl_tcp_abc && icsk->icsk_ca_state < TCP_CA_CWR) - tp->bytes_acked += ack - prior_snd_una; + if (sysctl_tcp_abc) { + if (icsk->icsk_ca_state < TCP_CA_CWR) + tp->bytes_acked += ack - prior_snd_una; + else if (icsk->icsk_ca_state == TCP_CA_Loss) + /* we assume just one segment left network */ + tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache); + } if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5c08ea20a18..b4f3ffe1b3b 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -201,6 +201,7 @@ void tcp_select_initial_window(int __space, __u32 mss, * See RFC1323 for an explanation of the limit to 14 */ space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); + space = min_t(u32, space, *window_clamp); while (space > 65535 && (*rcv_wscale) < 14) { space >>= 1; (*rcv_wscale)++; @@ -466,7 +467,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, if (skb->len != tcp_header_size) tcp_event_data_sent(tp, skb, sk); - TCP_INC_STATS(TCP_MIB_OUTSEGS); + if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) + TCP_INC_STATS(TCP_MIB_OUTSEGS); err = icsk->icsk_af_ops->queue_xmit(skb, 0); if (likely(err <= 0)) @@ -2157,10 +2159,9 @@ int tcp_connect(struct sock *sk) skb_shinfo(buff)->gso_size = 0; skb_shinfo(buff)->gso_type = 0; buff->csum = 0; + tp->snd_nxt = tp->write_seq; TCP_SKB_CB(buff)->seq = tp->write_seq++; TCP_SKB_CB(buff)->end_seq = tp->write_seq; - tp->snd_nxt = tp->write_seq; - tp->pushed_seq = tp->write_seq; /* Send it off. */ TCP_SKB_CB(buff)->when = tcp_time_stamp; @@ -2170,6 +2171,12 @@ int tcp_connect(struct sock *sk) sk_charge_skb(sk, buff); tp->packets_out += tcp_skb_pcount(buff); tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); + + /* We change tp->snd_nxt after the tcp_transmit_skb() call + * in order to make this packet get counted in tcpOutSegs. + */ + tp->snd_nxt = tp->write_seq; + tp->pushed_seq = tp->write_seq; TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); /* Timer for repeating the SYN until an answer. */ diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index b3435324b57..dab37d2f65f 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -130,11 +130,12 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf, error = wait_event_interruptible(tcpw.wait, __kfifo_len(tcpw.fifo) != 0); if (error) - return error; + goto out_free; cnt = kfifo_get(tcpw.fifo, tbuf, len); error = copy_to_user(buf, tbuf, cnt); +out_free: vfree(tbuf); return error ? 
error : cnt; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8ea1e36bf8e..c7852b38e03 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -578,6 +578,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, ifa->flags = flags | IFA_F_TENTATIVE; ifa->cstamp = ifa->tstamp = jiffies; + ifa->rt = rt; + ifa->idev = idev; in6_dev_hold(idev); /* For caller */ @@ -603,8 +605,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, } #endif - ifa->rt = rt; - in6_ifa_hold(ifa); write_unlock(&idev->lock); out2: @@ -1909,11 +1909,11 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags); if (!IS_ERR(ifp)) { - spin_lock(&ifp->lock); + spin_lock_bh(&ifp->lock); ifp->valid_lft = valid_lft; ifp->prefered_lft = prefered_lft; ifp->tstamp = jiffies; - spin_unlock(&ifp->lock); + spin_unlock_bh(&ifp->lock); addrconf_dad_start(ifp, 0); in6_ifa_put(ifp); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 1044b6fce0d..3d6e9a35115 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -712,6 +712,11 @@ discard_it: return 0; } +/* + * Special lock-class for __icmpv6_socket: + */ +static struct lock_class_key icmpv6_socket_sk_dst_lock_key; + int __init icmpv6_init(struct net_proto_family *ops) { struct sock *sk; @@ -730,6 +735,14 @@ int __init icmpv6_init(struct net_proto_family *ops) sk = per_cpu(__icmpv6_socket, i)->sk; sk->sk_allocation = GFP_ATOMIC; + /* + * Split off their lock-class, because sk->sk_dst_lock + * gets used from softirqs, which is safe for + * __icmpv6_socket (because those never get directly used + * via userspace syscalls), but unsafe for normal sockets. + */ + lockdep_set_class(&sk->sk_dst_lock, + &icmpv6_socket_sk_dst_lock_key); /* Enough space for 2 64K ICMP packets, including * sk_buff struct overhead. 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 69451af6abe..4fb47a25291 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1095,7 +1095,7 @@ alloc_new_skb: skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); data += fraggap; - skb_trim(skb_prev, maxfraglen); + pskb_trim_unique(skb_prev, maxfraglen); } copy = datalen - transhdrlen - fraggap; if (copy < 0) { diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 9d697d4dcff..639eb20c9f1 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -268,13 +268,14 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr) if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) { struct inet6_dev *idev = in6_dev_get(dev); + (void) ip6_mc_leave_src(sk, mc_lst, idev); if (idev) { - (void) ip6_mc_leave_src(sk,mc_lst,idev); __ipv6_dev_mc_dec(idev, &mc_lst->addr); in6_dev_put(idev); } dev_put(dev); - } + } else + (void) ip6_mc_leave_src(sk, mc_lst, NULL); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return 0; } @@ -334,13 +335,14 @@ void ipv6_sock_mc_close(struct sock *sk) if (dev) { struct inet6_dev *idev = in6_dev_get(dev); + (void) ip6_mc_leave_src(sk, mc_lst, idev); if (idev) { - (void) ip6_mc_leave_src(sk, mc_lst, idev); __ipv6_dev_mc_dec(idev, &mc_lst->addr); in6_dev_put(idev); } dev_put(dev); - } + } else + (void) ip6_mc_leave_src(sk, mc_lst, NULL); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index f26898b0034..c9d6b23cd3f 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1398,23 +1398,39 @@ static int __init ip6_tables_init(void) { int ret; - xt_proto_init(AF_INET6); + ret = xt_proto_init(AF_INET6); + if (ret < 0) + goto err1; /* Noone else will be downing sem now, so we won't sleep */ - xt_register_target(&ip6t_standard_target); - xt_register_target(&ip6t_error_target); - xt_register_match(&icmp6_matchstruct); + ret = xt_register_target(&ip6t_standard_target); + if (ret < 0) + goto err2; + ret = xt_register_target(&ip6t_error_target); + if (ret < 0) + goto err3; + ret = xt_register_match(&icmp6_matchstruct); + if (ret < 0) + goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ip6t_sockopts); - if (ret < 0) { - duprintf("Unable to register sockopts.\n"); - xt_proto_fini(AF_INET6); - return ret; - } + if (ret < 0) + goto err5; printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n"); return 0; + +err5: + xt_unregister_match(&icmp6_matchstruct); +err4: + xt_unregister_target(&ip6t_error_target); +err3: + xt_unregister_target(&ip6t_standard_target); +err2: + xt_proto_fini(AF_INET6); +err1: + return ret; } static void __exit ip6_tables_fini(void) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4b163711f3a..d9baca062d2 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1532,6 +1532,10 @@ int ipv6_route_ioctl(unsigned int cmd, void __user *arg) static int ip6_pkt_discard(struct sk_buff *skb) { + int type = ipv6_addr_type(&skb->nh.ipv6h->daddr); + if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) + IP6_INC_STATS(IPSTATS_MIB_INADDRERRORS); + IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES); icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev); kfree_skb(skb); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index b843a650be7..802a1a6b103 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -944,7 +944,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, * comment in that function for the gory details. 
-acme */ - sk->sk_gso_type = SKB_GSO_TCPV6; + newsk->sk_gso_type = SKB_GSO_TCPV6; __ip6_dst_store(newsk, dst, NULL); newtcp6sk = (struct tcp6_sock *)newsk; diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index aa34ff4b707..bef3f61569f 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1642,13 +1642,17 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) goto out; - ipx = ipx_hdr(skb); - ipx_pktsize = ntohs(ipx->ipx_pktsize); + if (!pskb_may_pull(skb, sizeof(struct ipxhdr))) + goto drop; + + ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize); /* Too small or invalid header? */ - if (ipx_pktsize < sizeof(struct ipxhdr) || ipx_pktsize > skb->len) + if (ipx_pktsize < sizeof(struct ipxhdr) || + !pskb_may_pull(skb, ipx_pktsize)) goto drop; + ipx = ipx_hdr(skb); if (ipx->ipx_checksum != IPX_NO_CHECKSUM && ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize)) goto drop; diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 42eb0c3a978..61cb8cf7d15 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -330,6 +330,9 @@ static void llc_sap_mcast(struct llc_sap *sap, if (llc->laddr.lsap != laddr->lsap) continue; + if (llc->dev != skb->dev) + continue; + skb1 = skb_clone(skb, GFP_ATOMIC); if (!skb1) break; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index af4845971f7..6527d4e048d 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -429,9 +429,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) cb->args[0], *id); read_lock_bh(&nf_conntrack_lock); + last = (struct nf_conn *)cb->args[1]; for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { restart: - last = (struct nf_conn *)cb->args[1]; list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) { h = (struct nf_conntrack_tuple_hash *) i; if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) @@ -442,13 +442,10 @@ restart: * then dump everything. 
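The ipx_rcv() hunk above tightens header parsing: pskb_may_pull() guarantees the fixed IPX header is actually present before it is dereferenced, and the packet size claimed inside the header is then validated against what the skb really holds. A userspace sketch of the same order of checks on a flat buffer; struct toyhdr and its fields are made up for the example, and a flat buffer has no analogue of re-taking the header pointer after pskb_may_pull(), which the kernel code must do because pulling paged data can move it:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Toy fixed-size header with a 16-bit big-endian total-length field,
 * standing in for struct ipxhdr.
 */
struct toyhdr {
        uint16_t pktsize;       /* total packet length, network order */
        uint8_t  type;
        uint8_t  pad;
};

static int parse_packet(const unsigned char *buf, size_t len)
{
        const struct toyhdr *hdr;
        uint16_t pktsize;

        if (len < sizeof(*hdr))
                return -1;      /* cannot even read the fixed header */

        hdr = (const struct toyhdr *)buf;
        pktsize = ntohs(hdr->pktsize);
        if (pktsize < sizeof(*hdr) || pktsize > len)
                return -1;      /* claimed size is bogus or truncated */

        printf("type=%u, pktsize=%u\n", hdr->type, pktsize);
        return 0;
}

int main(void)
{
        unsigned char buf[64];
        struct toyhdr hdr = { htons(8), 5, 0 };

        memset(buf, 0, sizeof(buf));
        memcpy(buf, &hdr, sizeof(hdr));

        printf("good: %d\n", parse_packet(buf, 8));
        printf("truncated: %d\n", parse_packet(buf, 2));
        return 0;
}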
*/ if (l3proto && L3PROTO(ct) != l3proto) continue; - if (last != NULL) { - if (ct == last) { - nf_ct_put(last); - cb->args[1] = 0; - last = NULL; - } else + if (cb->args[1]) { + if (ct != last) continue; + cb->args[1] = 0; } if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, @@ -459,17 +456,17 @@ restart: goto out; } } - if (last != NULL) { - nf_ct_put(last); + if (cb->args[1]) { cb->args[1] = 0; goto restart; } } out: read_unlock_bh(&nf_conntrack_lock); + if (last) + nf_ct_put(last); DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id); - return skb->len; } diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 61cdda4e5d3..b59d3b2bde2 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -366,6 +366,9 @@ __nfulnl_send(struct nfulnl_instance *inst) if (timer_pending(&inst->timer)) del_timer(&inst->timer); + if (!inst->skb) + return 0; + if (inst->qlen > 1) inst->lastnlh->nlmsg_type = NLMSG_DONE; diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index a9f4f6f3c62..63a96546746 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/skbuff.h> +#include <linux/netfilter_bridge.h> #include <linux/netfilter/xt_physdev.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge.h> diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index d8e3891b5f8..275330fcdaa 100644 --- a/net/netfilter/xt_string.c +++ b/net/netfilter/xt_string.c @@ -37,7 +37,7 @@ static int match(const struct sk_buff *skb, return (skb_find_text((struct sk_buff *)skb, conf->from_offset, conf->to_offset, conf->config, &state) - != UINT_MAX) && !conf->invert; + != UINT_MAX) ^ conf->invert; } #define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index b85c1f9f128..8b85036ba8e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1273,8 +1273,7 @@ netlink_kernel_create(int unit, unsigned int groups, struct netlink_sock *nlk; unsigned long *listeners = NULL; - if (!nl_table) - return NULL; + BUG_ON(!nl_table); if (unit<0 || unit>=MAX_LINKS) return NULL; @@ -1745,11 +1744,8 @@ static int __init netlink_proto_init(void) netlink_skb_parms_too_large(); nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); - if (!nl_table) { -enomem: - printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n"); - return -ENOMEM; - } + if (!nl_table) + goto panic; if (num_physpages >= (128 * 1024)) max = num_physpages >> (21 - PAGE_SHIFT); @@ -1769,7 +1765,7 @@ enomem: nl_pid_hash_free(nl_table[i].hash.table, 1 * sizeof(*hash->table)); kfree(nl_table); - goto enomem; + goto panic; } memset(hash->table, 0, 1 * sizeof(*hash->table)); hash->max_shift = order; @@ -1786,6 +1782,8 @@ enomem: rtnetlink_init(); out: return err; +panic: + panic("netlink_init: Cannot allocate nl_table\n"); } core_initcall(netlink_proto_init); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index eea36696674..0a6cfa0005b 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -796,7 +796,7 @@ static int __init init_u32(void) { printk("u32 classifier\n"); #ifdef CONFIG_CLS_U32_PERF - printk(" Perfomance counters on\n"); + printk(" Performance counters on\n"); #endif #ifdef CONFIG_NET_CLS_POLICE printk(" OLD policer on \n"); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 4f11f585820..17b509282cf 100644 --- a/net/sctp/sm_make_chunk.c +++ 
b/net/sctp/sm_make_chunk.c @@ -806,38 +806,26 @@ no_mem: /* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, - const struct sctp_chunk *chunk, - const struct msghdr *msg) + const struct msghdr *msg, + size_t paylen) { struct sctp_chunk *retval; - void *payload = NULL, *payoff; - size_t paylen = 0; - struct iovec *iov = NULL; - int iovlen = 0; - - if (msg) { - iov = msg->msg_iov; - iovlen = msg->msg_iovlen; - paylen = get_user_iov_size(iov, iovlen); - } + void *payload = NULL; + int err; - retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen); + retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen); if (!retval) goto err_chunk; if (paylen) { /* Put the msg_iov together into payload. */ - payload = kmalloc(paylen, GFP_ATOMIC); + payload = kmalloc(paylen, GFP_KERNEL); if (!payload) goto err_payload; - payoff = payload; - for (; iovlen > 0; --iovlen) { - if (copy_from_user(payoff, iov->iov_base,iov->iov_len)) - goto err_copy; - payoff += iov->iov_len; - iov++; - } + err = memcpy_fromiovec(payload, msg->msg_iov, paylen); + if (err < 0) + goto err_copy; } sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index ead3f1b0ea3..5b5ae795832 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -4031,18 +4031,12 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort( * from its upper layer, but retransmits data to the far end * if necessary to fill gaps. */ - struct msghdr *msg = arg; - struct sctp_chunk *abort; + struct sctp_chunk *abort = arg; sctp_disposition_t retval; retval = SCTP_DISPOSITION_CONSUME; - /* Generate ABORT chunk to send the peer. */ - abort = sctp_make_abort_user(asoc, NULL, msg); - if (!abort) - retval = SCTP_DISPOSITION_NOMEM; - else - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); /* Even if we can't send the ABORT due to low memory delete the * TCB. This is a departure from our typical NOMEM handling. 
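sctp_make_abort_user() above drops its hand-rolled copy_from_user() loop over msg_iov in favour of one allocation of the caller-supplied length followed by a single memcpy_fromiovec(). A userspace sketch of the same gather step; gather_iovec() is an illustrative stand-in for memcpy_fromiovec() and copies from local memory rather than from user space:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Copy exactly 'len' bytes out of an iovec into one contiguous
 * buffer; returns 0 on success, -1 if the iovec holds less data.
 */
static int gather_iovec(void *dst, const struct iovec *iov, int iovcnt,
                        size_t len)
{
        char *out = dst;
        int i;

        for (i = 0; i < iovcnt && len > 0; i++) {
                size_t chunk = iov[i].iov_len < len ? iov[i].iov_len : len;

                memcpy(out, iov[i].iov_base, chunk);
                out += chunk;
                len -= chunk;
        }
        return len == 0 ? 0 : -1;
}

int main(void)
{
        char a[] = "user ", b[] = "abort ", c[] = "reason";
        struct iovec iov[3] = {
                { a, strlen(a) }, { b, strlen(b) }, { c, strlen(c) }
        };
        size_t total = strlen(a) + strlen(b) + strlen(c);
        char *payload = malloc(total + 1);

        if (!payload || gather_iovec(payload, iov, 3, total) < 0)
                return 1;
        payload[total] = '\0';
        printf("payload: \"%s\" (%zu bytes)\n", payload, total);
        free(payload);
        return 0;
}

In the socket.c hunk below, sctp_sendmsg() now builds this ABORT chunk itself and hands it to sctp_primitive_ABORT(), rather than passing the raw msghdr down to the state machine.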
@@ -4166,8 +4160,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( void *arg, sctp_cmd_seq_t *commands) { - struct msghdr *msg = arg; - struct sctp_chunk *abort; + struct sctp_chunk *abort = arg; sctp_disposition_t retval; /* Stop T1-init timer */ @@ -4175,12 +4168,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); retval = SCTP_DISPOSITION_CONSUME; - /* Generate ABORT chunk to send the peer */ - abort = sctp_make_abort_user(asoc, NULL, msg); - if (!abort) - retval = SCTP_DISPOSITION_NOMEM; - else - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 54722e622e6..dab15949958 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1289,9 +1289,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) } } - if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) - sctp_primitive_ABORT(asoc, NULL); - else + if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { + struct sctp_chunk *chunk; + + chunk = sctp_make_abort_user(asoc, NULL, 0); + if (chunk) + sctp_primitive_ABORT(asoc, chunk); + } else sctp_primitive_SHUTDOWN(asoc, NULL); } @@ -1520,8 +1524,16 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, goto out_unlock; } if (sinfo_flags & SCTP_ABORT) { + struct sctp_chunk *chunk; + + chunk = sctp_make_abort_user(asoc, msg, msg_len); + if (!chunk) { + err = -ENOMEM; + goto out_unlock; + } + SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); - sctp_primitive_ABORT(asoc, msg); + sctp_primitive_ABORT(asoc, chunk); err = 0; goto out_unlock; } diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 4a9aa9393b9..ef1cf5b476c 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -718,8 +718,7 @@ gss_destroy(struct rpc_auth *auth) auth, auth->au_flavor); gss_auth = container_of(auth, struct gss_auth, rpc_auth); - rpc_unlink(gss_auth->path); - dput(gss_auth->dentry); + rpc_unlink(gss_auth->dentry); gss_auth->dentry = NULL; gss_mech_put(gss_auth->mech); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index d6409e75721..3e19d321067 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -183,8 +183,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, out_no_auth: if (!IS_ERR(clnt->cl_dentry)) { - rpc_rmdir(clnt->cl_pathname); - dput(clnt->cl_dentry); + rpc_rmdir(clnt->cl_dentry); rpc_put_mount(); } out_no_path: @@ -251,10 +250,8 @@ rpc_clone_client(struct rpc_clnt *clnt) new->cl_autobind = 0; new->cl_oneshot = 0; new->cl_dead = 0; - if (!IS_ERR(new->cl_dentry)) { + if (!IS_ERR(new->cl_dentry)) dget(new->cl_dentry); - rpc_get_mount(); - } rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); @@ -317,11 +314,15 @@ rpc_destroy_client(struct rpc_clnt *clnt) clnt->cl_auth = NULL; } if (clnt->cl_parent != clnt) { + if (!IS_ERR(clnt->cl_dentry)) + dput(clnt->cl_dentry); rpc_destroy_client(clnt->cl_parent); goto out_free; } - if (clnt->cl_pathname[0]) - rpc_rmdir(clnt->cl_pathname); + if (!IS_ERR(clnt->cl_dentry)) { + rpc_rmdir(clnt->cl_dentry); + rpc_put_mount(); + } if (clnt->cl_xprt) { xprt_destroy(clnt->cl_xprt); clnt->cl_xprt = NULL; @@ -331,10 +332,6 @@ rpc_destroy_client(struct rpc_clnt *clnt) out_free: rpc_free_iostats(clnt->cl_metrics); clnt->cl_metrics = NULL; - if 
(!IS_ERR(clnt->cl_dentry)) { - dput(clnt->cl_dentry); - rpc_put_mount(); - } kfree(clnt); return 0; } @@ -1184,6 +1181,17 @@ call_verify(struct rpc_task *task) u32 *p = iov->iov_base, n; int error = -EACCES; + if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) { + /* RFC-1014 says that the representation of XDR data must be a + * multiple of four bytes + * - if it isn't pointer subtraction in the NFS client may give + * undefined results + */ + printk(KERN_WARNING + "call_verify: XDR representation not a multiple of" + " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len); + goto out_eio; + } if ((len -= 3) < 0) goto out_overflow; p += 1; /* skip XID */ diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index a3bd2db2e02..0b1a1ac8a4b 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -539,6 +539,7 @@ repeat: rpc_close_pipes(dentry->d_inode); simple_unlink(dir, dentry); } + inode_dir_notify(dir, DN_DELETE); dput(dentry); } while (n); goto repeat; @@ -610,8 +611,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry) int error; shrink_dcache_parent(dentry); - if (dentry->d_inode) - rpc_close_pipes(dentry->d_inode); + if (d_unhashed(dentry)) + return 0; if ((error = simple_rmdir(dir, dentry)) != 0) return error; if (!error) { @@ -684,28 +685,20 @@ err_dput: } int -rpc_rmdir(char *path) +rpc_rmdir(struct dentry *dentry) { - struct nameidata nd; - struct dentry *dentry; + struct dentry *parent; struct inode *dir; int error; - if ((error = rpc_lookup_parent(path, &nd)) != 0) - return error; - dir = nd.dentry->d_inode; + parent = dget_parent(dentry); + dir = parent->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out_release; - } rpc_depopulate(dentry); error = __rpc_rmdir(dir, dentry); dput(dentry); -out_release: mutex_unlock(&dir->i_mutex); - rpc_release_path(&nd); + dput(parent); return error; } @@ -746,32 +739,26 @@ err_dput: } int -rpc_unlink(char *path) +rpc_unlink(struct dentry *dentry) { - struct nameidata nd; - struct dentry *dentry; + struct dentry *parent; struct inode *dir; - int error; + int error = 0; - if ((error = rpc_lookup_parent(path, &nd)) != 0) - return error; - dir = nd.dentry->d_inode; + parent = dget_parent(dentry); + dir = parent->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out_release; - } - d_drop(dentry); - if (dentry->d_inode) { - rpc_close_pipes(dentry->d_inode); - error = simple_unlink(dir, dentry); + if (!d_unhashed(dentry)) { + d_drop(dentry); + if (dentry->d_inode) { + rpc_close_pipes(dentry->d_inode); + error = simple_unlink(dir, dentry); + } + inode_dir_notify(dir, DN_DELETE); } dput(dentry); - inode_dir_notify(dir, DN_DELETE); -out_release: mutex_unlock(&dir->i_mutex); - rpc_release_path(&nd); + dput(parent); return error; } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f35bc676128..3da67ca2c3c 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1134,12 +1134,33 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) } EXPORT_SYMBOL(__xfrm_route_forward); +/* Optimize later using cookies and generation ids. 
*/ + static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) { - /* If it is marked obsolete, which is how we even get here, - * then we have purged it from the policy bundle list and we - * did that for a good reason. + /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete + * to "-1" to force all XFRM destinations to get validated by + * dst_ops->check on every use. We do this because when a + * normal route referenced by an XFRM dst is obsoleted we do + * not go looking around for all parent referencing XFRM dsts + * so that we can invalidate them. It is just too much work. + * Instead we make the checks here on every use. For example: + * + * XFRM dst A --> IPv4 dst X + * + * X is the "xdst->route" of A (X is also the "dst->path" of A + * in this example). If X is marked obsolete, "A" will not + * notice. That's what we are validating here via the + * stale_bundle() check. + * + * When a policy's bundle is pruned, we dst_free() the XFRM + * dst which causes it's ->obsolete field to be set to a + * positive non-zero integer. If an XFRM dst has been pruned + * like this, we want to force a new route lookup. */ + if (dst->obsolete < 0 && !stale_bundle(dst)) + return dst; + return NULL; } diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 44312926b84..e2de650d3db 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -391,7 +391,7 @@ static void do_input(char *alias, unsigned int i; for (i = min; i < max; i++) - if (arr[i / BITS_PER_LONG] & (1 << (i%BITS_PER_LONG))) + if (arr[i / BITS_PER_LONG] & (1L << (i%BITS_PER_LONG))) sprintf(alias + strlen(alias), "%X,*", i); } diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig index f4980ca5c05..1b7c3dfc2b4 100644 --- a/sound/oss/Kconfig +++ b/sound/oss/Kconfig @@ -31,7 +31,7 @@ config SOUND_EMU10K1 For more information on this driver and the degree of support for the different card models please check: - <http://sourceforge.net/projects/emu10k1/> + <http://sourceforge.net/projects/emu10k1/> It is now possible to load dsp microcode patches into the EMU10K1 chip. These patches are used to implement real time sound @@ -140,7 +140,7 @@ config SOUND_TRIDENT system support" and "Sysctl support", and after the /proc file system has been mounted, executing the command - command what is enabled + command what is enabled echo 0>/proc/ALi5451 pcm out is also set to S/PDIF out. (Default). @@ -838,7 +838,7 @@ config SOUND_WAVEARTIST config SOUND_TVMIXER tristate "TV card (bt848) mixer support" - depends on SOUND_PRIME && I2C + depends on SOUND_PRIME && I2C && VIDEO_V4L1 help Support for audio mixer facilities on the BT848 TV frame-grabber card. diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig index d7ad32f514d..e49c0fe21b0 100644 --- a/sound/pci/Kconfig +++ b/sound/pci/Kconfig @@ -16,16 +16,16 @@ config SND_AD1889 will be called snd-ad1889. 
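The scripts/mod/file2alias.c hunk above widens the shifted constant from 1 to 1L when probing bits in an array of longs; with a plain int, the shift no longer fits once the bit index reaches 32 on a 64-bit build. A small sketch of bit helpers over an unsigned long array showing the shape of the corrected test; set_bit_ul()/test_bit_ul() are illustrative names, not kernel API:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

/* The shifted constant must be (unsigned) long: "1 << 40" does not
 * fit in an int on LP64, which is exactly what the 1 -> 1L change in
 * the hunk above fixes.
 */
static void set_bit_ul(unsigned long *arr, unsigned int i)
{
        arr[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

static int test_bit_ul(const unsigned long *arr, unsigned int i)
{
        return (arr[i / BITS_PER_LONG] >> (i % BITS_PER_LONG)) & 1UL;
}

int main(void)
{
        unsigned long bits[4] = { 0 };

        set_bit_ul(bits, 3);
        set_bit_ul(bits, 40);   /* crosses the 32-bit boundary on LP64 */

        printf("bit 3:  %d\n", test_bit_ul(bits, 3));
        printf("bit 40: %d\n", test_bit_ul(bits, 40));
        printf("bit 41: %d\n", test_bit_ul(bits, 41));
        return 0;
}

file2alias.c is a host-side build tool rather than kernel code, which is presumably why the bit test is open-coded there instead of using the kernel's set_bit()/test_bit() helpers.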
config SND_ALS300 - tristate "Avance Logic ALS300/ALS300+" - depends on SND - select SND_PCM - select SND_AC97_CODEC - select SND_OPL3_LIB - help - Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+ + tristate "Avance Logic ALS300/ALS300+" + depends on SND + select SND_PCM + select SND_AC97_CODEC + select SND_OPL3_LIB + help + Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+ - To compile this driver as a module, choose M here: the module - will be called snd-als300 + To compile this driver as a module, choose M here: the module + will be called snd-als300 config SND_ALS4000 tristate "Avance Logic ALS4000" @@ -78,49 +78,49 @@ config SND_ATIIXP_MODEM will be called snd-atiixp-modem. config SND_AU8810 - tristate "Aureal Advantage" - depends on SND + tristate "Aureal Advantage" + depends on SND select SND_MPU401_UART select SND_AC97_CODEC - help + help Say Y here to include support for Aureal Advantage soundcards. Supported features: Hardware Mixer, SRC, EQ and SPDIF output. - 3D support code is in place, but not yet useable. For more info, - email the ALSA developer list, or <mjander@users.sourceforge.net>. + 3D support code is in place, but not yet useable. For more info, + email the ALSA developer list, or <mjander@users.sourceforge.net>. To compile this driver as a module, choose M here: the module will be called snd-au8810. - + config SND_AU8820 - tristate "Aureal Vortex" - depends on SND + tristate "Aureal Vortex" + depends on SND select SND_MPU401_UART select SND_AC97_CODEC - help + help Say Y here to include support for Aureal Vortex soundcards. - Supported features: Hardware Mixer and SRC. For more info, email - the ALSA developer list, or <mjander@users.sourceforge.net>. + Supported features: Hardware Mixer and SRC. For more info, email + the ALSA developer list, or <mjander@users.sourceforge.net>. To compile this driver as a module, choose M here: the module will be called snd-au8820. - + config SND_AU8830 - tristate "Aureal Vortex 2" - depends on SND + tristate "Aureal Vortex 2" + depends on SND select SND_MPU401_UART select SND_AC97_CODEC - help + help Say Y here to include support for Aureal Vortex 2 soundcards. - Supported features: Hardware Mixer, SRC, EQ and SPDIF output. - 3D support code is in place, but not yet useable. For more info, - email the ALSA developer list, or <mjander@users.sourceforge.net>. + Supported features: Hardware Mixer, SRC, EQ and SPDIF output. + 3D support code is in place, but not yet useable. For more info, + email the ALSA developer list, or <mjander@users.sourceforge.net>. To compile this driver as a module, choose M here: the module will be called snd-au8830. - + config SND_AZT3328 tristate "Aztech AZF3328 / PCI168 (EXPERIMENTAL)" depends on SND && EXPERIMENTAL @@ -135,10 +135,10 @@ config SND_AZT3328 will be called snd-azt3328. config SND_BT87X - tristate "Bt87x Audio Capture" - depends on SND + tristate "Bt87x Audio Capture" + depends on SND select SND_PCM - help + help If you want to record audio from TV cards based on Brooktree Bt878/Bt879 chips, say Y here and read <file:Documentation/sound/alsa/Bt87x.txt>. @@ -209,7 +209,7 @@ config SND_CS46XX config SND_CS46XX_NEW_DSP bool "Cirrus Logic (Sound Fusion) New DSP support" depends on SND_CS46XX - default y + default y help Say Y here to use a new DSP image for SPDIF and dual codecs. @@ -225,7 +225,7 @@ config SND_CS5535AUDIO referred to as NS CS5535 IO or AMD CS5535 IO companion in various literature. This driver also supports the CS5536 audio device. 
However, for both chips, on certain boards, you may - need to use ac97_quirk=hp_only if your board has physically + need to use ac97_quirk=hp_only if your board has physically mapped headphone out to master output. If that works for you, send lspci -vvv output to the mailing list so that your board can be identified in the quirks list. @@ -468,11 +468,13 @@ config SND_FM801_TEA575X_BOOL FM801 chip with a TEA5757 tuner connected to GPIO1-3 pins (Media Forte SF256-PCS-02) into the snd-fm801 driver. + This will enable support for the old V4L1 API. + config SND_FM801_TEA575X tristate depends on SND_FM801_TEA575X_BOOL default SND_FM801 - select VIDEO_DEV + select VIDEO_V4L1 config SND_HDA_INTEL tristate "Intel HD Audio" diff --git a/usr/Makefile b/usr/Makefile index e93824269da..5b31c0b61c7 100644 --- a/usr/Makefile +++ b/usr/Makefile @@ -35,6 +35,9 @@ quiet_cmd_initfs = GEN $@ cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input) targets := initramfs_data.cpio.gz +# do not try to update files included in initramfs +$(deps_initramfs): ; + $(deps_initramfs): klibcdirs # We rebuild initramfs_data.cpio.gz if: # 1) Any included file is newer then initramfs_data.cpio.gz |