100 files changed, 1257 insertions, 1191 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index a13d69b2217..8ae5fac08df 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1444,7 +1444,8 @@ and is between 256 and 4096 characters. It is defined in the file Param: "schedule" - profile schedule points. Param: <number> - step/bucket size as a power of 2 for statistical time based profiling. - Param: "sleep" - profile D-state sleeping (millisecs) + Param: "sleep" - profile D-state sleeping (millisecs). + Requires CONFIG_SCHEDSTATS Param: "kvm" - profile VM exits. processor.max_cstate= [HW,ACPI] diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c index 5bdc37f8184..f2668390e8f 100644 --- a/Documentation/lguest/lguest.c +++ b/Documentation/lguest/lguest.c @@ -34,25 +34,24 @@ #include <zlib.h> #include <assert.h> #include <sched.h> -/*L:110 We can ignore the 30 include files we need for this program, but I do - * want to draw attention to the use of kernel-style types. - * - * As Linus said, "C is a Spartan language, and so should your naming be." I - * like these abbreviations and the header we need uses them, so we define them - * here. - */ -typedef unsigned long long u64; -typedef uint32_t u32; -typedef uint16_t u16; -typedef uint8_t u8; #include "linux/lguest_launcher.h" -#include "linux/pci_ids.h" #include "linux/virtio_config.h" #include "linux/virtio_net.h" #include "linux/virtio_blk.h" #include "linux/virtio_console.h" #include "linux/virtio_ring.h" #include "asm-x86/bootparam.h" +/*L:110 We can ignore the 38 include files we need for this program, but I do + * want to draw attention to the use of kernel-style types. + * + * As Linus said, "C is a Spartan language, and so should your naming be." I + * like these abbreviations, so we define them here. Note that u64 is always + * unsigned long long, which works on all Linux systems: this means that we can + * use %llu in printf for any u64. */ +typedef unsigned long long u64; +typedef uint32_t u32; +typedef uint16_t u16; +typedef uint8_t u8; /*:*/ #define PAGE_PRESENT 0x7 /* Present, RW, Execute */ @@ -361,8 +360,8 @@ static unsigned long load_bzimage(int fd) } /*L:140 Loading the kernel is easy when it's a "vmlinux", but most kernels - * come wrapped up in the self-decompressing "bzImage" format. With some funky - * coding, we can load those, too. */ + * come wrapped up in the self-decompressing "bzImage" format. With a little + * work, we can load those, too. */ static unsigned long load_kernel(int fd) { Elf32_Ehdr hdr; @@ -465,6 +464,7 @@ static unsigned long setup_pagetables(unsigned long mem, * to know where it is. */ return to_guest_phys(pgdir); } +/*:*/ /* Simple routine to roll all the commandline arguments together with spaces * between them. */ @@ -481,9 +481,9 @@ static void concat(char *dst, char *args[]) dst[len] = '\0'; } -/* This is where we actually tell the kernel to initialize the Guest. We saw - * the arguments it expects when we looked at initialize() in lguest_user.c: - * the base of guest "physical" memory, the top physical page to allow, the +/*L:185 This is where we actually tell the kernel to initialize the Guest. We + * saw the arguments it expects when we looked at initialize() in lguest_user.c: + * the base of Guest "physical" memory, the top physical page to allow, the * top level pagetable and the entry point for the Guest. 
*/ static int tell_kernel(unsigned long pgdir, unsigned long start) { @@ -513,13 +513,14 @@ static void add_device_fd(int fd) /*L:200 * The Waker. * - * With a console and network devices, we can have lots of input which we need - * to process. We could try to tell the kernel what file descriptors to watch, - * but handing a file descriptor mask through to the kernel is fairly icky. + * With console, block and network devices, we can have lots of input which we + * need to process. We could try to tell the kernel what file descriptors to + * watch, but handing a file descriptor mask through to the kernel is fairly + * icky. * * Instead, we fork off a process which watches the file descriptors and writes - * the LHREQ_BREAK command to the /dev/lguest filedescriptor to tell the Host - * loop to stop running the Guest. This causes it to return from the + * the LHREQ_BREAK command to the /dev/lguest file descriptor to tell the Host + * to stop running the Guest. This causes the Launcher to return from the * /dev/lguest read with -EAGAIN, where it will write to /dev/lguest to reset * the LHREQ_BREAK and wake us up again. * @@ -545,7 +546,9 @@ static void wake_parent(int pipefd, int lguest_fd) if (read(pipefd, &fd, sizeof(fd)) == 0) exit(0); /* Otherwise it's telling us to change what file - * descriptors we're to listen to. */ + * descriptors we're to listen to. Positive means + * listen to a new one, negative means stop + * listening. */ if (fd >= 0) FD_SET(fd, &devices.infds); else @@ -560,7 +563,7 @@ static int setup_waker(int lguest_fd) { int pipefd[2], child; - /* We create a pipe to talk to the waker, and also so it knows when the + /* We create a pipe to talk to the Waker, and also so it knows when the * Launcher dies (and closes pipe). */ pipe(pipefd); child = fork(); @@ -568,7 +571,8 @@ static int setup_waker(int lguest_fd) err(1, "forking"); if (child == 0) { - /* Close the "writing" end of our copy of the pipe */ + /* We are the Waker: close the "writing" end of our copy of the + * pipe and start waiting for input. */ close(pipefd[1]); wake_parent(pipefd[0], lguest_fd); } @@ -579,12 +583,12 @@ static int setup_waker(int lguest_fd) return pipefd[1]; } -/*L:210 +/* * Device Handling. * - * When the Guest sends DMA to us, it sends us an array of addresses and sizes. + * When the Guest gives us a buffer, it sends an array of addresses and sizes. * We need to make sure it's not trying to reach into the Launcher itself, so - * we have a convenient routine which check it and exits with an error message + * we have a convenient routine which checks it and exits with an error message * if something funny is going on: */ static void *_check_pointer(unsigned long addr, unsigned int size, @@ -601,7 +605,9 @@ static void *_check_pointer(unsigned long addr, unsigned int size, /* A macro which transparently hands the line number to the real function. */ #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) -/* This function returns the next descriptor in the chain, or vq->vring.num. */ +/* Each buffer in the virtqueues is actually a chain of descriptors. This + * function returns the next descriptor in the chain, or vq->vring.num if we're + * at the end. */ static unsigned next_desc(struct virtqueue *vq, unsigned int i) { unsigned int next; @@ -680,13 +686,14 @@ static unsigned get_vq_desc(struct virtqueue *vq, return head; } -/* Once we've used one of their buffers, we tell them about it. We'll then +/* After we've used one of their buffers, we tell them about it. 
We'll then * want to send them an interrupt, using trigger_irq(). */ static void add_used(struct virtqueue *vq, unsigned int head, int len) { struct vring_used_elem *used; - /* Get a pointer to the next entry in the used ring. */ + /* The virtqueue contains a ring of used buffers. Get a pointer to the + * next entry in that used ring. */ used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num]; used->id = head; used->len = len; @@ -700,6 +707,7 @@ static void trigger_irq(int fd, struct virtqueue *vq) { unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; + /* If they don't want an interrupt, don't send one. */ if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) return; @@ -716,8 +724,11 @@ static void add_used_and_trigger(int fd, struct virtqueue *vq, trigger_irq(fd, vq); } -/* Here is the input terminal setting we save, and the routine to restore them - * on exit so the user can see what they type next. */ +/* + * The Console + * + * Here is the input terminal setting we save, and the routine to restore them + * on exit so the user gets their terminal back. */ static struct termios orig_term; static void restore_term(void) { @@ -818,7 +829,10 @@ static void handle_console_output(int fd, struct virtqueue *vq) } } -/* Handling output for network is also simple: we get all the output buffers +/* + * The Network + * + * Handling output for network is also simple: we get all the output buffers * and write them (ignoring the first element) to this device's file descriptor * (stdout). */ static void handle_net_output(int fd, struct virtqueue *vq) @@ -831,8 +845,9 @@ static void handle_net_output(int fd, struct virtqueue *vq) while ((head = get_vq_desc(vq, iov, &out, &in)) != vq->vring.num) { if (in) errx(1, "Input buffers in output queue?"); - /* Check header, but otherwise ignore it (we said we supported - * no features). */ + /* Check header, but otherwise ignore it (we told the Guest we + * supported no features, so it shouldn't have anything + * interesting). */ (void)convert(&iov[0], struct virtio_net_hdr); len = writev(vq->dev->fd, iov+1, out-1); add_used_and_trigger(fd, vq, head, len); @@ -883,7 +898,8 @@ static bool handle_tun_input(int fd, struct device *dev) return true; } -/* This callback ensures we try again, in case we stopped console or net +/*L:215 This is the callback attached to the network and console input + * virtqueues: it ensures we try again, in case we stopped console or net * delivery because Guest didn't have any buffers. */ static void enable_fd(int fd, struct virtqueue *vq) { @@ -919,7 +935,7 @@ static void handle_output(int fd, unsigned long addr) strnlen(from_guest_phys(addr), guest_limit - addr)); } -/* This is called when the waker wakes us up: check for incoming file +/* This is called when the Waker wakes us up: check for incoming file * descriptors. */ static void handle_input(int fd) { @@ -986,8 +1002,7 @@ static struct lguest_device_desc *new_dev_desc(u16 type) } /* Each device descriptor is followed by some configuration information. - * The first byte is a "status" byte for the Guest to report what's happening. - * After that are fields: u8 type, u8 len, [... len bytes...]. + * Each configuration field looks like: u8 type, u8 len, [... len bytes...]. * * This routine adds a new field to an existing device's descriptor. It only * works for the last device, but that's OK because that's how we use it. */ @@ -1044,14 +1059,17 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, /* Link virtqueue back to device. 
*/ vq->dev = dev; - /* Set up handler. */ + /* Set the routine to call when the Guest does something to this + * virtqueue. */ vq->handle_output = handle_output; + + /* Set the "Don't Notify Me" flag if we don't have a handler */ if (!handle_output) vq->vring.used->flags = VRING_USED_F_NO_NOTIFY; } /* This routine does all the creation and setup of a new device, including - * caling new_dev_desc() to allocate the descriptor and device memory. */ + * calling new_dev_desc() to allocate the descriptor and device memory. */ static struct device *new_device(const char *name, u16 type, int fd, bool (*handle_input)(int, struct device *)) { @@ -1060,7 +1078,7 @@ static struct device *new_device(const char *name, u16 type, int fd, /* Append to device list. Prepending to a single-linked list is * easier, but the user expects the devices to be arranged on the bus * in command-line order. The first network device on the command line - * is eth0, the first block device /dev/lgba, etc. */ + * is eth0, the first block device /dev/vda, etc. */ *devices.lastdev = dev; dev->next = NULL; devices.lastdev = &dev->next; @@ -1104,7 +1122,7 @@ static void setup_console(void) /* The console needs two virtqueues: the input then the output. When * they put something the input queue, we make sure we're listening to * stdin. When they put something in the output queue, we write it to - * stdout. */ + * stdout. */ add_virtqueue(dev, VIRTQUEUE_NUM, enable_fd); add_virtqueue(dev, VIRTQUEUE_NUM, handle_console_output); @@ -1252,21 +1270,17 @@ static void setup_tun_net(const char *arg) verbose("attached to bridge: %s\n", br_name); } - -/* - * Block device. +/* Our block (disk) device should be really simple: the Guest asks for a block + * number and we read or write that position in the file. Unfortunately, that + * was amazingly slow: the Guest waits until the read is finished before + * running anything else, even if it could have been doing useful work. * - * Serving a block device is really easy: the Guest asks for a block number and - * we read or write that position in the file. - * - * Unfortunately, this is amazingly slow: the Guest waits until the read is - * finished before running anything else, even if it could be doing useful - * work. We could use async I/O, except it's reputed to suck so hard that - * characters actually go missing from your code when you try to use it. + * We could use async I/O, except it's reputed to suck so hard that characters + * actually go missing from your code when you try to use it. * * So we farm the I/O out to thread, and communicate with it via a pipe. */ -/* This hangs off device->priv, with the data. */ +/* This hangs off device->priv. */ struct vblk_info { /* The size of the file. */ @@ -1282,8 +1296,14 @@ struct vblk_info * Launcher triggers interrupt to Guest. */ int done_fd; }; +/*:*/ -/* This is the core of the I/O thread. It returns true if it did something. */ +/*L:210 + * The Disk + * + * Remember that the block device is handled by a separate I/O thread. We head + * straight into the core of that thread here: + */ static bool service_io(struct device *dev) { struct vblk_info *vblk = dev->priv; @@ -1294,10 +1314,14 @@ static bool service_io(struct device *dev) struct iovec iov[dev->vq->vring.num]; off64_t off; + /* See if there's a request waiting. If not, nothing to do. 
*/ head = get_vq_desc(dev->vq, iov, &out_num, &in_num); if (head == dev->vq->vring.num) return false; + /* Every block request should contain at least one output buffer + * (detailing the location on disk and the type of request) and one + * input buffer (to hold the result). */ if (out_num == 0 || in_num == 0) errx(1, "Bad virtblk cmd %u out=%u in=%u", head, out_num, in_num); @@ -1306,10 +1330,15 @@ static bool service_io(struct device *dev) in = convert(&iov[out_num+in_num-1], struct virtio_blk_inhdr); off = out->sector * 512; - /* This is how we implement barriers. Pretty poor, no? */ + /* The block device implements "barriers", where the Guest indicates + * that it wants all previous writes to occur before this write. We + * don't have a way of asking our kernel to do a barrier, so we just + * synchronize all the data in the file. Pretty poor, no? */ if (out->type & VIRTIO_BLK_T_BARRIER) fdatasync(vblk->fd); + /* In general the virtio block driver is allowed to try SCSI commands. + * It'd be nice if we supported eject, for example, but we don't. */ if (out->type & VIRTIO_BLK_T_SCSI_CMD) { fprintf(stderr, "Scsi commands unsupported\n"); in->status = VIRTIO_BLK_S_UNSUPP; @@ -1375,7 +1404,7 @@ static int io_thread(void *_dev) /* When this read fails, it means Launcher died, so we follow. */ while (read(vblk->workpipe[0], &c, 1) == 1) { - /* We acknowledge each request immediately, to reduce latency, + /* We acknowledge each request immediately to reduce latency, * rather than waiting until we've done them all. I haven't * measured to see if it makes any difference. */ while (service_io(dev)) @@ -1384,12 +1413,14 @@ static int io_thread(void *_dev) return 0; } -/* When the thread says some I/O is done, we interrupt the Guest. */ +/* Now we've seen the I/O thread, we return to the Launcher to see what happens + * when the thread tells us it's completed some I/O. */ static bool handle_io_finish(int fd, struct device *dev) { char c; - /* If child died, presumably it printed message. */ + /* If the I/O thread died, presumably it printed the error, so we + * simply exit. */ if (read(dev->fd, &c, 1) != 1) exit(1); @@ -1398,7 +1429,7 @@ static bool handle_io_finish(int fd, struct device *dev) return true; } -/* When the Guest submits some I/O, we wake the I/O thread. */ +/* When the Guest submits some I/O, we just need to wake the I/O thread. */ static void handle_virtblk_output(int fd, struct virtqueue *vq) { struct vblk_info *vblk = vq->dev->priv; @@ -1410,7 +1441,7 @@ static void handle_virtblk_output(int fd, struct virtqueue *vq) exit(1); } -/* This creates a virtual block device. */ +/*L:198 This actually sets up a virtual block device. */ static void setup_block_file(const char *filename) { int p[2]; @@ -1426,7 +1457,7 @@ static void setup_block_file(const char *filename) /* The device responds to return from I/O thread. */ dev = new_device("block", VIRTIO_ID_BLOCK, p[0], handle_io_finish); - /* The device has a virtqueue. */ + /* The device has one virtqueue, where the Guest places requests. */ add_virtqueue(dev, VIRTQUEUE_NUM, handle_virtblk_output); /* Allocate the room for our own bookkeeping */ @@ -1448,7 +1479,8 @@ static void setup_block_file(const char *filename) /* The I/O thread writes to this end of the pipe when done. */ vblk->done_fd = p[1]; - /* This is how we tell the I/O thread about more work. */ + /* This is the second pipe, which is how we tell the I/O thread about + * more work. 
*/ pipe(vblk->workpipe); /* Create stack for thread and run it */ @@ -1487,24 +1519,25 @@ static void __attribute__((noreturn)) run_guest(int lguest_fd) char reason[1024] = { 0 }; read(lguest_fd, reason, sizeof(reason)-1); errx(1, "%s", reason); - /* EAGAIN means the waker wanted us to look at some input. + /* EAGAIN means the Waker wanted us to look at some input. * Anything else means a bug or incompatible change. */ } else if (errno != EAGAIN) err(1, "Running guest failed"); - /* Service input, then unset the BREAK which releases - * the Waker. */ + /* Service input, then unset the BREAK to release the Waker. */ handle_input(lguest_fd); if (write(lguest_fd, args, sizeof(args)) < 0) err(1, "Resetting break"); } } /* - * This is the end of the Launcher. + * This is the end of the Launcher. The good news: we are over halfway + * through! The bad news: the most fiendish part of the code still lies ahead + * of us. * - * But wait! We've seen I/O from the Launcher, and we've seen I/O from the - * Drivers. If we were to see the Host kernel I/O code, our understanding - * would be complete... :*/ + * Are you ready? Take a deep breath and join me in the core of the Host, in + * "make Host". + :*/ static struct option opts[] = { { "verbose", 0, NULL, 'v' }, @@ -1527,7 +1560,7 @@ int main(int argc, char *argv[]) /* Memory, top-level pagetable, code startpoint and size of the * (optional) initrd. */ unsigned long mem = 0, pgdir, start, initrd_size = 0; - /* A temporary and the /dev/lguest file descriptor. */ + /* Two temporaries and the /dev/lguest file descriptor. */ int i, c, lguest_fd; /* The boot information for the Guest. */ struct boot_params *boot; @@ -1622,6 +1655,7 @@ int main(int argc, char *argv[]) /* The boot header contains a command line pointer: we put the command * line after the boot header. */ boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); + /* We use a simple helper to copy the arguments separated by spaces. */ concat((char *)(boot + 1), argv+optind+2); /* Boot protocol version: 2.07 supports the fields for lguest. */ diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 153d84d281e..f5a5e6d3d54 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -80,8 +80,6 @@ multicast.txt - Behaviour of cards under Multicast ncsa-telnet - notes on how NCSA telnet (DOS) breaks with MTU discovery enabled. -net-modules.txt - - info and "insmod" parameters for all network driver modules. netdevices.txt - info on network device driver functions exported to the kernel. olympic.txt diff --git a/Documentation/networking/net-modules.txt b/Documentation/networking/net-modules.txt deleted file mode 100644 index 98c4392dd0f..00000000000 --- a/Documentation/networking/net-modules.txt +++ /dev/null @@ -1,315 +0,0 @@ -Wed 2-Aug-95 <matti.aarnio@utu.fi> - - Linux network driver modules - - Do not mistake this for "README.modules" at the top-level - directory! That document tells about modules in general, while - this one tells only about network device driver modules. - - This is a potpourri of INSMOD-time(*) configuration options - (if such exists) and their default values of various modules - in the Linux network drivers collection. - - Some modules have also hidden (= non-documented) tunable values. - The choice of not documenting them is based on general belief, that - the less the user needs to know, the better. (There are things that - driver developers can use, others should not confuse themselves.) 
- - In many cases it is highly preferred that insmod:ing is done - ONLY with defining an explicit address for the card, AND BY - NOT USING AUTO-PROBING! - - Now most cards have some explicitly defined base address that they - are compiled with (to avoid auto-probing, among other things). - If that compiled value does not match your actual configuration, - do use the "io=0xXXX" -parameter for the insmod, and give there - a value matching your environment. - - If you are adventurous, you can ask the driver to autoprobe - by using the "io=0" parameter, however it is a potentially dangerous - thing to do in a live system. (If you don't know where the - card is located, you can try autoprobing, and after possible - crash recovery, insmod with proper IO-address..) - - -------------------------- - (*) "INSMOD-time" means when you load module with - /sbin/insmod you can feed it optional parameters. - See "man insmod". - -------------------------- - - - 8390 based Network Modules (Paul Gortmaker, Nov 12, 1995) - -------------------------- - -(Includes: smc-ultra, ne, wd, 3c503, hp, hp-plus, e2100 and ac3200) - -The 8390 series of network drivers now support multiple card systems without -reloading the same module multiple times (memory efficient!) This is done by -specifying multiple comma separated values, such as: - - insmod 3c503.o io=0x280,0x300,0x330,0x350 xcvr=0,1,0,1 - -The above would have the one module controlling four 3c503 cards, with card 2 -and 4 using external transceivers. The "insmod" manual describes the usage -of comma separated value lists. - -It is *STRONGLY RECOMMENDED* that you supply "io=" instead of autoprobing. -If an "io=" argument is not supplied, then the ISA drivers will complain -about autoprobing being not recommended, and begrudgingly autoprobe for -a *SINGLE CARD ONLY* -- if you want to use multiple cards you *have* to -supply an "io=0xNNN,0xQQQ,..." argument. - -The ne module is an exception to the above. A NE2000 is essentially an -8390 chip, some bus glue and some RAM. Because of this, the ne probe is -more invasive than the rest, and so at boot we make sure the ne probe is -done last of all the 8390 cards (so that it won't trip over other 8390 based -cards) With modules we can't ensure that all other non-ne 8390 cards have -already been found. Because of this, the ne module REQUIRES an "io=0xNNN" -argument passed in via insmod. It will refuse to autoprobe. - -It is also worth noting that auto-IRQ probably isn't as reliable during -the flurry of interrupt activity on a running machine. Cards such as the -ne2000 that can't get the IRQ setting from an EEPROM or configuration -register are probably best supplied with an "irq=M" argument as well. - - ----------------------------------------------------------------------- -Card/Module List - Configurable Parameters and Default Values ----------------------------------------------------------------------- - -3c501.c: - io = 0x280 IO base address - irq = 5 IRQ - (Probes ports: 0x280, 0x300) - -3c503.c: - io = 0 (It will complain if you don't supply an "io=0xNNN") - irq = 0 (IRQ software selected by driver using autoIRQ) - xcvr = 0 (Use xcvr=1 to select external transceiver.) 
- (Probes ports: 0x300, 0x310, 0x330, 0x350, 0x250, 0x280, 0x2A0, 0x2E0) - -3c505.c: - io = 0 - irq = 0 - dma = 6 (not autoprobed) - (Probes ports: 0x300, 0x280, 0x310) - -3c507.c: - io = 0x300 - irq = 0 - (Probes ports: 0x300, 0x320, 0x340, 0x280) - -3c509.c: - io = 0 - irq = 0 - ( Module load-time probing Works reliably only on EISA, ISA ID-PROBE - IS NOT RELIABLE! Compile this driver statically into kernel for - now, if you need it auto-probing on an ISA-bus machine. ) - -8390.c: - (No public options, several other modules need this one) - -a2065.c: - Since this is a Zorro board, it supports full autoprobing, even for - multiple boards. (m68k/Amiga) - -ac3200.c: - io = 0 (Checks 0x1000 to 0x8fff in 0x1000 intervals) - irq = 0 (Read from config register) - (EISA probing..) - -apricot.c: - io = 0x300 (Can't be altered!) - irq = 10 - -arcnet.c: - io = 0 - irqnum = 0 - shmem = 0 - num = 0 - DO SET THESE MANUALLY AT INSMOD! - (When probing, looks at the following possible addresses: - Suggested ones: - 0x300, 0x2E0, 0x2F0, 0x2D0 - Other ones: - 0x200, 0x210, 0x220, 0x230, 0x240, 0x250, 0x260, 0x270, - 0x280, 0x290, 0x2A0, 0x2B0, 0x2C0, - 0x310, 0x320, 0x330, 0x340, 0x350, 0x360, 0x370, - 0x380, 0x390, 0x3A0, 0x3E0, 0x3F0 ) - -ariadne.c: - Since this is a Zorro board, it supports full autoprobing, even for - multiple boards. (m68k/Amiga) - -at1700.c: - io = 0x260 - irq = 0 - (Probes ports: 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300) - -atarilance.c: - Supports full autoprobing. (m68k/Atari) - -atp.c: *Not modularized* - (Probes ports: 0x378, 0x278, 0x3BC; - fixed IRQs: 5 and 7 ) - -cops.c: - io = 0x240 - irq = 5 - nodeid = 0 (AutoSelect = 0, NodeID 1-254 is hand selected.) - (Probes ports: 0x240, 0x340, 0x200, 0x210, 0x220, 0x230, 0x260, - 0x2A0, 0x300, 0x310, 0x320, 0x330, 0x350, 0x360) - -de4x5.c: - io = 0x000b - irq = 10 - is_not_dec = 0 -- For non-DEC card using DEC 21040/21041/21140 chip, set this to 1 - (EISA, and PCI probing) - -de600.c: - de600_debug = 0 - (On port 0x378, irq 7 -- lpt1; compile time configurable) - -de620.c: - bnc = 0, utp = 0 <-- Force media by setting either. - io = 0x378 (also compile-time configurable) - irq = 7 - -depca.c: - io = 0x200 - irq = 7 - (Probes ports: ISA: 0x300, 0x200; - EISA: 0x0c00 ) - -dummy.c: - No options - -e2100.c: - io = 0 (It will complain if you don't supply an "io=0xNNN") - irq = 0 (IRQ software selected by driver) - mem = 0 (Override default shared memory start of 0xd0000) - xcvr = 0 (Use xcvr=1 to select external transceiver.) - (Probes ports: 0x300, 0x280, 0x380, 0x220) - -eepro.c: - io = 0x200 - irq = 0 - (Probes ports: 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0x360) - -eexpress.c: - io = 0x300 - irq = 0 (IRQ value read from EEPROM) - (Probes ports: 0x300, 0x270, 0x320, 0x340) - -eql.c: - (No parameters) - -ewrk3.c: - io = 0x300 - irq = 5 - (With module no autoprobing! - On EISA-bus does EISA probing. 
- Static linkage probes ports on ISA bus: - 0x100, 0x120, 0x140, 0x160, 0x180, 0x1A0, 0x1C0, - 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, - 0x300, 0x340, 0x360, 0x380, 0x3A0, 0x3C0) - -hp-plus.c: - io = 0 (It will complain if you don't supply an "io=0xNNN") - irq = 0 (IRQ read from configuration register) - (Probes ports: 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340) - -hp.c: - io = 0 (It will complain if you don't supply an "io=0xNNN") - irq = 0 (IRQ software selected by driver using autoIRQ) - (Probes ports: 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240) - -hp100.c: - hp100_port = 0 (IO-base address) - (Does EISA-probing, if on EISA-slot; - On ISA-bus probes all ports from 0x100 thru to 0x3E0 - in increments of 0x020) - -hydra.c: - Since this is a Zorro board, it supports full autoprobing, even for - multiple boards. (m68k/Amiga) - -ibmtr.c: - io = 0xa20, 0xa24 (autoprobed by default) - irq = 0 (driver cannot select irq - read from hardware) - mem = 0 (shared memory base set at 0xd0000 and not yet - able to override thru mem= parameter.) - -lance.c: *Not modularized* - (PCI, and ISA probing; "CONFIG_PCI" needed for PCI support) - (Probes ISA ports: 0x300, 0x320, 0x340, 0x360) - -loopback.c: *Static kernel component* - -ne.c: - io = 0 (Explicitly *requires* an "io=0xNNN" value) - irq = 0 (Tries to determine configured IRQ via autoIRQ) - (Probes ports: 0x300, 0x280, 0x320, 0x340, 0x360) - -net_init.c: *Static kernel component* - -ni52.c: *Not modularized* - (Probes ports: 0x300, 0x280, 0x360, 0x320, 0x340 - mems: 0xD0000, 0xD2000, 0xC8000, 0xCA000, - 0xD4000, 0xD6000, 0xD8000 ) - -ni65.c: *Not modularized* **16MB MEMORY BARRIER BUG** - (Probes ports: 0x300, 0x320, 0x340, 0x360) - -pi2.c: *Not modularized* (well, NON-STANDARD modularization!) - Only one card supported at this time. - (Probes ports: 0x380, 0x300, 0x320, 0x340, 0x360, 0x3A0) - -plip.c: - io = 0 - irq = 0 (by default, uses IRQ 5 for port at 0x3bc, IRQ 7 - for port at 0x378, and IRQ 2 for port at 0x278) - (Probes ports: 0x278, 0x378, 0x3bc) - -ppp.c: - No options (ppp-2.2+ has some, this is based on non-dynamic - version from ppp-2.1.2d) - -seeq8005.c: *Not modularized* - (Probes ports: 0x300, 0x320, 0x340, 0x360) - -skeleton.c: *Skeleton* - -slhc.c: - No configuration parameters - -slip.c: - slip_maxdev = 256 (default value from SL_NRUNIT on slip.h) - - -smc-ultra.c: - io = 0 (It will complain if you don't supply an "io=0xNNN") - irq = 0 (IRQ val. read from EEPROM) - (Probes ports: 0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380) - -tulip.c: *Partial modularization* - (init-time memory allocation makes problems..) - -tunnel.c: - No insmod parameters - -wavelan.c: - io = 0x390 (Settable, but change not recommended) - irq = 0 (Not honoured, if changed..) - -wd.c: - io = 0 (It will complain if you don't supply an "io=0xNNN") - irq = 0 (IRQ val. read from EEPROM, ancient cards use autoIRQ) - mem = 0 (Force shared-memory on address 0xC8000, or whatever..) - mem_end = 0 (Force non-std. mem. size via supplying mem_end val.) - (eg. for 32k WD8003EBT, use mem=0xd0000 mem_end=0xd8000) - (Probes ports: 0x300, 0x280, 0x380, 0x240) - -znet.c: *Not modularized* - (Only one device on Zenith Z-Note (notebook?) 
systems, - configuration information from (EE)PROM) diff --git a/MAINTAINERS b/MAINTAINERS index 76b85715786..5862b786009 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2259,6 +2259,13 @@ L: legousb-devel@lists.sourceforge.net W: http://legousb.sourceforge.net/ S: Maintained +LGUEST +P: Rusty Russell +M: rusty@rustcorp.com.au +L: lguest@ozlabs.org +W: http://lguest.ozlabs.org/ +S: Maintained + LINUX FOR IBM pSERIES (RS/6000) P: Paul Mackerras M: paulus@au.ibm.com diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index a16cb03c529..d6b61d56b65 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c @@ -35,6 +35,7 @@ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/io.h> +#include <linux/scatterlist.h> #include <asm/cacheflush.h> #include <asm/bfin-global.h> diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index ef490e1ce60..6f8c080dd9f 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c @@ -9,10 +9,10 @@ #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/kernel.h> +#include <linux/scatterlist.h> #include <linux/vmalloc.h> #include <asm/pgalloc.h> -#include <asm/scatterlist.h> void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t flag) diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c index b70324e0d83..efd5dff85f6 100644 --- a/arch/sparc64/kernel/iommu_common.c +++ b/arch/sparc64/kernel/iommu_common.c @@ -234,7 +234,7 @@ unsigned long prepare_sg(struct scatterlist *sg, int nents) dma_sg->dma_length = dent_len; if (dma_sg != sg) { - dma_sg = next_sg(dma_sg); + dma_sg = sg_next(dma_sg); dma_sg->dma_length = 0; } diff --git a/arch/sparc64/kernel/ldc.c b/arch/sparc64/kernel/ldc.c index c8313cb60f0..217478a9412 100644 --- a/arch/sparc64/kernel/ldc.c +++ b/arch/sparc64/kernel/ldc.c @@ -2121,7 +2121,7 @@ int ldc_map_sg(struct ldc_channel *lp, state.nc = 0; for (i = 0; i < num_sg; i++) - fill_cookies(&state, page_to_pfn(sg[i].page) << PAGE_SHIFT, + fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT, sg[i].offset, sg[i].length); return state.nc; diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 3a8cd3dfb51..e184b44b101 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -35,6 +35,7 @@ #include "linux/genhd.h" #include "linux/spinlock.h" #include "linux/platform_device.h" +#include "linux/scatterlist.h" #include "asm/segment.h" #include "asm/uaccess.h" #include "asm/irq.h" @@ -704,6 +705,7 @@ static int ubd_add(int n, char **error_out) ubd_dev->size = ROUND_BLOCK(ubd_dev->size); INIT_LIST_HEAD(&ubd_dev->restart); + sg_init_table(&ubd_dev->sg, MAX_SG); err = -ENOMEM; ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock); diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index c56e9ee6496..ae7e0161ce4 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -338,7 +338,6 @@ static int __dma_map_cont(struct scatterlist *start, int nelems, BUG_ON(s != start && s->offset); if (s == start) { - *sout = *s; sout->dma_address = iommu_bus_base; sout->dma_address += iommu_page*PAGE_SIZE + s->offset; sout->dma_length = s->length; @@ -365,7 +364,7 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems, { if (!need) { BUG_ON(nelems != 1); - *sout = *start; + sout->dma_address = start->dma_address; sout->dma_length = start->length; return 0; } diff --git a/arch/x86/lguest/boot.c 
b/arch/x86/lguest/boot.c index 8f1356258aa..a55b0902f9d 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -99,7 +99,7 @@ static cycle_t clock_base; * When lazy_mode is set, it means we're allowed to defer all hypercalls and do * them as a batch when lazy_mode is eventually turned off. Because hypercalls * are reasonably expensive, batching them up makes sense. For example, a - * large mmap might update dozens of page table entries: that code calls + * large munmap might update dozens of page table entries: that code calls * paravirt_enter_lazy_mmu(), does the dozen updates, then calls * lguest_leave_lazy_mode(). * @@ -164,8 +164,8 @@ void async_hcall(unsigned long call, /*:*/ /*G:033 - * Here are our first native-instruction replacements: four functions for - * interrupt control. + * After that diversion we return to our first native-instruction + * replacements: four functions for interrupt control. * * The simplest way of implementing these would be to have "turn interrupts * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow: @@ -184,7 +184,7 @@ static unsigned long save_fl(void) return lguest_data.irq_enabled; } -/* "restore_flags" just sets the flags back to the value given. */ +/* restore_flags() just sets the flags back to the value given. */ static void restore_fl(unsigned long flags) { lguest_data.irq_enabled = flags; @@ -357,7 +357,7 @@ static void lguest_cpuid(unsigned int *eax, unsigned int *ebx, * it. The Host needs to know when the Guest wants to change them, so we have * a whole series of functions like read_cr0() and write_cr0(). * - * We start with CR0. CR0 allows you to turn on and off all kinds of basic + * We start with cr0. cr0 allows you to turn on and off all kinds of basic * features, but Linux only really cares about one: the horrifically-named Task * Switched (TS) bit at bit 3 (ie. 8) * @@ -372,8 +372,7 @@ static void lguest_cpuid(unsigned int *eax, unsigned int *ebx, static unsigned long current_cr0, current_cr3; static void lguest_write_cr0(unsigned long val) { - /* 8 == TS bit. */ - lazy_hcall(LHCALL_TS, val & 8, 0, 0); + lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0); current_cr0 = val; } @@ -388,10 +387,10 @@ static unsigned long lguest_read_cr0(void) static void lguest_clts(void) { lazy_hcall(LHCALL_TS, 0, 0, 0); - current_cr0 &= ~8U; + current_cr0 &= ~X86_CR0_TS; } -/* CR2 is the virtual address of the last page fault, which the Guest only ever +/* cr2 is the virtual address of the last page fault, which the Guest only ever * reads. The Host kindly writes this into our "struct lguest_data", so we * just read it out of there. */ static unsigned long lguest_read_cr2(void) @@ -399,7 +398,7 @@ static unsigned long lguest_read_cr2(void) return lguest_data.cr2; } -/* CR3 is the current toplevel pagetable page: the principle is the same as +/* cr3 is the current toplevel pagetable page: the principle is the same as * cr0. Keep a local copy, and tell the Host when it changes. */ static void lguest_write_cr3(unsigned long cr3) { @@ -412,7 +411,7 @@ static unsigned long lguest_read_cr3(void) return current_cr3; } -/* CR4 is used to enable and disable PGE, but we don't care. */ +/* cr4 is used to enable and disable PGE, but we don't care. */ static unsigned long lguest_read_cr4(void) { return 0; @@ -433,7 +432,7 @@ static void lguest_write_cr4(unsigned long val) * maps virtual addresses to physical addresses using "page tables". 
We could * use one huge index of 1 million entries: each address is 4 bytes, so that's * 1024 pages just to hold the page tables. But since most virtual addresses - * are unused, we use a two level index which saves space. The CR3 register + * are unused, we use a two level index which saves space. The cr3 register * contains the physical address of the top level "page directory" page, which * contains physical addresses of up to 1024 second-level pages. Each of these * second level pages contains up to 1024 physical addresses of actual pages, @@ -441,7 +440,7 @@ static void lguest_write_cr4(unsigned long val) * * Here's a diagram, where arrows indicate physical addresses: * - * CR3 ---> +---------+ + * cr3 ---> +---------+ * | --------->+---------+ * | | | PADDR1 | * Top-level | | PADDR2 | @@ -499,8 +498,7 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) * * ... except in early boot when the kernel sets up the initial pagetables, * which makes booting astonishingly slow. So we don't even tell the Host - * anything changed until we've done the first page table switch. - */ + * anything changed until we've done the first page table switch. */ static void lguest_set_pte(pte_t *ptep, pte_t pteval) { *ptep = pteval; @@ -721,10 +719,10 @@ static void lguest_time_init(void) /* Set up the timer interrupt (0) to go to our simple timer routine */ set_irq_handler(0, lguest_time_irq); - /* Our clock structure look like arch/i386/kernel/tsc.c if we can use - * the TSC, otherwise it's a dumb nanosecond-resolution clock. Either - * way, the "rating" is initialized so high that it's always chosen - * over any other clocksource. */ + /* Our clock structure looks like arch/x86/kernel/tsc_32.c if we can + * use the TSC, otherwise it's a dumb nanosecond-resolution clock. + * Either way, the "rating" is set so high that it's always chosen over + * any other clocksource. */ if (lguest_data.tsc_khz) lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz, lguest_clock.shift); @@ -750,7 +748,7 @@ static void lguest_time_init(void) * to work. They're pretty simple. */ -/* The Guest needs to tell the host what stack it expects traps to use. For +/* The Guest needs to tell the Host what stack it expects traps to use. For * native hardware, this is part of the Task State Segment mentioned above in * lguest_load_tr_desc(), but to help hypervisors there's this special call. * @@ -851,13 +849,16 @@ static __init char *lguest_memory_setup(void) return "LGUEST"; } -/* Before virtqueues are set up, we use LHCALL_NOTIFY on normal memory to - * produce console output. */ +/* We will eventually use the virtio console device to produce console output, + * but before that is set up we use LHCALL_NOTIFY on normal memory to produce + * console output. */ static __init int early_put_chars(u32 vtermno, const char *buf, int count) { char scratch[17]; unsigned int len = count; + /* We use a nul-terminated string, so we have to make a copy. Icky, + * huh? */ if (len > sizeof(scratch) - 1) len = sizeof(scratch) - 1; scratch[len] = '\0'; @@ -884,7 +885,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) * Our current solution is to allow the paravirt back end to optionally patch * over the indirect calls to replace them with something more efficient. We * patch the four most commonly called functions: disable interrupts, enable - * interrupts, restore interrupts and save interrupts. We usually have 10 + * interrupts, restore interrupts and save interrupts. 
We usually have 6 or 10 * bytes to patch into: the Guest versions of these operations are small enough * that we can fit comfortably. * * @@ -1016,7 +1017,7 @@ __init void lguest_init(void) asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory"); /* The Host uses the top of the Guest's virtual address space for the - * Host<->Guest Switcher, and it tells us how much it needs in + * Host<->Guest Switcher, and it tells us how big that is in * lguest_data.reserve_mem, set up on the LGUEST_INIT hypercall. */ reserve_top_address(lguest_data.reserve_mem); @@ -1066,6 +1067,6 @@ __init void lguest_init(void) /* * This marks the end of stage II of our journey, The Guest. * - * It is now time for us to explore the nooks and crannies of the three Guest - * devices and complete our understanding of the Guest in "make Drivers". + * It is now time for us to explore the layer of virtual drivers and complete + * our understanding of the Guest in "make Drivers". */ diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index ebc6ac73389..95b6fbcded6 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S @@ -6,7 +6,7 @@ #include <asm/processor-flags.h> /*G:020 This is where we begin: head.S notes that the boot header's platform - * type field is "1" (lguest), so calls us here. The boot header is in %esi. + * type field is "1" (lguest), so calls us here. * * WARNING: be very careful here! We're running at addresses equal to physical * addesses (around 0), not above PAGE_OFFSET as most code expectes * @@ -17,13 +17,15 @@ * boot. */ .section .init.text, "ax", @progbits ENTRY(lguest_entry) - /* Make initial hypercall now, so we can set up the pagetables. */ + /* We make the "initialization" hypercall now to tell the Host about + * us, and also find out where it put our page tables. */ movl $LHCALL_LGUEST_INIT, %eax movl $lguest_data - __PAGE_OFFSET, %edx int $LGUEST_TRAP_ENTRY /* The Host put the toplevel pagetable in lguest_data.pgdir. The movsl - * instruction uses %esi implicitly. */ + * instruction uses %esi implicitly as the source for the copy we're + * about to do. */ movl lguest_data - __PAGE_OFFSET + LGUEST_DATA_pgdir, %esi /* Copy first 32 entries of page directory to __PAGE_OFFSET entries. 
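A note on the scatterlist API change that runs through the hunks below (ll_rw_blk.c, hmac.c, cryptoloop.c, libata-core.c, ub.c and friends): sg_set_page() now takes the length and offset as well, so the old three-step fill (set the page, then assign sg->length and sg->offset by hand) collapses into a single call, and tables are expected to be initialized with sg_init_table() beforehand so the end-of-table marking is valid. A minimal sketch of the new calling convention, assuming the 2.6.24-era <linux/scatterlist.h>; the helper name example_fill_sg is illustrative, not from the patch:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Fill a one-entry scatterlist from a buffer in the kernel's direct
 * mapping (kmalloc'ed memory, not vmalloc or userspace). */
static void example_fill_sg(struct scatterlist *sg, void *buf, unsigned int len)
{
	sg_init_table(sg, 1);	/* clear the entry and mark it as the last */
	sg_set_page(sg, virt_to_page(buf), len, offset_in_page(buf));
}

For this common buffer case the kernel wraps exactly that pattern as sg_set_buf(sg, buf, len); the hunks below mostly call sg_set_page() directly because they already hold struct page pointers.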
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index de5ba479c22..b01dee3ae7f 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -1366,9 +1366,7 @@ new_segment: sg = sg_next(sg); } - sg_set_page(sg, bvec->bv_page); - sg->length = nbytes; - sg->offset = bvec->bv_offset; + sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset); nsegs++; } bvprv = bvec; diff --git a/crypto/hmac.c b/crypto/hmac.c index e4eb6ac53b5..6691981bda1 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -160,8 +160,7 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, sg_set_buf(sg1, ipad, bs); - sg_set_page(&sg[1], (void *) sg); - sg1[1].length = 0; + sg_set_page(&sg[1], (void *) sg, 0, 0); sg_set_buf(sg2, opad, bs + ds); err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest); diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index 3839efd5eae..1538355c266 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c @@ -194,6 +194,23 @@ static int get_date_field(char **p, u32 * value) return result; } +/* Read a possibly BCD register, always return binary */ +static u32 cmos_bcd_read(int offset, int rtc_control) +{ + u32 val = CMOS_READ(offset); + if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + BCD_TO_BIN(val); + return val; +} + +/* Write binary value into possibly BCD register */ +static void cmos_bcd_write(u32 val, int offset, int rtc_control) +{ + if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + BIN_TO_BCD(val); + CMOS_WRITE(val, offset); +} + static ssize_t acpi_system_write_alarm(struct file *file, const char __user * buffer, size_t count, loff_t * ppos) @@ -258,35 +275,18 @@ acpi_system_write_alarm(struct file *file, spin_lock_irq(&rtc_lock); rtc_control = CMOS_READ(RTC_CONTROL); - if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BIN_TO_BCD(yr); - BIN_TO_BCD(mo); - BIN_TO_BCD(day); - BIN_TO_BCD(hr); - BIN_TO_BCD(min); - BIN_TO_BCD(sec); - } if (adjust) { - yr += CMOS_READ(RTC_YEAR); - mo += CMOS_READ(RTC_MONTH); - day += CMOS_READ(RTC_DAY_OF_MONTH); - hr += CMOS_READ(RTC_HOURS); - min += CMOS_READ(RTC_MINUTES); - sec += CMOS_READ(RTC_SECONDS); + yr += cmos_bcd_read(RTC_YEAR, rtc_control); + mo += cmos_bcd_read(RTC_MONTH, rtc_control); + day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control); + hr += cmos_bcd_read(RTC_HOURS, rtc_control); + min += cmos_bcd_read(RTC_MINUTES, rtc_control); + sec += cmos_bcd_read(RTC_SECONDS, rtc_control); } spin_unlock_irq(&rtc_lock); - if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BCD_TO_BIN(yr); - BCD_TO_BIN(mo); - BCD_TO_BIN(day); - BCD_TO_BIN(hr); - BCD_TO_BIN(min); - BCD_TO_BIN(sec); - } - if (sec > 59) { min++; sec -= 60; @@ -307,14 +307,6 @@ acpi_system_write_alarm(struct file *file, yr++; mo -= 12; } - if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BIN_TO_BCD(yr); - BIN_TO_BCD(mo); - BIN_TO_BCD(day); - BIN_TO_BCD(hr); - BIN_TO_BCD(min); - BIN_TO_BCD(sec); - } spin_lock_irq(&rtc_lock); /* @@ -326,9 +318,9 @@ acpi_system_write_alarm(struct file *file, CMOS_READ(RTC_INTR_FLAGS); /* write the fields the rtc knows about */ - CMOS_WRITE(hr, RTC_HOURS_ALARM); - CMOS_WRITE(min, RTC_MINUTES_ALARM); - CMOS_WRITE(sec, RTC_SECONDS_ALARM); + cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control); + cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control); + cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control); /* * If the system supports an enhanced alarm it will have non-zero @@ -336,11 +328,11 @@ acpi_system_write_alarm(struct file *file, * to the RTC area of memory. 
*/ if (acpi_gbl_FADT.day_alarm) - CMOS_WRITE(day, acpi_gbl_FADT.day_alarm); + cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control); if (acpi_gbl_FADT.month_alarm) - CMOS_WRITE(mo, acpi_gbl_FADT.month_alarm); + cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control); if (acpi_gbl_FADT.century) - CMOS_WRITE(yr / 100, acpi_gbl_FADT.century); + cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control); /* enable the rtc alarm interrupt */ rtc_control |= RTC_AIE; CMOS_WRITE(rtc_control, RTC_CONTROL); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 95229e77bff..49cf4cf1a5a 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -41,6 +41,7 @@ #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/device.h> +#include <linux/dmi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> @@ -241,6 +242,7 @@ static void ahci_pmp_attach(struct ata_port *ap); static void ahci_pmp_detach(struct ata_port *ap); static void ahci_error_handler(struct ata_port *ap); static void ahci_vt8251_error_handler(struct ata_port *ap); +static void ahci_p5wdh_error_handler(struct ata_port *ap); static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); static int ahci_port_resume(struct ata_port *ap); static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); @@ -339,6 +341,40 @@ static const struct ata_port_operations ahci_vt8251_ops = { .port_stop = ahci_port_stop, }; +static const struct ata_port_operations ahci_p5wdh_ops = { + .check_status = ahci_check_status, + .check_altstatus = ahci_check_status, + .dev_select = ata_noop_dev_select, + + .tf_read = ahci_tf_read, + + .qc_defer = sata_pmp_qc_defer_cmd_switch, + .qc_prep = ahci_qc_prep, + .qc_issue = ahci_qc_issue, + + .irq_clear = ahci_irq_clear, + + .scr_read = ahci_scr_read, + .scr_write = ahci_scr_write, + + .freeze = ahci_freeze, + .thaw = ahci_thaw, + + .error_handler = ahci_p5wdh_error_handler, + .post_internal_cmd = ahci_post_internal_cmd, + + .pmp_attach = ahci_pmp_attach, + .pmp_detach = ahci_pmp_detach, + +#ifdef CONFIG_PM + .port_suspend = ahci_port_suspend, + .port_resume = ahci_port_resume, +#endif + + .port_start = ahci_port_start, + .port_stop = ahci_port_stop, +}; + #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) static const struct ata_port_info ahci_port_info[] = { @@ -1213,6 +1249,53 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, return rc ?: -EAGAIN; } +static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; + struct ata_taskfile tf; + int rc; + + ahci_stop_engine(ap); + + /* clear D2H reception area to properly wait for D2H FIS */ + ata_tf_init(link->device, &tf); + tf.command = 0x80; + ata_tf_to_fis(&tf, 0, 0, d2h_fis); + + rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), + deadline); + + ahci_start_engine(ap); + + if (rc || ata_link_offline(link)) + return rc; + + /* spec mandates ">= 2ms" before checking status */ + msleep(150); + + /* The pseudo configuration device on SIMG4726 attached to + * ASUS P5W-DH Deluxe doesn't send signature FIS after + * hardreset if no device is attached to the first downstream + * port && the pseudo device locks up on SRST w/ PMP==0. To + * work around this, wait for !BSY only briefly. 
If BSY isn't + * cleared, perform CLO and proceed to IDENTIFY (achieved by + * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA). + * + * Wait for two seconds. Devices attached to downstream port + * which can't process the following IDENTIFY after this will + * have to be reset again. For most cases, this should + * suffice while making probing snappish enough. + */ + rc = ata_wait_ready(ap, jiffies + 2 * HZ); + if (rc) + ahci_kick_engine(ap, 0); + + return 0; +} + static void ahci_postreset(struct ata_link *link, unsigned int *class) { struct ata_port *ap = link->ap; @@ -1670,6 +1753,19 @@ static void ahci_vt8251_error_handler(struct ata_port *ap) ahci_postreset); } +static void ahci_p5wdh_error_handler(struct ata_port *ap) +{ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) { + /* restart engine */ + ahci_stop_engine(ap); + ahci_start_engine(ap); + } + + /* perform recovery */ + ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_p5wdh_hardreset, + ahci_postreset); +} + static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; @@ -1955,6 +2051,51 @@ static void ahci_print_info(struct ata_host *host) ); } +/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is + * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't + * support PMP and the 4726 either directly exports the device + * attached to the first downstream port or acts as a hardware storage + * controller and emulate a single ATA device (can be RAID 0/1 or some + * other configuration). + * + * When there's no device attached to the first downstream port of the + * 4726, "Config Disk" appears, which is a pseudo ATA device to + * configure the 4726. However, ATA emulation of the device is very + * lame. It doesn't send signature D2H Reg FIS after the initial + * hardreset, pukes on SRST w/ PMP==0 and has bunch of other issues. + * + * The following function works around the problem by always using + * hardreset on the port and not depending on receiving signature FIS + * afterward. If signature FIS isn't received soon, ATA class is + * assumed without follow-up softreset. 
+ */ +static void ahci_p5wdh_workaround(struct ata_host *host) +{ + static struct dmi_system_id sysids[] = { + { + .ident = "P5W DH Deluxe", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "ASUSTEK COMPUTER INC"), + DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), + }, + }, + { } + }; + struct pci_dev *pdev = to_pci_dev(host->dev); + + if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) && + dmi_check_system(sysids)) { + struct ata_port *ap = host->ports[1]; + + dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH " + "Deluxe on-board SIMG4726 workaround\n"); + + ap->ops = &ahci_p5wdh_ops; + ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA; + } +} + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; @@ -2024,6 +2165,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ap->ops = &ata_dummy_port_ops; } + /* apply workaround for ASUS P5W DH Deluxe mainboard */ + ahci_p5wdh_workaround(host); + /* initialize adapter */ rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); if (rc) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 2d147b51c97..081e3dfb64d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -68,7 +68,8 @@ const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; static unsigned int ata_dev_init_params(struct ata_device *dev, u16 heads, u16 sectors); static unsigned int ata_dev_set_xfermode(struct ata_device *dev); -static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable); +static unsigned int ata_dev_set_feature(struct ata_device *dev, + u8 enable, u8 feature); static void ata_dev_xfermask(struct ata_device *dev); static unsigned long ata_dev_blacklisted(const struct ata_device *dev); @@ -1799,13 +1800,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, * SET_FEATURES spin-up subcommand before it will accept * anything other than the original IDENTIFY command. */ - ata_tf_init(dev, &tf); - tf.command = ATA_CMD_SET_FEATURES; - tf.feature = SETFEATURES_SPINUP; - tf.protocol = ATA_PROT_NODATA; - tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; - err_mask = ata_exec_internal(dev, &tf, NULL, - DMA_NONE, NULL, 0, 0); + err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); if (err_mask && id[2] != 0x738c) { rc = -EIO; reason = "SPINUP failed"; @@ -2075,7 +2070,8 @@ int ata_dev_configure(struct ata_device *dev) unsigned int err_mask; /* issue SET feature command to turn this on */ - err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE); + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_ENABLE, SATA_AN); if (err_mask) ata_dev_printk(dev, KERN_ERR, "failed to enable ATAPI AN " @@ -2886,6 +2882,13 @@ static int ata_dev_set_mode(struct ata_device *dev) dev->pio_mode <= XFER_PIO_2) err_mask &= ~AC_ERR_DEV; + /* Early MWDMA devices do DMA but don't allow DMA mode setting. 
+ Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ + if (dev->xfer_shift == ATA_SHIFT_MWDMA && + dev->dma_mode == XFER_MW_DMA_0 && + (dev->id[63] >> 8) & 1) + err_mask &= ~AC_ERR_DEV; + if (err_mask) { ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " "(err_mask=0x%x)\n", err_mask); @@ -3947,9 +3950,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, - { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ - { "IOMEGA ZIP 250 ATAPI Floppy", - NULL, ATA_HORKAGE_NODMA }, /* Odd clown on sil3726/4726 PMPs */ { "Config Disk", NULL, ATA_HORKAGE_NODMA | ATA_HORKAGE_SKIP_PM }, @@ -4007,7 +4007,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { } }; -int strn_pattern_cmp(const char *patt, const char *name, int wildchar) +static int strn_pattern_cmp(const char *patt, const char *name, int wildchar) { const char *p; int len; @@ -4181,15 +4181,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; } - /** - * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES + * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES * @dev: Device to which command will be sent * @enable: Whether to enable or disable the feature + * @feature: The sector count represents the feature to set * * Issue SET FEATURES - SATA FEATURES command to device @dev - * on port @ap with sector count set to indicate Asynchronous - * Notification feature + * on port @ap with sector count * * LOCKING: * PCI/etc. bus probe sem. @@ -4197,7 +4196,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) * RETURNS: * 0 on success, AC_ERR_* mask otherwise. */ -static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable) +static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, + u8 feature) { struct ata_taskfile tf; unsigned int err_mask; @@ -4210,7 +4210,7 @@ static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable) tf.feature = enable; tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf.protocol = ATA_PROT_NODATA; - tf.nsect = SATA_AN; + tf.nsect = feature; err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); @@ -4689,8 +4689,8 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) * data in this function or read data in ata_sg_clean. */ offset = lsg->offset + lsg->length - qc->pad_len; - sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT)); - psg->offset = offset_in_page(offset); + sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT), + qc->pad_len, offset_in_page(offset)); if (qc->tf.flags & ATA_TFLAG_WRITE) { void *addr = kmap_atomic(sg_page(psg), KM_IRQ0); @@ -6921,7 +6921,7 @@ int ata_host_activate(struct ata_host *host, int irq, * LOCKING: * Kernel thread context (may sleep). 
*/ -void ata_port_detach(struct ata_port *ap) +static void ata_port_detach(struct ata_port *ap) { unsigned long flags; struct ata_link *link; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 93e2b545b43..8cb35bb8760 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2071,7 +2071,7 @@ int ata_eh_reset(struct ata_link *link, int classify, int try = 0; struct ata_device *dev; unsigned long deadline; - unsigned int action; + unsigned int tmp_action; ata_reset_fn_t reset; unsigned long flags; int rc; @@ -2086,14 +2086,14 @@ int ata_eh_reset(struct ata_link *link, int classify, /* Determine which reset to use and record in ehc->i.action. * prereset() may examine and modify it. */ - action = ehc->i.action; - ehc->i.action &= ~ATA_EH_RESET_MASK; if (softreset && (!hardreset || (!(link->flags & ATA_LFLAG_NO_SRST) && !sata_set_spd_needed(link) && - !(action & ATA_EH_HARDRESET)))) - ehc->i.action |= ATA_EH_SOFTRESET; + !(ehc->i.action & ATA_EH_HARDRESET)))) + tmp_action = ATA_EH_SOFTRESET; else - ehc->i.action |= ATA_EH_HARDRESET; + tmp_action = ATA_EH_HARDRESET; + + ehc->i.action = (ehc->i.action & ~ATA_EH_RESET_MASK) | tmp_action; if (prereset) { rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT); diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index be30923566c..842fe08a3c1 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c @@ -332,12 +332,13 @@ static void ata_dummy_noret(struct ata_port *port) { } -static void pata_icside_postreset(struct ata_port *ap, unsigned int *classes) +static void pata_icside_postreset(struct ata_link *link, unsigned int *classes) { + struct ata_port *ap = link->ap; struct pata_icside_state *state = ap->host->private_data; if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE) - return ata_std_postreset(ap, classes); + return ata_std_postreset(link, classes); state->port[ap->port_no].disabled = 1; @@ -395,29 +396,30 @@ static struct ata_port_operations pata_icside_port_ops = { static void __devinit pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base, - const struct portinfo *info) + struct pata_icside_info *info, + const struct portinfo *port) { struct ata_ioports *ioaddr = &ap->ioaddr; - void __iomem *cmd = base + info->dataoffset; + void __iomem *cmd = base + port->dataoffset; ioaddr->cmd_addr = cmd; - ioaddr->data_addr = cmd + (ATA_REG_DATA << info->stepping); - ioaddr->error_addr = cmd + (ATA_REG_ERR << info->stepping); - ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << info->stepping); - ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << info->stepping); - ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << info->stepping); - ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << info->stepping); - ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << info->stepping); - ioaddr->device_addr = cmd + (ATA_REG_DEVICE << info->stepping); - ioaddr->status_addr = cmd + (ATA_REG_STATUS << info->stepping); - ioaddr->command_addr = cmd + (ATA_REG_CMD << info->stepping); - - ioaddr->ctl_addr = base + info->ctrloffset; + ioaddr->data_addr = cmd + (ATA_REG_DATA << port->stepping); + ioaddr->error_addr = cmd + (ATA_REG_ERR << port->stepping); + ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << port->stepping); + ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << port->stepping); + ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << port->stepping); + ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << port->stepping); + ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << port->stepping); + ioaddr->device_addr = cmd + (ATA_REG_DEVICE << 
port->stepping); + ioaddr->status_addr = cmd + (ATA_REG_STATUS << port->stepping); + ioaddr->command_addr = cmd + (ATA_REG_CMD << port->stepping); + + ioaddr->ctl_addr = base + port->ctrloffset; ioaddr->altstatus_addr = ioaddr->ctl_addr; ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", - info->raw_base + info->dataoffset, - info->raw_base + info->ctrloffset); + info->raw_base + port->dataoffset, + info->raw_base + port->ctrloffset); if (info->raw_ioc_base) ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base); @@ -441,7 +443,7 @@ static int __devinit pata_icside_register_v5(struct pata_icside_info *info) info->nr_ports = 1; info->port[0] = &pata_icside_portinfo_v5; - info->raw_base = ecard_resource_start(ec, ECARD_RES_MEMC); + info->raw_base = ecard_resource_start(info->ec, ECARD_RES_MEMC); return 0; } @@ -522,7 +524,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info) ap->flags |= ATA_FLAG_SLAVE_POSS; ap->ops = &pata_icside_port_ops; - pata_icside_setup_ioaddr(ap, info->base, info->port[i]); + pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); } return ata_host_activate(host, ec->irq, ata_interrupt, 0, diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 2e0279fdd7a..f1b422f7c74 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -365,9 +365,9 @@ static const struct pci_device_id nv_pci_tbl[] = { { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ }, { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ }, - { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), SWNCQ }, - { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), SWNCQ }, - { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), SWNCQ }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC }, + { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC }, { } /* terminate list */ }; diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c index 1b58b010797..241167878ed 100644 --- a/drivers/block/cryptoloop.c +++ b/drivers/block/cryptoloop.c @@ -150,13 +150,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd, u32 iv[4] = { 0, }; iv[0] = cpu_to_le32(IV & 0xffffffff); - sg_set_page(&sg_in, in_page); - sg_in.offset = in_offs; - sg_in.length = sz; - - sg_set_page(&sg_out, out_page); - sg_out.offset = out_offs; - sg_out.length = sz; + sg_set_page(&sg_in, in_page, sz, in_offs); + sg_set_page(&sg_out, out_page, sz, out_offs); desc.info = iv; err = encdecfunc(&desc, &sg_out, &sg_in, sz); diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 7276f7d207c..fac4c6cd04f 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -15,6 +15,7 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/list.h> +#include <linux/scatterlist.h> #include <asm/vio.h> #include <asm/ldc.h> diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 14143f2c484..08e909dc794 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -1428,9 +1428,9 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) scmd->state = UB_CMDST_INIT; scmd->nsg = 1; sg = &scmd->sgv[0]; - sg_set_page(sg, virt_to_page(sc->top_sense)); - sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1); - sg->length = UB_SENSE_SIZE; + sg_init_table(sg, UB_MAX_REQ_SG); + sg_set_page(sg, virt_to_page(sc->top_sense), 
UB_SENSE_SIZE, + (unsigned long)sc->top_sense & (PAGE_SIZE-1)); scmd->len = UB_SENSE_SIZE; scmd->lun = cmd->lun; scmd->done = ub_top_sense_done; @@ -1864,9 +1864,8 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, cmd->state = UB_CMDST_INIT; cmd->nsg = 1; sg = &cmd->sgv[0]; - sg_set_page(sg, virt_to_page(p)); - sg->offset = (unsigned long)p & (PAGE_SIZE-1); - sg->length = 8; + sg_init_table(sg, UB_MAX_REQ_SG); + sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1)); cmd->len = 8; cmd->lun = lun; cmd->done = ub_probe_done; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index a901eee64ba..3cf7129d83e 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -4,7 +4,9 @@ #include <linux/hdreg.h> #include <linux/virtio.h> #include <linux/virtio_blk.h> -#include <linux/virtio_blk.h> +#include <linux/scatterlist.h> + +#define VIRTIO_MAX_SG (3+MAX_PHYS_SEGMENTS) static unsigned char virtblk_index = 'a'; struct virtio_blk @@ -23,7 +25,7 @@ struct virtio_blk mempool_t *pool; /* Scatterlist: can be too big for stack. */ - struct scatterlist sg[3+MAX_PHYS_SEGMENTS]; + struct scatterlist sg[VIRTIO_MAX_SG]; }; struct virtblk_req @@ -94,8 +96,8 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, if (blk_barrier_rq(vbr->req)) vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; - /* We have to zero this, otherwise blk_rq_map_sg gets upset. */ - memset(vblk->sg, 0, sizeof(vblk->sg)); + /* This init could be done at vblk creation time */ + sg_init_table(vblk->sg, VIRTIO_MAX_SG); sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); num = blk_rq_map_sg(q, vbr->req, vblk->sg+1); sg_set_buf(&vblk->sg[num+1], &vbr->in_hdr, sizeof(vbr->in_hdr)); diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 880b5dce3a6..d8bb44b98a6 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -41,9 +41,9 @@ #include <linux/completion.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/scatterlist.h> #include <asm/vio.h> -#include <asm/scatterlist.h> #include <asm/iseries/hv_types.h> #include <asm/iseries/hv_lp_event.h> #include <asm/iseries/vio.h> @@ -258,6 +258,7 @@ static int send_request(struct request *req) cmd = viomajorsubtype_cdio | viocdwrite; } + sg_init_table(&sg, 1); if (blk_rq_map_sg(req->q, req, &sg) == 0) { printk(VIOCD_KERN_WARNING "error setting up scatter/gather list\n"); diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c index 3051e312fdc..f5f4983dfbf 100644 --- a/drivers/ieee1394/dma.c +++ b/drivers/ieee1394/dma.c @@ -111,8 +111,8 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, unsigned long va = (unsigned long)dma->kvirt + (i << PAGE_SHIFT); - sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va)); - dma->sglist[i].length = PAGE_SIZE; + sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va), + PAGE_SIZE, 0); } /* map sglist to the IOMMU */ diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 14159ff2940..4e3128ff73c 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -171,9 +171,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, if (vma_list && !is_vm_hugetlb_page(vma_list[i + off])) umem->hugetlb = 0; - sg_set_page(&chunk->page_list[i], page_list[i + off]); - chunk->page_list[i].offset = 0; - chunk->page_list[i].length = PAGE_SIZE; + sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); } chunk->nmap = 
ib_dma_map_sg(context->device, diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 007b38157fc..1f4d27d7c16 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -113,9 +113,7 @@ static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_m if (!page) return -ENOMEM; - sg_set_page(mem, page); - mem->length = PAGE_SIZE << order; - mem->offset = 0; + sg_set_page(mem, page, PAGE_SIZE << order, 0); return 0; } @@ -481,9 +479,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, if (ret < 0) goto out; - sg_set_page(&db_tab->page[i].mem, pages[0]); - db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE; - db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK; + sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE, + uaddr & ~PAGE_MASK); ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); if (ret < 0) { diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 35d19ae58de..cb4c67025d5 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -128,9 +128,12 @@ static void unmap_switcher(void) __free_pages(switcher_page[i], 0); } -/*L:305 +/*H:032 * Dealing With Guest Memory. * + * Before we go too much further into the Host, we need to grok the routines + * we use to deal with Guest memory. + * * When the Guest gives us (what it thinks is) a physical address, we can use * the normal copy_from_user() & copy_to_user() on the corresponding place in * the memory region allocated by the Launcher. diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index 9d5184c7c14..b478affe8f9 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c @@ -90,6 +90,7 @@ static void do_hcall(struct lguest *lg, struct hcall_args *args) lg->pending_notify = args->arg1; break; default: + /* It should be an architecture-specific hypercall. */ if (lguest_arch_do_hcall(lg, args)) kill_guest(lg, "Bad hypercall %li\n", args->arg0); } @@ -157,7 +158,6 @@ static void do_async_hcalls(struct lguest *lg) * Guest makes a hypercall, we end up here to set things up: */ static void initialize(struct lguest *lg) { - /* You can't do anything until you're initialized. The Guest knows the * rules, so we're unforgiving here. */ if (lg->hcall->arg0 != LHCALL_LGUEST_INIT) { @@ -174,7 +174,8 @@ static void initialize(struct lguest *lg) || get_user(lg->noirq_end, &lg->lguest_data->noirq_end)) kill_guest(lg, "bad guest page %p", lg->lguest_data); - /* We write the current time into the Guest's data page once now. */ + /* We write the current time into the Guest's data page once so it can + * set its clock. */ write_timestamp(lg); /* page_tables.c will also do some setup. */ @@ -182,8 +183,8 @@ static void initialize(struct lguest *lg) /* This is the one case where the above accesses might have been the * first write to a Guest page. This may have caused a copy-on-write - * fault, but the Guest might be referring to the old (read-only) - * page. */ + * fault, but the old page might be (read-only) in the Guest + * pagetable. */ guest_pagetable_clear_all(lg); } @@ -220,7 +221,7 @@ void do_hypercalls(struct lguest *lg) * Normally it doesn't matter: the Guest will run again and * update the trap number before we come back here. * - * However, if we are signalled or the Guest sends DMA to the + * However, if we are signalled or the Guest sends I/O to the * Launcher, the run_guest() loop will exit without running the * Guest. 
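For readers following the "Dealing With Guest Memory" commentary above, it may help to have the underlying routine in view: it is essentially a bounds check plus copy_from_user() against the Launcher-provided region. A paraphrase, assuming the lg->mem_base field and the helpers declared in lg.h (not quoted verbatim from this patch):

#include <linux/uaccess.h>
#include <linux/string.h>
#include "lg.h"

void __lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
{
        /* Refuse addresses outside Guest memory, then copy from the
         * corresponding spot in the Launcher's mapping. */
        if (!lguest_address_ok(lg, addr, bytes)
            || copy_from_user(b, lg->mem_base + addr, bytes) != 0) {
                /* Zero the buffer so callers never see stale data,
                 * then terminate the misbehaving Guest. */
                memset(b, 0, bytes);
                kill_guest(lg, "bad read address %#lx len %u", addr, bytes);
        }
}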
When it comes back it would try to re-run the * hypercall. */ diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index 82966982cb3..2b66f79c208 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c @@ -92,8 +92,8 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err) /* Remember that we never let the Guest actually disable interrupts, so * the "Interrupt Flag" bit is always set. We copy that bit from the - * Guest's "irq_enabled" field into the eflags word: the Guest copies - * it back in "lguest_iret". */ + * Guest's "irq_enabled" field into the eflags word: we saw the Guest + * copy it back in "lguest_iret". */ eflags = lg->regs->eflags; if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0 && !(irq_enable & X86_EFLAGS_IF)) @@ -124,7 +124,7 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err) kill_guest(lg, "Disabling interrupts"); } -/*H:200 +/*H:205 * Virtual Interrupts. * * maybe_do_interrupt() gets called before every entry to the Guest, to see if @@ -256,19 +256,21 @@ int deliver_trap(struct lguest *lg, unsigned int num) * bogus one in): if we fail here, the Guest will be killed. */ if (!idt_present(lg->arch.idt[num].a, lg->arch.idt[num].b)) return 0; - set_guest_interrupt(lg, lg->arch.idt[num].a, lg->arch.idt[num].b, has_err(num)); + set_guest_interrupt(lg, lg->arch.idt[num].a, lg->arch.idt[num].b, + has_err(num)); return 1; } /*H:250 Here's the hard part: returning to the Host every time a trap happens * and then calling deliver_trap() and re-entering the Guest is slow. - * Particularly because Guest userspace system calls are traps (trap 128). + * Particularly because Guest userspace system calls are traps (usually trap + * 128). * * So we'd like to set up the IDT to tell the CPU to deliver traps directly * into the Guest. This is possible, but the complexities cause the size of * this file to double! However, 150 lines of code is worth writing for taking * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all - * the other hypervisors would tease it. + * the other hypervisors would beat it up at lunchtime. * * This routine indicates if a particular trap number could be delivered * directly. */ @@ -331,7 +333,7 @@ void pin_stack_pages(struct lguest *lg) * change stacks on each context switch. */ void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages) { - /* You are not allowd have a stack segment with privilege level 0: bad + /* You are not allowed to have a stack segment with privilege level 0: bad * Guest! */ if ((seg & 0x3) != GUEST_PL) kill_guest(lg, "bad stack segment %i", seg); @@ -350,7 +352,7 @@ void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages) * part of the Host: page table handling. */ /*H:235 This is the routine which actually checks the Guest's IDT entry and - * transfers it into our entry in "struct lguest": */ + * transfers it into the entry in "struct lguest": */ static void set_trap(struct lguest *lg, struct desc_struct *trap, unsigned int num, u32 lo, u32 hi) { @@ -456,6 +458,18 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt, } } +/*H:200 + * The Guest Clock. + * + * There are two sources of virtual interrupts. We saw one in lguest_user.c: + * the Launcher sending interrupts for virtual devices. The other is the Guest + * timer interrupt.
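Of those two sources, the Launcher side is worth one concrete picture before the clock code: queueing a device interrupt is just another write to /dev/lguest. A sketch, where LHREQ_IRQ is the real request code and the fd handling is illustrative:

#include <unistd.h>
#include <err.h>
#include "linux/lguest_launcher.h"

/* Launcher-side sketch: ask the Host to mark interrupt `irq` pending. */
static void trigger_irq(int lguest_fd, unsigned long irq)
{
        unsigned long buf[] = { LHREQ_IRQ, irq };

        if (write(lguest_fd, buf, sizeof(buf)) != 0)
                err(1, "Triggering irq %lu", irq);
}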
+ * + * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long until + * the next timer interrupt (in nanoseconds). We use the high-resolution timer + * infrastructure to set a callback at that time. + * + * 0 means "turn off the clock". */ void guest_set_clockevent(struct lguest *lg, unsigned long delta) { ktime_t expires; @@ -466,20 +480,27 @@ void guest_set_clockevent(struct lguest *lg, unsigned long delta) return; } + /* We use wallclock time here, so the Guest might not be running for + * all the time between now and the timer interrupt it asked for. This + * is almost always the right thing to do. */ expires = ktime_add_ns(ktime_get_real(), delta); hrtimer_start(&lg->hrt, expires, HRTIMER_MODE_ABS); } +/* This is the function called when the Guest's timer expires. */ static enum hrtimer_restart clockdev_fn(struct hrtimer *timer) { struct lguest *lg = container_of(timer, struct lguest, hrt); + /* Remember the first interrupt is the timer interrupt. */ set_bit(0, lg->irqs_pending); + /* If the Guest is actually stopped, we need to wake it up. */ if (lg->halted) wake_up_process(lg->tsk); return HRTIMER_NORESTART; } +/* This sets up the timer for this Guest. */ void init_clockdev(struct lguest *lg) { hrtimer_init(&lg->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS); diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index d9144beca82..86924891b5e 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -74,9 +74,6 @@ struct lguest u32 pgdidx; struct pgdir pgdirs[4]; - /* Cached wakeup: we hold a reference to this task. */ - struct task_struct *wake; - unsigned long noirq_start, noirq_end; unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ @@ -103,7 +100,7 @@ int lguest_address_ok(const struct lguest *lg, void __lgread(struct lguest *, void *, unsigned long, unsigned); void __lgwrite(struct lguest *, unsigned long, const void *, unsigned); -/*L:306 Using memory-copy operations like that is usually inconvient, so we +/*H:035 Using memory-copy operations like that is usually inconvenient, so we * have the following helper macros which read and write a specific type (often * an unsigned long). * @@ -191,7 +188,7 @@ void write_timestamp(struct lguest *lg); * Let's step aside for the moment, to study one important routine that's used * widely in the Host code. * - * There are many cases where the Guest does something invalid, like pass crap + * There are many cases where the Guest can do something invalid, like pass crap * to a hypercall. Since only the Guest kernel can make hypercalls, it's quite * acceptable to simply terminate the Guest and give the Launcher a nicely * formatted reason. It's also simpler for the Guest itself, which doesn't diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 71c64837b43..8904f72f97c 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c @@ -53,7 +53,8 @@ struct lguest_device { * Device configurations * * The configuration information for a device consists of a series of fields. - * The device will look for these fields during setup. + * We don't really care what they are: the Launcher set them up, and the driver + * will look at them during setup. * * For us these fields come immediately after that device's descriptor in the * lguest_devices page.
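Since that comment pins the layout down ("immediately after that device's descriptor"), locating a device's config space is one pointer addition. A hypothetical helper sketching that arithmetic; the name is mine and the driver may open-code the equivalent:

/* Config bytes sit directly after the descriptor in the
 * lguest_devices page, so step over the descriptor itself. */
static u8 *device_config(struct lguest_device_desc *desc)
{
        return (u8 *)(desc + 1);
}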
@@ -122,8 +123,8 @@ static void lg_set_status(struct virtio_device *vdev, u8 status) * The other piece of infrastructure virtio needs is a "virtqueue": a way of * the Guest device registering buffers for the other side to read from or * write into (ie. send and receive buffers). Each device can have multiple - * virtqueues: for example the console has one queue for sending and one for - * receiving. + * virtqueues: for example the console driver uses one queue for sending and + * another for receiving. * * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue * already exists in virtio_ring.c. We just need to connect it up. @@ -158,7 +159,7 @@ static void lg_notify(struct virtqueue *vq) * * This is kind of an ugly duckling. It'd be nicer to have a standard * representation of a virtqueue in the configuration space, but it seems that - * everyone wants to do it differently. The KVM guys want the Guest to + * everyone wants to do it differently. The KVM coders want the Guest to * allocate its own pages and tell the Host where they are, but for lguest it's * simpler for the Host to simply tell us where the pages are. * @@ -284,6 +285,8 @@ static void add_lguest_device(struct lguest_device_desc *d) { struct lguest_device *ldev; + /* Start with zeroed memory; Linux's device layer seems to count on + * it. */ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); if (!ldev) { printk(KERN_EMERG "Cannot allocate lguest dev %u\n", diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index ee405b38383..9d716fa42ca 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -8,20 +8,22 @@ #include <linux/fs.h> #include "lg.h" -/*L:315 To force the Guest to stop running and return to the Launcher, the - * Waker sets writes LHREQ_BREAK and the value "1" to /dev/lguest. The - * Launcher then writes LHREQ_BREAK and "0" to release the Waker. */ +/*L:055 When something happens, the Waker process needs a way to stop the + * kernel running the Guest and return to the Launcher. So the Waker writes + * LHREQ_BREAK and the value "1" to /dev/lguest to do this. Once the Launcher + * has done whatever needs attention, it writes LHREQ_BREAK and "0" to release + * the Waker. */ static int break_guest_out(struct lguest *lg, const unsigned long __user *input) { unsigned long on; - /* Fetch whether they're turning break on or off.. */ + /* Fetch whether they're turning break on or off. */ if (get_user(on, input) != 0) return -EFAULT; if (on) { lg->break_out = 1; - /* Pop it out (may be running on different CPU) */ + /* Pop it out of the Guest (may be running on different CPU) */ wake_up_process(lg->tsk); /* Wait for them to reset it */ return wait_event_interruptible(lg->break_wq, !lg->break_out); @@ -58,7 +60,7 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) if (!lg) return -EINVAL; - /* If you're not the task which owns the guest, go away. */ + /* If you're not the task which owns the Guest, go away. */ if (current != lg->tsk) return -EPERM; @@ -92,8 +94,8 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) * base: The start of the Guest-physical memory inside the Launcher memory. * * pfnlimit: The highest (Guest-physical) page number the Guest should be - * allowed to access. The Launcher has to live in Guest memory, so it sets - * this to ensure the Guest can't reach it. + * allowed to access. 
The Guest memory lives inside the Launcher, so it sets + * this to ensure the Guest can only reach its own memory. * * pgdir: The (Guest-physical) address of the top of the initial Guest * pagetables (which are set up by the Launcher). @@ -189,7 +191,7 @@ unlock: } /*L:010 The first operation the Launcher does must be a write. All writes - * start with a 32 bit number: for the first write this must be + * start with an unsigned long number: for the first write this must be * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use * writes of other values to send interrupts. */ static ssize_t write(struct file *file, const char __user *in, @@ -275,8 +277,7 @@ static int close(struct inode *inode, struct file *file) * The Launcher is the Host userspace program which sets up, runs and services * the Guest. In fact, many comments in the Drivers which refer to "the Host" * doing things are inaccurate: the Launcher does all the device handling for - * the Guest. The Guest can't tell what's done by the the Launcher and what by - * the Host. + * the Guest, but the Guest can't know that. * * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we * shall see more of that later. diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 2a45f0691c9..fffabb32715 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -26,7 +26,8 @@ * * We use two-level page tables for the Guest. If you're not entirely * comfortable with virtual addresses, physical addresses and page tables then - * I recommend you review lguest.c's "Page Table Handling" (with diagrams!). + * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with + * diagrams!). * * The Guest keeps page tables, but we maintain the actual ones here: these are * called "shadow" page tables. Which is a very Guest-centric name: these are @@ -36,11 +37,11 @@ * * Anyway, this is the most complicated part of the Host code. There are seven * parts to this: - * (i) Setting up a page table entry for the Guest when it faults, - * (ii) Setting up the page table entry for the Guest stack, - * (iii) Setting up a page table entry when the Guest tells us it has changed, + * (i) Looking up a page table entry when the Guest faults, + * (ii) Making sure the Guest stack is mapped, + * (iii) Setting up a page table entry when the Guest tells us one has changed, * (iv) Switching page tables, - * (v) Flushing (thowing away) page tables, + * (v) Flushing (throwing away) page tables, * (vi) Mapping the Switcher when the Guest is about to run, * (vii) Setting up the page tables initially. :*/ @@ -57,16 +58,15 @@ static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) -/*H:320 With our shadow and Guest types established, we need to deal with - * them: the page table code is curly enough to need helper functions to keep - * it clear and clean. +/*H:320 The page table code is curly enough to need helper functions to keep it + * clear and clean. * * There are two functions which return pointers to the shadow (aka "real") * page tables. * * spgd_addr() takes the virtual address and returns a pointer to the top-level - * page directory entry for that address. Since we keep track of several page - * tables, the "i" argument tells us which one we're interested in (it's + * page directory entry (PGD) for that address. 
Since we keep track of several + * page tables, the "i" argument tells us which one we're interested in (it's * usually the current one). */ static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr) { @@ -81,9 +81,9 @@ static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr) return &lg->pgdirs[i].pgdir[index]; } -/* This routine then takes the PGD entry given above, which contains the - * address of the PTE page. It then returns a pointer to the PTE entry for the - * given address. */ +/* This routine then takes the page directory entry returned above, which + * contains the address of the page table entry (PTE) page. It then returns a + * pointer to the PTE entry for the given address. */ static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr) { pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT); @@ -191,7 +191,7 @@ static void check_gpgd(struct lguest *lg, pgd_t gpgd) } /*H:330 - * (i) Setting up a page table entry for the Guest when it faults + * (i) Looking up a page table entry when the Guest faults. * * We saw this call in run_guest(): when we see a page fault in the Guest, we * come here. That's because we only set up the shadow page tables lazily as @@ -199,7 +199,7 @@ static void check_gpgd(struct lguest *lg, pgd_t gpgd) * and return to the Guest without it knowing. * * If we fixed up the fault (ie. we mapped the address), this routine returns - * true. */ + * true. Otherwise, it was a real fault and we need to tell the Guest. */ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode) { pgd_t gpgd; @@ -246,16 +246,16 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode) if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) return 0; - /* User access to a kernel page? (bit 3 == user access) */ + /* User access to a kernel-only page? (bit 3 == user access) */ if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) return 0; /* Check that the Guest PTE flags are OK, and the page number is below * the pfn_limit (ie. not mapping the Launcher binary). */ check_gpte(lg, gpte); + /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ gpte = pte_mkyoung(gpte); - if (errcode & 2) gpte = pte_mkdirty(gpte); @@ -272,23 +272,28 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode) else /* If this is a read, don't set the "writable" bit in the page * table entry, even if the Guest says it's writable. That way - * we come back here when a write does actually ocur, so we can - * update the Guest's _PAGE_DIRTY flag. */ + * we will come back here when a write does actually occur, so + * we can update the Guest's _PAGE_DIRTY flag. */ *spte = gpte_to_spte(lg, pte_wrprotect(gpte), 0); /* Finally, we write the Guest PTE entry back: we've set the * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ lgwrite(lg, gpte_ptr, pte_t, gpte); - /* We succeeded in mapping the page! */ + /* The fault is fixed, the page table is populated, the mapping + * manipulated, the result returned and the code complete. A small + * delay and a trace of alliteration are the only indications the Guest + * has that a page fault occurred at all. */ return 1; } -/*H:360 (ii) Setting up the page table entry for the Guest stack. +/*H:360 + * (ii) Making sure the Guest stack is mapped. * - * Remember pin_stack_pages() which makes sure the stack is mapped? It could - * simply call demand_page(), but as we've seen that logic is quite long, and - * usually the stack pages are already mapped anyway, so it's not required. 
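Before moving on to stack pinning, it may help to see spgd_addr() and spte_addr() glued together. A sketch of the two-level walk they implement if written as one function inside page_tables.c (the walk_shadow() name is mine; the real code inlines this into its callers, interleaved with fixup logic):

static pte_t *walk_shadow(struct lguest *lg, u32 i, unsigned long vaddr)
{
        /* The top ten bits of the address index the page directory... */
        pgd_t *spgd = spgd_addr(lg, i, vaddr);

        if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
                return NULL;            /* no shadow PTE page here yet */
        /* ...and the next ten bits index within the PTE page. */
        return spte_addr(lg, *spgd, vaddr);
}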
+ * Remember that direct traps into the Guest need a mapped Guest kernel stack. + * pin_stack_pages() calls us here: we could simply call demand_page(), but as + * we've seen that logic is quite long, and usually the stack pages are already + * mapped, so it's overkill. * * This is a quick version which answers the question: is this virtual address * mapped by the shadow page tables, and is it writable? */ @@ -297,7 +302,7 @@ static int page_writable(struct lguest *lg, unsigned long vaddr) pgd_t *spgd; unsigned long flags; - /* Look at the top level entry: is it present? */ + /* Look at the current top level entry: is it present? */ spgd = spgd_addr(lg, lg->pgdidx, vaddr); if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) return 0; @@ -333,15 +338,14 @@ static void release_pgd(struct lguest *lg, pgd_t *spgd) release_pte(ptepage[i]); /* Now we can free the page of PTEs */ free_page((long)ptepage); - /* And zero out the PGD entry we we never release it twice. */ + /* And zero out the PGD entry so we never release it twice. */ *spgd = __pgd(0); } } -/*H:440 (v) Flushing (thowing away) page tables, - * - * We saw flush_user_mappings() called when we re-used a top-level pgdir page. - * It simply releases every PTE page from 0 up to the kernel address. */ +/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() + * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. + * It simply releases every PTE page from 0 up to the Guest's kernel address. */ static void flush_user_mappings(struct lguest *lg, int idx) { unsigned int i; @@ -350,8 +354,10 @@ static void flush_user_mappings(struct lguest *lg, int idx) release_pgd(lg, lg->pgdirs[idx].pgdir + i); } -/* The Guest also has a hypercall to do this manually: it's used when a large - * number of mappings have been changed. */ +/*H:440 (v) Flushing (throwing away) page tables, + * + * The Guest has a hypercall to throw away the page tables: it's used when a + * large number of mappings have been changed. */ void guest_pagetable_flush_user(struct lguest *lg) { /* Drop the userspace part of the current page table. */ @@ -423,8 +429,9 @@ static unsigned int new_pgdir(struct lguest *lg, /*H:430 (iv) Switching page tables * - * This is what happens when the Guest changes page tables (ie. changes the - * top-level pgdir). This happens on almost every context switch. */ + * Now we've seen all the page table setting and manipulation, let's see + * what happens when the Guest changes page tables (ie. changes the top-level + * pgdir). This occurs on almost every context switch. */ void guest_new_pagetable(struct lguest *lg, unsigned long pgtable) { int newpgdir, repin = 0; @@ -443,7 +450,8 @@ void guest_new_pagetable(struct lguest *lg, unsigned long pgtable) } /*H:470 Finally, a routine which throws away everything: all PGD entries in all - * the shadow page tables. This is used when we destroy the Guest. */ + * the shadow page tables, including the Guest's kernel mappings. This is used + * when we destroy the Guest. */ static void release_all_pagetables(struct lguest *lg) { unsigned int i, j; @@ -458,13 +466,22 @@ static void release_all_pagetables(struct lguest *lg) /* We also throw away everything when a Guest tells us it's changed a kernel * mapping. Since kernel mappings are in every page table, it's easiest to - * throw them all away. This is amazingly slow, but thankfully rare. */ + * throw them all away. This traps the Guest in amber for a while as + * everything faults back in, but it's rare.
*/ void guest_pagetable_clear_all(struct lguest *lg) { release_all_pagetables(lg); /* We need the Guest kernel stack mapped again. */ pin_stack_pages(lg); } +/*:*/ +/*M:009 Since we throw away all mappings when a kernel mapping changes, our + * performance sucks for guests using highmem. In fact, a guest with + * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is + * usually slower than a Guest with less memory. + * + * This, of course, cannot be fixed. It would take some kind of... well, I + * don't know, but the term "puissant code-fu" comes to mind. :*/ /*H:420 This is the routine which actually sets the page table entry for then * "idx"'th shadow page table. @@ -483,7 +500,7 @@ void guest_pagetable_clear_all(struct lguest *lg) static void do_set_pte(struct lguest *lg, int idx, unsigned long vaddr, pte_t gpte) { - /* Look up the matching shadow page directot entry. */ + /* Look up the matching shadow page directory entry. */ pgd_t *spgd = spgd_addr(lg, idx, vaddr); /* If the top level isn't present, there's no entry to update. */ @@ -500,7 +517,8 @@ static void do_set_pte(struct lguest *lg, int idx, *spte = gpte_to_spte(lg, gpte, pte_flags(gpte) & _PAGE_DIRTY); } else - /* Otherwise we can demand_page() it in later. */ + /* Otherwise kill it and we can demand_page() it in + * later. */ *spte = __pte(0); } } @@ -535,7 +553,7 @@ void guest_set_pte(struct lguest *lg, } /*H:400 - * (iii) Setting up a page table entry when the Guest tells us it has changed. + * (iii) Setting up a page table entry when the Guest tells us one has changed. * * Just like we did in interrupts_and_traps.c, it makes sense for us to deal * with the other side of page tables while we're here: what happens when the @@ -612,9 +630,10 @@ void free_guest_pagetable(struct lguest *lg) /*H:480 (vi) Mapping the Switcher when the Guest is about to run. * - * The Switcher and the two pages for this CPU need to be available to the + * The Switcher and the two pages for this CPU need to be visible in the * Guest (and not the pages for other CPUs). We have the appropriate PTE pages - * for each CPU already set up, we just need to hook them in. */ + * for each CPU already set up, we just need to hook them in now we know which + * Guest is about to run on this CPU. */ void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages) { pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); @@ -677,6 +696,18 @@ static __init void populate_switcher_pte_page(unsigned int cpu, __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); } +/* We've made it through the page table code. Perhaps our tired brains are + * still processing the details, or perhaps we're simply glad it's over. + * + * If nothing else, note that all this complexity in juggling shadow page + * tables in sync with the Guest's page tables is for one reason: for most + * Guests this page table dance determines how bad performance will be. This + * is why Xen uses exotic direct Guest pagetable manipulation, and why both + * Intel and AMD have implemented shadow page table support directly into + * hardware. + * + * There is just one file remaining in the Host. */ + /*H:510 At boot or module load time, init_pagetables() allocates and populates * the Switcher PTE page for each CPU. 
*/ __init int init_pagetables(struct page **switcher_page, unsigned int pages) diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c index c2434ec99f7..9e189cbec7d 100644 --- a/drivers/lguest/segments.c +++ b/drivers/lguest/segments.c @@ -12,8 +12,6 @@ #include "lg.h" /*H:600 - * We've almost completed the Host; there's just one file to go! - * * Segments & The Global Descriptor Table * * (That title sounds like a bad Nerdcore group. Not to suggest that there are @@ -55,7 +53,7 @@ static int ignored_gdt(unsigned int num) || num == GDT_ENTRY_DOUBLEFAULT_TSS); } -/*H:610 Once the GDT has been changed, we fix the new entries up a little. We +/*H:630 Once the Guest gave us new GDT entries, we fix them up a little. We * don't care if they're invalid: the worst that can happen is a General * Protection Fault in the Switcher when it restores a Guest segment register * which tries to use that entry. Then we kill the Guest for causing such a @@ -84,25 +82,33 @@ static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end) } } -/* This routine is called at boot or modprobe time for each CPU to set up the - * "constant" GDT entries for Guests running on that CPU. */ +/*H:610 Like the IDT, we never simply use the GDT the Guest gives us. We keep + * a GDT for each CPU, and copy across the Guest's entries each time we want to + * run the Guest on that CPU. + * + * This routine is called at boot or modprobe time for each CPU to set up the + * constant GDT entries: the ones which are the same no matter what Guest we're + * running. */ void setup_default_gdt_entries(struct lguest_ro_state *state) { struct desc_struct *gdt = state->guest_gdt; unsigned long tss = (unsigned long)&state->guest_tss; - /* The hypervisor segments are full 0-4G segments, privilege level 0 */ + /* The Switcher segments are full 0-4G segments, privilege level 0 */ gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; - /* The TSS segment refers to the TSS entry for this CPU, so we cannot - * copy it from the Guest. Forgive the magic flags */ + /* The TSS segment refers to the TSS entry for this particular CPU. + * Forgive the magic flags: the 0x8900 means the entry is Present, it's + * privilege level 0 Available 386 TSS system segment, and the 0x67 + * means Saturn is eclipsed by Mercury in the twelfth house. */ gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16); gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000) | ((tss >> 16) & 0x000000FF); } -/* This routine is called before the Guest is run for the first time. */ +/* This routine sets up the initial Guest GDT for booting. All entries start + * as 0 (unusable). */ void setup_guest_gdt(struct lguest *lg) { /* Start with full 0-4G segments... */ @@ -114,13 +120,8 @@ void setup_guest_gdt(struct lguest *lg) lg->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); } -/* Like the IDT, we never simply use the GDT the Guest gives us. We set up the - * GDTs for each CPU, then we copy across the entries each time we want to run - * a different Guest on that CPU. */ - -/* A partial GDT load, for the three "thead-local storage" entries. Otherwise - * it's just like load_guest_gdt(). So much, in fact, it would probably be - * neater to have a single hypercall to cover both. */ +/*H:650 An optimization of copy_gdt(), for just the three "thread-local storage" * entries.
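For anyone who wants the twelfth-house flags above decoded without a datasheet, here is a restatement of those two assignments with the bit fields spelled out. Purely illustrative, not a replacement for the code in the hunk:

static void tss_descriptor_sketch(struct desc_struct *d, unsigned long tss)
{
        d->a = 0x00000067               /* limit 0x67: a 104-byte 386 TSS,
                                         * since the limit field is size-1 */
               | (tss << 16);           /* base bits 15..0 */
        d->b = 0x00008900               /* P=1, DPL=0, type 0x9:
                                         * "available 32-bit TSS" */
               | (tss & 0xFF000000)     /* base bits 31..24 */
               | ((tss >> 16) & 0x000000FF);    /* base bits 23..16 */
}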
*/ void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt) { unsigned int i; @@ -129,7 +130,9 @@ void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt) gdt[i] = lg->arch.gdt[i]; } -/* This is the full version */ +/*H:640 When the Guest is run on a different CPU, or the GDT entries have + * changed, copy_gdt() is called to copy the Guest's GDT entries across to this + * CPU's GDT. */ void copy_gdt(const struct lguest *lg, struct desc_struct *gdt) { unsigned int i; @@ -141,7 +144,8 @@ void copy_gdt(const struct lguest *lg, struct desc_struct *gdt) gdt[i] = lg->arch.gdt[i]; } -/* This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). */ +/*H:620 This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). + * We copy it from the Guest and tweak the entries. */ void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num) { /* We assume the Guest has the same number of GDT entries as the @@ -157,16 +161,22 @@ void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num) lg->changed |= CHANGED_GDT; } +/* This is the fast-track version for just changing the three TLS entries. + * Remember that this happens on every context switch, so it's worth + * optimizing. But wouldn't it be neater to have a single hypercall to cover + * both cases? */ void guest_load_tls(struct lguest *lg, unsigned long gtls) { struct desc_struct *tls = &lg->arch.gdt[GDT_ENTRY_TLS_MIN]; __lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES); fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1); + /* Note that just the TLS entries have changed. */ lg->changed |= CHANGED_GDT_TLS; } +/*:*/ -/* +/*H:660 * With this, we have finished the Host. * * Five of the seven parts of our task are complete. You have made it through diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 9eed12d5a39..482aec2a963 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -63,7 +63,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu) static DEFINE_PER_CPU(struct lguest *, last_guest); /*S:010 - * We are getting close to the Switcher. + * We approach the Switcher. * * Remember that each CPU has two pages which are visible to the Guest when it * runs on that CPU. This has to contain the state for that Guest: we copy the @@ -134,7 +134,7 @@ static void run_guest_once(struct lguest *lg, struct lguest_pages *pages) * * The lcall also pushes the old code segment (KERNEL_CS) onto the * stack, then the address of this call. This stack layout happens to - * exactly match the stack of an interrupt... */ + * exactly match the stack layout created by an interrupt... */ asm volatile("pushf; lcall *lguest_entry" /* This is how we tell GCC that %eax ("a") and %ebx ("b") * are changed by this routine. The "=" means output. */ @@ -151,40 +151,46 @@ static void run_guest_once(struct lguest *lg, struct lguest_pages *pages) } /*:*/ +/*M:002 There are hooks in the scheduler which we can register to tell when we + * get kicked off the CPU (preempt_notifier_register()). This would allow us + * to lazily disable SYSENTER which would regain some performance, and should + * also simplify copy_in_guest_info(). Note that we'd still need to restore + * things when we exit to Launcher userspace, but that's fairly easy. + * + * The hooks were designed for KVM, but we can also put them to good use. :*/ + /*H:040 This is the i386-specific code to setup and run the Guest. Interrupts * are disabled: we own the CPU. 
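The M:002 idea above is not hypothetical infrastructure: preempt notifiers are a real API, added to the kernel for KVM. What lguest would have to write is roughly the following; the lguest_sched_in/out names and comment bodies are my illustration of the comment's plan, not existing code:

#include <linux/preempt.h>

static void lguest_sched_out(struct preempt_notifier *pn,
                             struct task_struct *next)
{
        /* We lost the CPU: e.g. restore SYSENTER for the Host here. */
}

static void lguest_sched_in(struct preempt_notifier *pn, int cpu)
{
        /* Back on a (possibly different) CPU: mark the per-CPU Guest
         * state stale so copy_in_guest_info() knows to redo it. */
}

static struct preempt_ops lguest_preempt_ops = {
        .sched_in  = lguest_sched_in,
        .sched_out = lguest_sched_out,
};

static void lguest_watch_preemption(struct preempt_notifier *pn)
{
        preempt_notifier_init(pn, &lguest_preempt_ops);
        preempt_notifier_register(pn);
}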
*/ void lguest_arch_run_guest(struct lguest *lg) { - /* Remember the awfully-named TS bit? If the Guest has asked - * to set it we set it now, so we can trap and pass that trap - * to the Guest if it uses the FPU. */ + /* Remember the awfully-named TS bit? If the Guest has asked to set it + * we set it now, so we can trap and pass that trap to the Guest if it + * uses the FPU. */ if (lg->ts) lguest_set_ts(); - /* SYSENTER is an optimized way of doing system calls. We - * can't allow it because it always jumps to privilege level 0. - * A normal Guest won't try it because we don't advertise it in - * CPUID, but a malicious Guest (or malicious Guest userspace - * program) could, so we tell the CPU to disable it before - * running the Guest. */ + /* SYSENTER is an optimized way of doing system calls. We can't allow + * it because it always jumps to privilege level 0. A normal Guest + * won't try it because we don't advertise it in CPUID, but a malicious + * Guest (or malicious Guest userspace program) could, so we tell the + * CPU to disable it before running the Guest. */ if (boot_cpu_has(X86_FEATURE_SEP)) wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); - /* Now we actually run the Guest. It will pop back out when - * something interesting happens, and we can examine its - * registers to see what it was doing. */ + /* Now we actually run the Guest. It will return when something + * interesting happens, and we can examine its registers to see what it + * was doing. */ run_guest_once(lg, lguest_pages(raw_smp_processor_id())); - /* The "regs" pointer contains two extra entries which are not - * really registers: a trap number which says what interrupt or - * trap made the switcher code come back, and an error code - * which some traps set. */ + /* Note that the "regs" pointer contains two extra entries which are + * not really registers: a trap number which says what interrupt or + * trap made the switcher code come back, and an error code which some + * traps set. */ - /* If the Guest page faulted, then the cr2 register will tell - * us the bad virtual address. We have to grab this now, - * because once we re-enable interrupts an interrupt could - * fault and thus overwrite cr2, or we could even move off to a - * different CPU. */ + /* If the Guest page faulted, then the cr2 register will tell us the + * bad virtual address. We have to grab this now, because once we + * re-enable interrupts an interrupt could fault and thus overwrite + * cr2, or we could even move off to a different CPU. */ if (lg->regs->trapnum == 14) lg->arch.last_pagefault = read_cr2(); /* Similarly, if we took a trap because the Guest used the FPU, @@ -197,14 +203,15 @@ void lguest_arch_run_guest(struct lguest *lg) wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); } -/*H:130 Our Guest is usually so well behaved; it never tries to do things it - * isn't allowed to. Unfortunately, Linux's paravirtual infrastructure isn't - * quite complete, because it doesn't contain replacements for the Intel I/O - * instructions. As a result, the Guest sometimes fumbles across one during - * the boot process as it probes for various things which are usually attached - * to a PC. +/*H:130 Now we've examined the hypercall code; our Guest can make requests. + * Our Guest is usually so well behaved; it never tries to do things it isn't + * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual + * infrastructure isn't quite complete, because it doesn't contain replacements + * for the Intel I/O instructions. 
As a result, the Guest sometimes fumbles + * across one during the boot process as it probes for various things which are + * usually attached to a PC. * - * When the Guest uses one of these instructions, we get trap #13 (General + * When the Guest uses one of these instructions, we get a trap (General * Protection Fault) and come here. We see if it's one of those troublesome * instructions and skip over it. We return true if we did. */ static int emulate_insn(struct lguest *lg) @@ -275,43 +282,43 @@ static int emulate_insn(struct lguest *lg) void lguest_arch_handle_trap(struct lguest *lg) { switch (lg->regs->trapnum) { - case 13: /* We've intercepted a GPF. */ - /* Check if this was one of those annoying IN or OUT - * instructions which we need to emulate. If so, we - * just go back into the Guest after we've done it. */ + case 13: /* We've intercepted a General Protection Fault. */ + /* Check if this was one of those annoying IN or OUT + * instructions which we need to emulate. If so, we just go + * back into the Guest after we've done it. */ if (lg->regs->errcode == 0) { if (emulate_insn(lg)) return; } break; - case 14: /* We've intercepted a page fault. */ - /* The Guest accessed a virtual address that wasn't - * mapped. This happens a lot: we don't actually set - * up most of the page tables for the Guest at all when - * we start: as it runs it asks for more and more, and - * we set them up as required. In this case, we don't - * even tell the Guest that the fault happened. - * - * The errcode tells whether this was a read or a - * write, and whether kernel or userspace code. */ + case 14: /* We've intercepted a Page Fault. */ + /* The Guest accessed a virtual address that wasn't mapped. + * This happens a lot: we don't actually set up most of the + * page tables for the Guest at all when we start: as it runs + * it asks for more and more, and we set them up as + * required. In this case, we don't even tell the Guest that + * the fault happened. + * + * The errcode tells whether this was a read or a write, and + * whether kernel or userspace code. */ if (demand_page(lg, lg->arch.last_pagefault, lg->regs->errcode)) return; - /* OK, it's really not there (or not OK): the Guest - * needs to know. We write out the cr2 value so it - * knows where the fault occurred. - * - * Note that if the Guest were really messed up, this - * could happen before it's done the INITIALIZE - * hypercall, so lg->lguest_data will be NULL */ + /* OK, it's really not there (or not OK): the Guest needs to + * know. We write out the cr2 value so it knows where the + * fault occurred. + * + * Note that if the Guest were really messed up, this could + * happen before it's done the LHCALL_LGUEST_INIT hypercall, so + * lg->lguest_data could be NULL */ if (lg->lguest_data && put_user(lg->arch.last_pagefault, &lg->lguest_data->cr2)) kill_guest(lg, "Writing cr2"); break; case 7: /* We've intercepted a Device Not Available fault. */ - /* If the Guest doesn't want to know, we already - * restored the Floating Point Unit, so we just - * continue without telling it. */ + /* If the Guest doesn't want to know, we already restored the + * Floating Point Unit, so we just continue without telling + * it. */ if (!lg->ts) return; break; @@ -536,9 +543,6 @@ int lguest_arch_init_hypercalls(struct lguest *lg) return 0; } -/* Now we've examined the hypercall code; our Guest can make requests. There - * is one other way we can do things for the Guest, as we see in - * emulate_insn(). 
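For concreteness, the "troublesome instructions" are the four port-I/O forms, and recognizing them takes only a byte compare. A skeleton of the decode step: the opcodes are standard x86, but the helper's shape is mine, and the real emulate_insn() additionally handles the 0x66 operand-size prefix:

/* Returns true if `insn` is an IN/OUT we emulate; sets *in for IN and
 * advances *insnlen past the instruction so the Guest can resume. */
static int decode_port_io(u8 insn, unsigned int *insnlen, int *in)
{
        switch (insn & 0xFE) {  /* bit 0 only picks %al vs %ax/%eax */
        case 0xE4:              /* in  <imm8>,%al : two bytes long */
                *insnlen += 2;
                *in = 1;
                break;
        case 0xEC:              /* in  (%dx),%al : one byte long */
                *insnlen += 1;
                *in = 1;
                break;
        case 0xE6:              /* out %al,<imm8> */
                *insnlen += 2;
                break;
        case 0xEE:              /* out %al,(%dx) */
                *insnlen += 1;
                break;
        default:
                return 0;       /* not port I/O: a genuine GPF */
        }
        return 1;
}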
:*/ /*L:030 lguest_arch_setup_regs() * @@ -562,7 +566,7 @@ void lguest_arch_setup_regs(struct lguest *lg, unsigned long start) * is supposed to always be "1". Bit 9 (0x200) controls whether * interrupts are enabled. We always leave interrupts enabled while * running the Guest. */ - regs->eflags = 0x202; + regs->eflags = X86_EFLAGS_IF | 0x2; /* The "Extended Instruction Pointer" register says where the Guest is * running. */ @@ -570,8 +574,8 @@ void lguest_arch_setup_regs(struct lguest *lg, unsigned long start) /* %esi points to our boot information, at physical address 0, so don't * touch it. */ + /* There are a couple of GDT entries the Guest expects when first * booting. */ - setup_guest_gdt(lg); } diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S index 1010b90b11f..0af8baaa0d4 100644 --- a/drivers/lguest/x86/switcher_32.S +++ b/drivers/lguest/x86/switcher_32.S @@ -6,6 +6,37 @@ * are feeling invigorated and refreshed then the next, more challenging stage * can be found in "make Guest". :*/ +/*M:012 Lguest is meant to be simple: my rule of thumb is that 1% more LOC must + * gain at least 1% more performance. Since neither LOC nor performance can be + * measured beforehand, it generally means implementing a feature then deciding + * if it's worth it. And once it's implemented, who can say no? + * + * This is why I haven't implemented this idea myself. I want to, but I + * haven't. You could, though. + * + * The main place where lguest performance sucks is Guest page faulting. When + * a Guest userspace process hits an unmapped page we switch back to the Host, + * walk the page tables, find it's not mapped, switch back to the Guest page + * fault handler, which calls a hypercall to set the page table entry, then + * finally returns to userspace. That's two round-trips. + * + * If we had a small walker in the Switcher, we could quickly check the Guest + * page table and if the page isn't mapped, immediately reflect the fault back + * into the Guest. This means the Switcher would have to know the top of the + * Guest page table and the page fault handler address. + * + * For simplicity, the Guest should only handle the case where the privilege + * level of the fault is 3 and probably only not present or write faults. It + * should also detect recursive faults, and hand the original fault to the + * Host (which is actually really easy). + * + * Two questions remain. Would the performance gain outweigh the complexity? + * And who would write the verse documenting it? :*/ + +/*M:011 Lguest64 handles NMI. This gave me NMI envy (until I looked at their + * code). It's worth doing though, since it would let us use oprofile in the + * Host when a Guest is running. :*/ + /*S:100 * Welcome to the Switcher itself! * @@ -88,7 +119,7 @@ ENTRY(switch_to_guest) // All saved and there's now five steps before us: // Stack, GDT, IDT, TSS - // And last of all the page tables are flipped. + // Then last of all the page tables are flipped. // Yet beware that our stack pointer must be // Always valid lest an NMI hits @@ -103,25 +134,25 @@ ENTRY(switch_to_guest) lgdt LGUEST_PAGES_guest_gdt_desc(%eax) // The Guest's IDT we did partially - // Move to the "struct lguest_pages" as well. + // Copy to "struct lguest_pages" as well. lidt LGUEST_PAGES_guest_idt_desc(%eax) // The TSS entry which controls traps // Must be loaded up with "ltr" now: + // The GDT entry that TSS uses + // Changes type when we load it: damn Intel! 
// For after we switch over our page tables - // It (as the rest) will be writable no more. - // (The GDT entry TSS needs - // Changes type when we load it: damn Intel!) + // That entry will be read-only: we'd crash. movl $(GDT_ENTRY_TSS*8), %edx ltr %dx // Look back now, before we take this last step! // The Host's TSS entry was also marked used; - // Let's clear it again, ere we return. + // Let's clear it again for our return. // The GDT descriptor of the Host // Points to the table after two "size" bytes movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx - // Clear the type field of "used" (byte 5, bit 2) + // Clear "used" from type field (byte 5, bit 2) andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx) // Once our page table's switched, the Guest is live! @@ -131,7 +162,7 @@ ENTRY(switch_to_guest) // The page table change did one tricky thing: // The Guest's register page has been mapped - // Writable onto our %esp (stack) -- + // Writable under our %esp (stack) -- // We can simply pop off all Guest regs. popl %eax popl %ebx @@ -152,16 +183,15 @@ ENTRY(switch_to_guest) addl $8, %esp // The last five stack slots hold return address - // And everything needed to change privilege - // Into the Guest privilege level of 1, + // And everything needed to switch privilege + // From Switcher's level 0 to Guest's 1, // And the stack where the Guest had last left it. // Interrupts are turned back on: we are Guest. iret -// There are two paths where we switch to the Host +// We treat two paths to switch back to the Host +// Yet both must save Guest state and restore Host // So we put the routine in a macro. -// We are on our way home, back to the Host -// Interrupted out of the Guest, we come here. #define SWITCH_TO_HOST \ /* We save the Guest state: all registers first \ * Laid out just as "struct lguest_regs" defines */ \ @@ -194,7 +224,7 @@ ENTRY(switch_to_guest) movl %esp, %eax; \ andl $(~(1 << PAGE_SHIFT - 1)), %eax; \ /* Save our trap number: the switch will obscure it \ - * (The Guest regs are not mapped here in the Host) \ + * (In the Host the Guest regs are not mapped here) \ * %ebx holds it safe for deliver_to_host */ \ movl LGUEST_PAGES_regs_trapnum(%eax), %ebx; \ /* The Host GDT, IDT and stack! \ @@ -210,9 +240,9 @@ ENTRY(switch_to_guest) /* Switch to Host's GDT, IDT. */ \ lgdt LGUEST_PAGES_host_gdt_desc(%eax); \ lidt LGUEST_PAGES_host_idt_desc(%eax); \ - /* Restore the Host's stack where it's saved regs lie */ \ + /* Restore the Host's stack where its saved regs lie */ \ movl LGUEST_PAGES_host_sp(%eax), %esp; \ - /* Last the TSS: our Host is complete */ \ + /* Last the TSS: our Host is returned */ \ movl $(GDT_ENTRY_TSS*8), %edx; \ ltr %dx; \ /* Restore now the regs saved right at the first. */ \ @@ -222,14 +252,15 @@ ENTRY(switch_to_guest) popl %ds; \ popl %es -// Here's where we come when the Guest has just trapped: -// (Which trap we'll see has been pushed on the stack). +// The first path is trod when the Guest has trapped: +// (Which trap it was has been pushed on the stack). // We need only switch back, and the Host will decode // Why we came home, and what needs to be done. return_to_host: SWITCH_TO_HOST iret +// We are lead to the second path like so: // An interrupt, with some cause external // Has ajerked us rudely from the Guest's code // Again we must return home to the Host @@ -238,7 +269,7 @@ deliver_to_host: // But now we must go home via that place // Where that interrupt was supposed to go // Had we not been ensconced, running the Guest. 
- // Here we see the cleverness of our stack: + // Here we see the trickiness of run_guest_once(): // The Host stack is formed like an interrupt // With EIP, CS and EFLAGS layered. // Interrupt handlers end with "iret" @@ -263,7 +294,7 @@ deliver_to_host: xorw %ax, %ax orl %eax, %edx // Now the address of the handler's in %edx - // We call it now: its "iret" takes us home. + // We call it now: its "iret" drops us home. jmp *%edx // Every interrupt can come to us here diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index ac54f697c50..1c159ac68c9 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -351,14 +351,10 @@ static int crypt_convert(struct crypt_config *cc, struct scatterlist sg_in, sg_out; sg_init_table(&sg_in, 1); - sg_set_page(&sg_in, bv_in->bv_page); - sg_in.offset = bv_in->bv_offset + ctx->offset_in; - sg_in.length = 1 << SECTOR_SHIFT; + sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in); sg_init_table(&sg_out, 1); - sg_set_page(&sg_out, bv_out->bv_page); - sg_out.offset = bv_out->bv_offset + ctx->offset_out; - sg_out.length = 1 << SECTOR_SHIFT; + sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out); ctx->offset_in += sg_in.length; if (ctx->offset_in >= bv_in->bv_len) { diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c index 2b1f8b4be00..cb034ead95a 100644 --- a/drivers/media/common/saa7146_core.c +++ b/drivers/media/common/saa7146_core.c @@ -118,8 +118,7 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages) if (NULL == pg) goto err; BUG_ON(PageHighMem(pg)); - sg_set_page(&sglist[i], pg); - sglist[i].length = PAGE_SIZE; + sg_set_page(&sglist[i], pg, PAGE_SIZE, 0); } return sglist; diff --git a/drivers/media/video/ivtv/ivtv-udma.c b/drivers/media/video/ivtv/ivtv-udma.c index 912b424e520..460db03b0ba 100644 --- a/drivers/media/video/ivtv/ivtv-udma.c +++ b/drivers/media/video/ivtv/ivtv-udma.c @@ -49,8 +49,6 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info unsigned int len = (i == dma_page->page_count - 1) ?
dma_page->tail : PAGE_SIZE - offset; - dma->SGlist[map_offset].length = len; - dma->SGlist[map_offset].offset = offset; if (PageHighMem(dma->map[map_offset])) { void *src; @@ -63,10 +61,10 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len); kunmap_atomic(src, KM_BOUNCE_READ); local_irq_restore(flags); - sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset]); + sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset); } else { - sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset]); + sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset); } offset = 0; map_offset++; diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c index 9ab94a749d8..44ee408e145 100644 --- a/drivers/media/video/videobuf-dma-sg.c +++ b/drivers/media/video/videobuf-dma-sg.c @@ -67,8 +67,7 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages) if (NULL == pg) goto err; BUG_ON(PageHighMem(pg)); - sg_set_page(&sglist[i], pg); - sglist[i].length = PAGE_SIZE; + sg_set_page(&sglist[i], pg, PAGE_SIZE, 0); } return sglist; @@ -95,16 +94,13 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset) if (PageHighMem(pages[0])) /* DMA to highmem pages might not work */ goto highmem; - sg_set_page(&sglist[0], pages[0]); - sglist[0].offset = offset; - sglist[0].length = PAGE_SIZE - offset; + sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset); for (i = 1; i < nr_pages; i++) { if (NULL == pages[i]) goto nopage; if (PageHighMem(pages[i])) goto highmem; - sg_set_page(&sglist[i], pages[i]); - sglist[i].length = PAGE_SIZE; + sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0); } return sglist; diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index d602ba6d541..682406168de 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -284,6 +284,7 @@ static inline struct i2o_block_request *i2o_block_request_alloc(void) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&ireq->queue); + sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS); return ireq; }; diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index bcbb6d247bf..c77fadc0dfa 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -40,13 +40,13 @@ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> +#include <linux/scatterlist.h> #include <linux/mmc/host.h> #include <asm/io.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_dbdma.h> #include <asm/mach-au1x00/au1100_mmc.h> -#include <asm/scatterlist.h> #include <au1xxx.h> #include "au1xmmc.h" diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index d0eb0a2abf4..95244a7e735 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -20,11 +20,11 @@ #include <linux/mmc/host.h> #include <linux/amba/bus.h> #include <linux/clk.h> +#include <linux/scatterlist.h> #include <asm/cacheflush.h> #include <asm/div64.h> #include <asm/io.h> -#include <asm/scatterlist.h> #include <asm/sizes.h> #include <asm/mach/mmc.h> @@ -167,7 +167,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, * partially written to a page is properly coherent.
*/ if (host->sg_len && data->flags & MMC_DATA_READ) - flush_dcache_page(host->sg_ptr->page); + flush_dcache_page(sg_page(host->sg_ptr)); } if (status & MCI_DATAEND) { mmci_stop_data(host); @@ -319,7 +319,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) * page, ensure that the data cache is coherent. */ if (status & MCI_RXACTIVE) - flush_dcache_page(host->sg_ptr->page); + flush_dcache_page(sg_page(host->sg_ptr)); if (!mmci_next_sg(host)) break; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 0601e01aa2c..a25ee71998a 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -29,7 +29,6 @@ #include <asm/dma.h> #include <asm/io.h> -#include <asm/scatterlist.h> #include <asm/sizes.h> #include <asm/arch/pxa-regs.h> diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index d7c5b94d8c5..6b80bf77a4e 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -17,8 +17,6 @@ #include <linux/mmc/host.h> -#include <asm/scatterlist.h> - #include "sdhci.h" #define DRIVER_NAME "sdhci" diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index fa4c8c53cc7..4d5f3742187 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -33,10 +33,10 @@ #include <linux/pnp.h> #include <linux/highmem.h> #include <linux/mmc/host.h> +#include <linux/scatterlist.h> #include <asm/io.h> #include <asm/dma.h> -#include <asm/scatterlist.h> #include "wbsd.h" diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6909becb10f..6937ef0e727 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -188,6 +188,7 @@ struct bond_parm_tbl arp_validate_tbl[] = { /*-------------------------- Forward declarations ---------------------------*/ static void bond_send_gratuitous_arp(struct bonding *bond); +static void bond_deinit(struct net_device *bond_dev); /*---------------------------- General routines -----------------------------*/ @@ -3681,7 +3682,7 @@ static int bond_open(struct net_device *bond_dev) } if (bond->params.mode == BOND_MODE_8023AD) { - INIT_DELAYED_WORK(&bond->ad_work, bond_alb_monitor); + INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); queue_delayed_work(bond->wq, &bond->ad_work, 0); /* register to receive LACPDUs */ bond_register_lacpdu(bond); @@ -4449,7 +4450,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params) /* De-initialize device specific data. * Caller must hold rtnl_lock. 
*/ -void bond_deinit(struct net_device *bond_dev) +static void bond_deinit(struct net_device *bond_dev) { struct bonding *bond = bond_dev->priv; diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index d1ed14bf1cc..61c1b4536d3 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -302,7 +302,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_de int bond_create(char *name, struct bond_params *params, struct bonding **newbond); void bond_destroy(struct bonding *bond); int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev); -void bond_deinit(struct net_device *bond_dev); int bond_create_sysfs(void); void bond_destroy_sysfs(void); void bond_destroy_sysfs_entry(struct bonding *bond); diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 57541d2d9e1..6fd95a2c8ce 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c @@ -34,6 +34,7 @@ #include <linux/skbuff.h> #include <linux/mii.h> #include <linux/phy.h> +#include <linux/phy_fixed.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <asm/gpio.h> @@ -53,12 +54,6 @@ MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable"); MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); #define CPMAC_VERSION "0.5.0" -/* stolen from net/ieee80211.h */ -#ifndef MAC_FMT -#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" -#define MAC_ARG(x) ((u8*)(x))[0], ((u8*)(x))[1], ((u8*)(x))[2], \ - ((u8*)(x))[3], ((u8*)(x))[4], ((u8*)(x))[5] -#endif /* frame size + 802.1q tag */ #define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4) #define CPMAC_QUEUES 8 @@ -211,6 +206,7 @@ struct cpmac_priv { struct net_device *dev; struct work_struct reset_work; struct platform_device *pdev; + struct napi_struct napi; }; static irqreturn_t cpmac_irq(int, void *); @@ -362,47 +358,48 @@ static void cpmac_set_multicast_list(struct net_device *dev) } } -static struct sk_buff *cpmac_rx_one(struct net_device *dev, - struct cpmac_priv *priv, +static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, struct cpmac_desc *desc) { struct sk_buff *skb, *result = NULL; if (unlikely(netif_msg_hw(priv))) - cpmac_dump_desc(dev, desc); + cpmac_dump_desc(priv->dev, desc); cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); if (unlikely(!desc->datalen)) { if (netif_msg_rx_err(priv) && net_ratelimit()) printk(KERN_WARNING "%s: rx: spurious interrupt\n", - dev->name); + priv->dev->name); return NULL; } - skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE); + skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE); if (likely(skb)) { skb_reserve(skb, 2); skb_put(desc->skb, desc->datalen); - desc->skb->protocol = eth_type_trans(desc->skb, dev); + desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); desc->skb->ip_summed = CHECKSUM_NONE; - dev->stats.rx_packets++; - dev->stats.rx_bytes += desc->datalen; + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += desc->datalen; result = desc->skb; - dma_unmap_single(&dev->dev, desc->data_mapping, CPMAC_SKB_SIZE, - DMA_FROM_DEVICE); + dma_unmap_single(&priv->dev->dev, desc->data_mapping, + CPMAC_SKB_SIZE, DMA_FROM_DEVICE); desc->skb = skb; - desc->data_mapping = dma_map_single(&dev->dev, skb->data, + desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, CPMAC_SKB_SIZE, DMA_FROM_DEVICE); desc->hw_data = (u32)desc->data_mapping; if (unlikely(netif_msg_pktdata(priv))) { - printk(KERN_DEBUG "%s: received packet:\n", dev->name); - cpmac_dump_skb(dev, result); + printk(KERN_DEBUG 
"%s: received packet:\n", + priv->dev->name); + cpmac_dump_skb(priv->dev, result); } } else { if (netif_msg_rx_err(priv) && net_ratelimit()) printk(KERN_WARNING - "%s: low on skbs, dropping packet\n", dev->name); - dev->stats.rx_dropped++; + "%s: low on skbs, dropping packet\n", + priv->dev->name); + priv->dev->stats.rx_dropped++; } desc->buflen = CPMAC_SKB_SIZE; @@ -411,25 +408,25 @@ static struct sk_buff *cpmac_rx_one(struct net_device *dev, return result; } -static int cpmac_poll(struct net_device *dev, int *budget) +static int cpmac_poll(struct napi_struct *napi, int budget) { struct sk_buff *skb; struct cpmac_desc *desc; - int received = 0, quota = min(dev->quota, *budget); - struct cpmac_priv *priv = netdev_priv(dev); + int received = 0; + struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); spin_lock(&priv->rx_lock); if (unlikely(!priv->rx_head)) { if (netif_msg_rx_err(priv) && net_ratelimit()) printk(KERN_WARNING "%s: rx: polling, but no queue\n", - dev->name); - netif_rx_complete(dev); + priv->dev->name); + netif_rx_complete(priv->dev, napi); return 0; } desc = priv->rx_head; - while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) { - skb = cpmac_rx_one(dev, priv, desc); + while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { + skb = cpmac_rx_one(priv, desc); if (likely(skb)) { netif_receive_skb(skb); received++; @@ -439,13 +436,11 @@ static int cpmac_poll(struct net_device *dev, int *budget) priv->rx_head = desc; spin_unlock(&priv->rx_lock); - *budget -= received; - dev->quota -= received; if (unlikely(netif_msg_rx_status(priv))) - printk(KERN_DEBUG "%s: poll processed %d packets\n", dev->name, - received); + printk(KERN_DEBUG "%s: poll processed %d packets\n", + priv->dev->name, received); if (desc->dataflags & CPMAC_OWN) { - netif_rx_complete(dev); + netif_rx_complete(priv->dev, napi); cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping); cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); return 0; @@ -655,6 +650,7 @@ static void cpmac_hw_error(struct work_struct *work) spin_unlock(&priv->rx_lock); cpmac_clear_tx(priv->dev); cpmac_hw_start(priv->dev); + napi_enable(&priv->napi); netif_start_queue(priv->dev); } @@ -681,8 +677,10 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id) if (status & MAC_INT_RX) { queue = (status >> 8) & 7; - netif_rx_schedule(dev); - cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); + if (netif_rx_schedule_prep(dev, &priv->napi)) { + cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); + __netif_rx_schedule(dev, &priv->napi); + } } cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); @@ -692,6 +690,7 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id) printk(KERN_ERR "%s: hw error, resetting...\n", dev->name); netif_stop_queue(dev); + napi_disable(&priv->napi); cpmac_hw_stop(dev); schedule_work(&priv->reset_work); if (unlikely(netif_msg_hw(priv))) @@ -849,6 +848,15 @@ static void cpmac_adjust_link(struct net_device *dev) spin_unlock(&priv->lock); } +static int cpmac_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + status->link = 1; + status->speed = 100; + status->duplex = 1; + return 0; +} + static int cpmac_open(struct net_device *dev) { int i, size, res; @@ -857,15 +865,6 @@ static int cpmac_open(struct net_device *dev) struct cpmac_desc *desc; struct sk_buff *skb; - priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, - 0, PHY_INTERFACE_MODE_MII); - if (IS_ERR(priv->phy)) { - if (netif_msg_drv(priv)) - printk(KERN_ERR "%s: Could not attach to 
PHY\n", - dev->name); - return PTR_ERR(priv->phy); - } - mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) { if (netif_msg_drv(priv)) @@ -927,6 +926,7 @@ static int cpmac_open(struct net_device *dev) INIT_WORK(&priv->reset_work, cpmac_hw_error); cpmac_hw_start(dev); + napi_enable(&priv->napi); priv->phy->state = PHY_CHANGELINK; phy_start(priv->phy); @@ -951,8 +951,6 @@ fail_remap: release_mem_region(mem->start, mem->end - mem->start); fail_reserve: - phy_disconnect(priv->phy); - return res; } @@ -965,9 +963,8 @@ static int cpmac_stop(struct net_device *dev) netif_stop_queue(dev); cancel_work_sync(&priv->reset_work); + napi_disable(&priv->napi); phy_stop(priv->phy); - phy_disconnect(priv->phy); - priv->phy = NULL; cpmac_hw_stop(dev); @@ -1001,11 +998,13 @@ static int external_switch; static int __devinit cpmac_probe(struct platform_device *pdev) { - int rc, phy_id; + int rc, phy_id, i; struct resource *mem; struct cpmac_priv *priv; struct net_device *dev; struct plat_cpmac_data *pdata; + struct fixed_info *fixed_phy; + DECLARE_MAC_BUF(mac); pdata = pdev->dev.platform_data; @@ -1053,21 +1052,51 @@ static int __devinit cpmac_probe(struct platform_device *pdev) dev->set_multicast_list = cpmac_set_multicast_list; dev->tx_timeout = cpmac_tx_timeout; dev->ethtool_ops = &cpmac_ethtool_ops; - dev->poll = cpmac_poll; - dev->weight = 64; dev->features |= NETIF_F_MULTI_QUEUE; + netif_napi_add(dev, &priv->napi, cpmac_poll, 64); + spin_lock_init(&priv->lock); spin_lock_init(&priv->rx_lock); priv->dev = dev; priv->ring_size = 64; priv->msg_enable = netif_msg_init(debug_level, 0xff); memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); + if (phy_id == 31) { - snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, - cpmac_mii.id, phy_id); - } else - snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1); + snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, cpmac_mii.id, + phy_id); + } else { + /* Let's try to get a free fixed phy... 
*/ + for (i = 0; i < MAX_PHY_AMNT; i++) { + fixed_phy = fixed_mdio_get_phydev(i); + if (!fixed_phy) + continue; + if (!fixed_phy->phydev->attached_dev) { + strncpy(priv->phy_name, + fixed_phy->phydev->dev.bus_id, + BUS_ID_SIZE); + fixed_mdio_set_link_update(fixed_phy->phydev, + &cpmac_link_update); + goto phy_found; + } + } + if (netif_msg_drv(priv)) + printk(KERN_ERR "%s: Could not find fixed PHY\n", + dev->name); + rc = -ENODEV; + goto fail; + } + +phy_found: + priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(priv->phy)) { + if (netif_msg_drv(priv)) + printk(KERN_ERR "%s: Could not attach to PHY\n", + dev->name); + return PTR_ERR(priv->phy); + } if ((rc = register_netdev(dev))) { printk(KERN_ERR "cpmac: error %i registering device %s\n", rc, @@ -1077,9 +1106,9 @@ static int __devinit cpmac_probe(struct platform_device *pdev) if (netif_msg_probe(priv)) { printk(KERN_INFO - "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: " - MAC_FMT ")\n", dev->name, (void *)mem->start, dev->irq, - priv->phy_name, MAC_ARG(dev->dev_addr)); + "cpmac: device %s (regs: %p, irq: %d, phy: %s, " + "mac: %s)\n", dev->name, (void *)mem->start, dev->irq, + priv->phy_name, print_mac(mac, dev->dev_addr)); } return 0; diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index b557bb44a36..4b4b74e47a6 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0078" +#define DRV_VERSION "EHEA_0079" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 2809c99906e..0a7e7892554 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -2329,7 +2329,7 @@ static void port_napi_disable(struct ehea_port *port) { int i; - for (i = 0; i < port->num_def_qps; i++) + for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) napi_disable(&port->port_res[i].napi); } @@ -2337,7 +2337,7 @@ static void port_napi_enable(struct ehea_port *port) { int i; - for (i = 0; i < port->num_def_qps; i++) + for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) napi_enable(&port->port_res[i].napi); } @@ -2373,8 +2373,6 @@ static int ehea_down(struct net_device *dev) ehea_drop_multicast_list(dev); ehea_free_interrupts(dev); - port_napi_disable(port); - port->state = EHEA_PORT_DOWN; ret = ehea_clean_all_portres(port); @@ -2396,6 +2394,7 @@ static int ehea_stop(struct net_device *dev) flush_scheduled_work(); down(&port->port_lock); netif_stop_queue(dev); + port_napi_disable(port); ret = ehea_down(dev); up(&port->port_lock); return ret; diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 70ddf1acfd8..92ce2e38f0d 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -5597,6 +5597,22 @@ static struct pci_device_id pci_tbl[] = { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, + { /* MCP77 Ethernet Controller 
*/ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, {0,}, }; diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index 68887235d7e..dbd23bb65d1 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c @@ -55,6 +55,26 @@ MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver " DrvVer); MODULE_LICENSE("GPL"); +//variable record -- index by leading revision/length +//Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN +static unsigned short DefaultPhyParam[] = { + // 11/12/03 IP1000A v1-3 rev=0x40 + /*-------------------------------------------------------------------------- + (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, + 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, + 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, + --------------------------------------------------------------------------*/ + // 12/17/03 IP1000A v1-4 rev=0x40 + (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, + 0x0000, + 30, 0x005e, 9, 0x0700, + // 01/09/04 IP1000A v1-5 rev=0x41 + (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, + 0x0000, + 30, 0x005e, 9, 0x0700, + 0x0000 +}; + static const char *ipg_brand_name[] = { "IC PLUS IP1000 1000/100/10 based NIC", "Sundance Technology ST2021 based NIC", @@ -990,7 +1010,7 @@ static void ipg_nic_txcleanup(struct net_device *dev) } /* Provides statistical information about the IPG NIC. 
*/ -struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) +static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) { struct ipg_nic_private *sp = netdev_priv(dev); void __iomem *ioaddr = sp->ioaddr; diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h index e418b9035ca..d5d092c9d0a 100644 --- a/drivers/net/ipg.h +++ b/drivers/net/ipg.h @@ -833,24 +833,4 @@ struct ipg_nic_private { struct delayed_work task; }; -//variable record -- index by leading revision/length -//Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN -unsigned short DefaultPhyParam[] = { - // 11/12/03 IP1000A v1-3 rev=0x40 - /*-------------------------------------------------------------------------- - (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, - 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, - 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, - --------------------------------------------------------------------------*/ - // 12/17/03 IP1000A v1-4 rev=0x40 - (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, - 0x0000, - 30, 0x005e, 9, 0x0700, - // 01/09/04 IP1000A v1-5 rev=0x41 - (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, - 0x0000, - 30, 0x005e, 9, 0x0700, - 0x0000 -}; - #endif /* __LINUX_IPG_H */ diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c index 887633b207d..2a5bef6388f 100644 --- a/drivers/net/mlx4/icm.c +++ b/drivers/net/mlx4/icm.c @@ -101,9 +101,7 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_ma if (!page) return -ENOMEM; - sg_set_page(mem, page); - mem->length = PAGE_SIZE << order; - mem->offset = 0; + sg_set_page(mem, page, PAGE_SIZE << order, 0); return 0; } diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 953117152bb..87cde062fd6 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -864,6 +864,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, np = netdev_priv(dev); netif_napi_add(dev, &np->napi, natsemi_poll, 64); + np->dev = dev; np->pci_dev = pdev; pci_set_drvdata(pdev, dev); diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index cd991a0f75b..1ebe3259be0 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -512,11 +512,19 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf) } tmp = le32_to_cpu(u.init_c->max_transfer_size); if (tmp < dev->hard_mtu) { - dev_err(&intf->dev, - "dev can't take %u byte packets (max %u)\n", - dev->hard_mtu, tmp); - retval = -EINVAL; - goto fail_and_release; + if (tmp <= net->hard_header_len) { + dev_err(&intf->dev, + "dev can't take %u byte packets (max %u)\n", + dev->hard_mtu, tmp); + retval = -EINVAL; + goto fail_and_release; + } + dev->hard_mtu = tmp; + net->mtu = dev->hard_mtu - net->hard_header_len; + dev_warn(&intf->dev, + "dev can't take %u byte packets (max %u), " + "adjusting MTU to %u\n", + dev->hard_mtu, tmp, net->mtu); } /* REVISIT: peripheral "alignment" request is ignored ... 
*/ diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index fd5d0c1570d..00118499018 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -562,8 +562,6 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) sg_init_table(sg_list->sg, sg_list->count); for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) { - sg->length = min(size, PAGE_SIZE); - sg->offset = 0; address = (void *) get_zeroed_page(GFP_KERNEL); if (address == NULL) { sg_list->count = i; @@ -571,7 +569,7 @@ retval = -ENOMEM; goto out; } - zfcp_address_to_sg(address, sg); + zfcp_address_to_sg(address, sg, min(size, PAGE_SIZE)); size -= sg->length; } @@ -1518,13 +1516,13 @@ zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool) return -ENOMEM; memset(data, 0, sizeof(*data)); + sg_init_table(&data->req, 1); + sg_init_table(&data->resp, 1); data->ct.req = &data->req; data->ct.resp = &data->resp; data->ct.req_count = data->ct.resp_count = 1; - zfcp_address_to_sg(&data->ct_iu_req, &data->req); - zfcp_address_to_sg(&data->ct_iu_resp, &data->resp); - data->req.length = sizeof(struct ct_iu_gid_pn_req); - data->resp.length = sizeof(struct ct_iu_gid_pn_resp); + zfcp_address_to_sg(&data->ct_iu_req, &data->req, sizeof(struct ct_iu_gid_pn_req)); + zfcp_address_to_sg(&data->ct_iu_resp, &data->resp, sizeof(struct ct_iu_gid_pn_resp)); *gid_pn = data; return 0; diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 326e7ee232c..0754542978b 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -74,8 +74,7 @@ zfcp_sg_to_address(struct scatterlist *list) static inline void -zfcp_address_to_sg(void *address, struct scatterlist *list) +zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size) { - sg_set_page(list, virt_to_page(address)); - list->offset = ((unsigned long) address) & (PAGE_SIZE - 1); + sg_set_buf(list, address, size); } #define REQUEST_LIST_SIZE 128 diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 9438d0b2879..5552b755c08 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -322,9 +322,9 @@ zfcp_erp_adisc(struct zfcp_port *port) if (address == NULL) goto nomem; - zfcp_address_to_sg(address, send_els->req); + zfcp_address_to_sg(address, send_els->req, sizeof(struct zfcp_ls_adisc)); address += PAGE_SIZE >> 1; - zfcp_address_to_sg(address, send_els->resp); + zfcp_address_to_sg(address, send_els->resp, sizeof(struct zfcp_ls_adisc_acc)); send_els->req_count = send_els->resp_count = 1; send_els->adapter = adapter; @@ -336,9 +336,6 @@ adisc = zfcp_sg_to_address(send_els->req); send_els->ls_code = adisc->code = ZFCP_LS_ADISC; - send_els->req->length = sizeof(struct zfcp_ls_adisc); - send_els->resp->length = sizeof(struct zfcp_ls_adisc_acc); - /* acc.
to FC-FS, hard_nport_id in ADISC should not be set for ports without FC-AL-2 capability, so we don't set it */ adisc->wwpn = fc_host_port_name(adapter->scsi_host); diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index d1780980fb2..a9680b5e8ac 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -477,10 +477,9 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd) for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; cmd->SCp.buffers_residual && - virt_to_phys(page_address(cmd->SCp.buffer[1].page) + - cmd->SCp.buffer[1].offset) == endaddr;) { + virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) { MER_PRINTK("VTOP(%p) == %08lx -> merging\n", - page_address(cmd->SCp.buffer[1].page), endaddr); + page_address(sg_page(&cmd->SCp.buffer[1])), endaddr); #if (NDEBUG & NDEBUG_MERGING) ++cnt; #endif diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 439b97a6a26..0841df01bc1 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -2890,7 +2890,7 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) return NULL; } - sg_set_page(&scatterlist[i], page); + sg_set_page(&scatterlist[i], page, 0, 0); } return sglist; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 6ce4109efdf..097a136398c 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -79,9 +79,7 @@ static inline void iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg) { sg_init_table(&ibuf->sg, 1); - sg_set_page(&ibuf->sg, sg_page(sg)); - ibuf->sg.offset = sg->offset; - ibuf->sg.length = sg->length; + sg_set_page(&ibuf->sg, sg_page(sg), sg->length, sg->offset); /* * Fastpath: sg element fits into single page */ diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 1c5c4b68f20..4652ad22516 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -5256,8 +5256,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma) STbuffer->sg[0].offset = 0; if (page != NULL) { - sg_set_page(&STbuffer->sg[0], page); - STbuffer->sg[0].length = b_size; + sg_set_page(&STbuffer->sg[0], page, b_size, 0); STbuffer->b_data = page_address(page); break; } @@ -5285,8 +5284,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma) normalize_buffer(STbuffer); return 0; } - sg_set_page(&STbuffer->sg[segs], page); - STbuffer->sg[segs].length = (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size; + sg_set_page(&STbuffer->sg[segs], page, (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size, 0); got += STbuffer->sg[segs].length; STbuffer->buffer_size = got; STbuffer->sg_segs = ++segs; diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index cc197100284..b5fa4f09138 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1717,16 +1717,12 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, goto out_unlock; */ } - sg_set_page(sgl, pages[0]); - sgl[0].offset = uaddr & ~PAGE_MASK; + sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK); if (nr_pages > 1) { sgl[0].length = PAGE_SIZE - sgl[0].offset; count -= sgl[0].length; - for (i=1; i < nr_pages ; i++) { - sg_set_page(&sgl[i], pages[i]); - sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE; - count -= PAGE_SIZE; - } + for (i=1; i < nr_pages ; i++) { + sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0); + count -= PAGE_SIZE; + } } else { sgl[0].length = count; @@ -1854,8 +1850,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) scatter_elem_sz_prev = ret_sz; } } - sg_set_page(sg, p); - sg->length = (ret_sz > num) ? num : ret_sz; + sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0); SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " "ret_sz=%d\n", k, num, ret_sz)); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index ce69b9efc10..98dfd6ea209 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -3797,13 +3797,11 @@ static void buf_to_sg(struct st_buffer *STbp, unsigned int length) sg = &(STbp->sg[0]); frp = STbp->frp; for (i=count=0; count < length; i++) { - sg_set_page(&sg[i], frp[i].page); if (length - count > frp[i].length) - sg[i].length = frp[i].length; + sg_set_page(&sg[i], frp[i].page, frp[i].length, 0); else - sg[i].length = length - count; + sg_set_page(&sg[i], frp[i].page, length - count, 0); count += sg[i].length; - sg[i].offset = 0; } STbp->sg_segs = i; STbp->frp_sg_current = length; @@ -4446,15 +4444,13 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa } /* Populate the scatter/gather list */ - sg_set_page(&sgl[0], pages[0]); - sgl[0].offset = uaddr & ~PAGE_MASK; + sg_set_page(&sgl[0], pages[0], 0, uaddr & ~PAGE_MASK); if (nr_pages > 1) { sgl[0].length = PAGE_SIZE - sgl[0].offset; count -= sgl[0].length; for (i=1; i < nr_pages ; i++) { - sg_set_page(&sgl[i], pages[i]);; - sgl[i].offset = 0; - sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE; + sg_set_page(&sgl[i], pages[i], + count < PAGE_SIZE ? count : PAGE_SIZE, 0); count -= PAGE_SIZE; } } diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index 80fb3f88af2..1bc41907a03 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c @@ -332,8 +332,8 @@ static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) struct scatterlist *sg = sp->SCp.buffer; while (sz >= 0) { - sg[sz].dma_address = dvma_map((unsigned long)page_address(sg[sz].page) + - sg[sz].offset, sg[sz].length); + sg[sz].dma_address = dvma_map((unsigned long)sg_virt(&sg[sz]), + sg[sz].length); sz--; } sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address); diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 0a9882edf56..7a472b12999 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -282,10 +282,8 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg, while (size > 0 && i < sg_size) { pg = virt_to_page(addr); offset = offset_in_page(addr); - if (sg) { - sg_set_page(&sg[i], pg); - sg[i].offset = offset; - } + if (sg) + sg_set_page(&sg[i], pg, 0, offset); remainder_of_page = PAGE_CACHE_SIZE - offset; if (size >= remainder_of_page) { if (sg) @@ -716,12 +714,8 @@ ecryptfs_encrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat, sg_init_table(&src_sg, 1); sg_init_table(&dst_sg, 1); - sg_set_page(&src_sg, src_page); - src_sg.offset = src_offset; - src_sg.length = size; - sg_set_page(&dst_sg, dst_page); - dst_sg.offset = dst_offset; - dst_sg.length = size; + sg_set_page(&src_sg, src_page, size, src_offset); + sg_set_page(&dst_sg, dst_page, size, dst_offset); return encrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv); } @@ -746,14 +740,11 @@ ecryptfs_decrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat, struct scatterlist src_sg, dst_sg; sg_init_table(&src_sg, 1); + sg_set_page(&src_sg, src_page, size, src_offset); + sg_init_table(&dst_sg, 1); + sg_set_page(&dst_sg, dst_page, size,
dst_offset); - sg_set_page(&src_sg, src_page); - src_sg.offset = src_offset; - src_sg.length = size; - sg_set_page(&dst_sg, dst_page); - dst_sg.offset = dst_offset; - dst_sg.length = size; return decrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv); } diff --git a/fs/mbcache.c b/fs/mbcache.c index 1046cbefbfb..eb31b73e7d6 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -403,9 +403,9 @@ mb_cache_entry_alloc(struct mb_cache *cache) { struct mb_cache_entry *ce; - atomic_inc(&cache->c_entry_count); ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL); if (ce) { + atomic_inc(&cache->c_entry_count); INIT_LIST_HEAD(&ce->e_lru_list); INIT_LIST_HEAD(&ce->e_block_list); ce->e_cache = cache; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 680c429bfa2..4e57fcf8598 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -171,7 +171,8 @@ static ssize_t proc_sys_read(struct file *filp, char __user *buf, struct dentry *dentry = filp->f_dentry; struct ctl_table_header *head; struct ctl_table *table; - ssize_t error, res; + ssize_t error; + size_t res; table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); /* Has the sysctl entry disappeared on us? */ @@ -209,7 +210,8 @@ static ssize_t proc_sys_write(struct file *filp, const char __user *buf, struct dentry *dentry = filp->f_dentry; struct ctl_table_header *head; struct ctl_table *table; - ssize_t error, res; + ssize_t error; + size_t res; table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); /* Has the sysctl entry disappeared on us? */ diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h index a7131630c05..57dc672bab8 100644 --- a/include/asm-avr32/dma-mapping.h +++ b/include/asm-avr32/dma-mapping.h @@ -3,7 +3,7 @@ #include <linux/mm.h> #include <linux/device.h> -#include <asm/scatterlist.h> +#include <linux/scatterlist.h> #include <asm/processor.h> #include <asm/cacheflush.h> #include <asm/io.h> diff --git a/include/asm-frv/scatterlist.h b/include/asm-frv/scatterlist.h index 99ba76edc42..2e7143b5a7a 100644 --- a/include/asm-frv/scatterlist.h +++ b/include/asm-frv/scatterlist.h @@ -16,8 +16,7 @@ * * can be rewritten as * - * sg_set_page(virt_to_page(some_ptr)); - * sg->offset = (unsigned long) some_ptr & ~PAGE_MASK; + * sg_set_buf(sg, some_ptr, length); * * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens */ diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h index f948491eb56..9c5092b6aa9 100644 --- a/include/asm-x86/lguest_hcall.h +++ b/include/asm-x86/lguest_hcall.h @@ -18,12 +18,17 @@ #define LHCALL_LOAD_TLS 16 #define LHCALL_NOTIFY 17 +#define LGUEST_TRAP_ENTRY 0x1F + +#ifndef __ASSEMBLY__ +#include <asm/hw_irq.h> + /*G:031 First, how does our Guest contact the Host to ask for privileged * operations? There are two ways: the direct way is to make a "hypercall", * to make requests of the Host Itself. * * Our hypercall mechanism uses the highest unused trap code (traps 32 and - * above are used by real hardware interrupts). Seventeen hypercalls are + * above are used by real hardware interrupts). Fifteen hypercalls are * available: the hypercall number is put in the %eax register, and the * arguments (when required) are placed in %edx, %ebx and %ecx. If a return * value makes sense, it's returned in %eax. @@ -31,20 +36,15 @@ * Grossly invalid calls result in Sudden Death at the hands of the vengeful * Host, rather than returning failure. This reflects Winston Churchill's
This reflects Winston Churchill's * definition of a gentleman: "someone who is only rude intentionally". */ -#define LGUEST_TRAP_ENTRY 0x1F - -#ifndef __ASSEMBLY__ -#include <asm/hw_irq.h> - static inline unsigned long hcall(unsigned long call, unsigned long arg1, unsigned long arg2, unsigned long arg3) { /* "int" is the Intel instruction to trigger a trap. */ asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) - /* The call is in %eax (aka "a"), and can be replaced */ + /* The call in %eax (aka "a") might be overwritten */ : "=a"(call) - /* The other arguments are in %eax, %edx, %ebx & %ecx */ + /* The arguments are in %eax, %edx, %ebx & %ecx */ : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) /* "memory" means this might write somewhere in memory. * This isn't true for all calls, but it's safe to tell diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h index 8bd9d2c02a2..3c7d537dd15 100644 --- a/include/asm-xtensa/dma-mapping.h +++ b/include/asm-xtensa/dma-mapping.h @@ -11,10 +11,10 @@ #ifndef _XTENSA_DMA_MAPPING_H #define _XTENSA_DMA_MAPPING_H -#include <asm/scatterlist.h> #include <asm/cache.h> #include <asm/io.h> #include <linux/mm.h> +#include <linux/scatterlist.h> /* * DMA-consistent mapping functions. diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c811c8b979a..c68b67b86ef 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -101,6 +101,12 @@ extern void __chk_io_ptr(const volatile void __iomem *); #undef __must_check #define __must_check #endif +#ifndef CONFIG_ENABLE_WARN_DEPRECATED +#undef __deprecated +#undef __deprecated_for_modules +#define __deprecated +#define __deprecated_for_modules +#endif /* * Allow us to avoid 'defined but not used' warnings on functions and data, diff --git a/include/linux/completion.h b/include/linux/completion.h index 268c5a4a2bd..33d6aaf9444 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -42,15 +42,15 @@ static inline void init_completion(struct completion *x) init_waitqueue_head(&x->wait); } -extern void FASTCALL(wait_for_completion(struct completion *)); -extern int FASTCALL(wait_for_completion_interruptible(struct completion *x)); -extern unsigned long FASTCALL(wait_for_completion_timeout(struct completion *x, - unsigned long timeout)); -extern unsigned long FASTCALL(wait_for_completion_interruptible_timeout( - struct completion *x, unsigned long timeout)); - -extern void FASTCALL(complete(struct completion *)); -extern void FASTCALL(complete_all(struct completion *)); +extern void wait_for_completion(struct completion *); +extern int wait_for_completion_interruptible(struct completion *x); +extern unsigned long wait_for_completion_timeout(struct completion *x, + unsigned long timeout); +extern unsigned long wait_for_completion_interruptible_timeout( + struct completion *x, unsigned long timeout); + +extern void complete(struct completion *); +extern void complete_all(struct completion *); #define INIT_COMPLETION(x) ((x).done = 0) diff --git a/include/linux/lguest.h b/include/linux/lguest.h index 8beb2913462..175e63f4a8c 100644 --- a/include/linux/lguest.h +++ b/include/linux/lguest.h @@ -12,8 +12,8 @@ #define LG_CLOCK_MAX_DELTA ULONG_MAX /*G:032 The second method of communicating with the Host is to via "struct - * lguest_data". The Guest's very first hypercall is to tell the Host where - * this is, and then the Guest and Host both publish information in it. :*/ + * lguest_data". 
Once the Guest's initialization hypercall tells the Host where + * this is, the Guest and Host both publish information in it. :*/ struct lguest_data { /* 512 == enabled (same as eflags in normal hardware). The Guest diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index 61e1e3e6b1c..697104da91f 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h @@ -1,17 +1,7 @@ -#ifndef _ASM_LGUEST_USER -#define _ASM_LGUEST_USER +#ifndef _LINUX_LGUEST_LAUNCHER +#define _LINUX_LGUEST_LAUNCHER /* Everything the "lguest" userspace program needs to know. */ #include <linux/types.h> -/* They can register up to 32 arrays of lguest_dma. */ -#define LGUEST_MAX_DMA 32 -/* At most we can dma 16 lguest_dma in one op. */ -#define LGUEST_MAX_DMA_SECTIONS 16 - -/* How many devices? Assume each one wants up to two dma arrays per device. */ -#define LGUEST_MAX_DEVICES (LGUEST_MAX_DMA/2) - -/* Where the Host expects the Guest to SEND_DMA console output to. */ -#define LGUEST_CONSOLE_DMA_KEY 0 /*D:010 * Drivers @@ -20,7 +10,11 @@ * real devices (think of the damage it could do!) we provide virtual devices. * We could emulate a PCI bus with various devices on it, but that is a fairly * complex burden for the Host and suboptimal for the Guest, so we have our own - * "lguest" bus and simple drivers. + * simple lguest bus and we use "virtio" drivers. These drivers need a set of + * routines from us which will actually do the virtual I/O, but they handle all + * the net/block/console stuff themselves. This means that if we want to add + * a new device, we simply need to write a new virtio driver and create support + * for it in the Launcher: this code won't need to change. * * Devices are described by a simplified ID, a status byte, and some "config" * bytes which describe this device's configuration. This is placed by the @@ -51,9 +45,9 @@ struct lguest_vqconfig { /* Write command first word is a request. 
*/ enum lguest_req { - LHREQ_INITIALIZE, /* + pfnlimit, pgdir, start, pageoffset */ + LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */ LHREQ_GETDMA, /* No longer used */ LHREQ_IRQ, /* + irq */ LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ }; -#endif /* _ASM_LGUEST_USER */ +#endif /* _LINUX_LGUEST_LAUNCHER */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4e10a074ca5..e44aac8cf5f 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1236,6 +1236,10 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 +#define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760 +#define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761 +#define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762 +#define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763 #define PCI_VENDOR_ID_IMS 0x10e0 #define PCI_DEVICE_ID_IMS_TT128 0x9128 diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index df7ddcee7c4..45712317138 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -1,6 +1,7 @@ #ifndef _LINUX_SCATTERLIST_H #define _LINUX_SCATTERLIST_H +#include <asm/types.h> #include <asm/scatterlist.h> #include <linux/mm.h> #include <linux/string.h> @@ -26,18 +27,16 @@ #define SG_MAGIC 0x87654321 /** - * sg_set_page - Set sg entry to point at given page - * @sg: SG entry - * @page: The page + * sg_assign_page - Assign a given page to an SG entry + * @sg: SG entry + * @page: The page * * Description: - * Use this function to set an sg entry pointing at a page, never assign - * the page directly. We encode sg table information in the lower bits - * of the page pointer. See sg_page() for looking up the page belonging - * to an sg entry. + * Assign page to sg entry. Also see sg_set_page(), the most commonly used + * variant. * **/ -static inline void sg_set_page(struct scatterlist *sg, struct page *page) +static inline void sg_assign_page(struct scatterlist *sg, struct page *page) { unsigned long page_link = sg->page_link & 0x3; @@ -52,6 +51,28 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page) sg->page_link = page_link | (unsigned long) page; } +/** + * sg_set_page - Set sg entry to point at given page + * @sg: SG entry + * @page: The page + * @len: Length of data + * @offset: Offset into page + * + * Description: + * Use this function to set an sg entry pointing at a page, never assign + * the page directly. We encode sg table information in the lower bits + * of the page pointer. See sg_page() for looking up the page belonging + * to an sg entry. + * + **/ +static inline void sg_set_page(struct scatterlist *sg, struct page *page, + unsigned int len, unsigned int offset) +{ + sg_assign_page(sg, page); + sg->offset = offset; + sg->length = len; +} + #define sg_page(sg) ((struct page *) ((sg)->page_link & ~0x3)) /** @@ -64,9 +85,7 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page) static inline void sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen) { - sg_set_page(sg, virt_to_page(buf)); - sg->offset = offset_in_page(buf); - sg->length = buflen; + sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); } /* @@ -237,7 +256,7 @@ static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) * on the sg page. 
* **/ -static inline unsigned long sg_phys(struct scatterlist *sg) +static inline dma_addr_t sg_phys(struct scatterlist *sg) { return page_to_phys(sg_page(sg)) + sg->offset; } diff --git a/include/linux/sched.h b/include/linux/sched.h index 13df99fb276..24e08d1d900 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -828,12 +828,17 @@ struct sched_class { struct task_struct * (*pick_next_task) (struct rq *rq); void (*put_prev_task) (struct rq *rq, struct task_struct *p); +#ifdef CONFIG_SMP unsigned long (*load_balance) (struct rq *this_rq, int this_cpu, - struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, + struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, int *all_pinned, int *this_best_prio); + int (*move_one_task) (struct rq *this_rq, int this_cpu, + struct rq *busiest, struct sched_domain *sd, + enum cpu_idle_type idle); +#endif + void (*set_curr_task) (struct rq *rq); void (*task_tick) (struct rq *rq, struct task_struct *p); void (*task_new) (struct rq *rq, struct task_struct *p); @@ -1196,7 +1201,7 @@ static inline int rt_prio(int prio) return 0; } -static inline int rt_task(struct task_struct *p) +static inline int rt_task(const struct task_struct *p) { return rt_prio(p->prio); } @@ -1211,22 +1216,22 @@ static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp) tsk->signal->__pgrp = pgrp; } -static inline struct pid *task_pid(struct task_struct *task) +static inline struct pid *task_pid(const struct task_struct *task) { return task->pids[PIDTYPE_PID].pid; } -static inline struct pid *task_tgid(struct task_struct *task) +static inline struct pid *task_tgid(const struct task_struct *task) { return task->group_leader->pids[PIDTYPE_PID].pid; } -static inline struct pid *task_pgrp(struct task_struct *task) +static inline struct pid *task_pgrp(const struct task_struct *task) { return task->group_leader->pids[PIDTYPE_PGID].pid; } -static inline struct pid *task_session(struct task_struct *task) +static inline struct pid *task_session(const struct task_struct *task) { return task->group_leader->pids[PIDTYPE_SID].pid; } @@ -1255,7 +1260,7 @@ struct pid_namespace; * see also pid_nr() etc in include/linux/pid.h */ -static inline pid_t task_pid_nr(struct task_struct *tsk) +static inline pid_t task_pid_nr(const struct task_struct *tsk) { return tsk->pid; } @@ -1268,7 +1273,7 @@ static inline pid_t task_pid_vnr(struct task_struct *tsk) } -static inline pid_t task_tgid_nr(struct task_struct *tsk) +static inline pid_t task_tgid_nr(const struct task_struct *tsk) { return tsk->tgid; } @@ -1281,7 +1286,7 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk) } -static inline pid_t task_pgrp_nr(struct task_struct *tsk) +static inline pid_t task_pgrp_nr(const struct task_struct *tsk) { return tsk->signal->__pgrp; } @@ -1294,7 +1299,7 @@ static inline pid_t task_pgrp_vnr(struct task_struct *tsk) } -static inline pid_t task_session_nr(struct task_struct *tsk) +static inline pid_t task_session_nr(const struct task_struct *tsk) { return tsk->signal->__session; } @@ -1321,7 +1326,7 @@ static inline pid_t task_ppid_nr_ns(struct task_struct *tsk, * If pid_alive fails, then pointers within the task structure * can be stale and must not be dereferenced. 
*/ -static inline int pid_alive(struct task_struct *p) +static inline int pid_alive(const struct task_struct *p) { return p->pids[PIDTYPE_PID].pid != NULL; } @@ -1332,7 +1337,7 @@ static inline int pid_alive(struct task_struct *p) * * Check if a task structure is the first user space task the kernel created. */ -static inline int is_global_init(struct task_struct *tsk) +static inline int is_global_init(const struct task_struct *tsk) { return tsk->pid == 1; } @@ -1469,7 +1474,7 @@ extern int rt_mutex_getprio(struct task_struct *p); extern void rt_mutex_setprio(struct task_struct *p, int prio); extern void rt_mutex_adjust_pi(struct task_struct *p); #else -static inline int rt_mutex_getprio(struct task_struct *p) +static inline int rt_mutex_getprio(const struct task_struct *p) { return p->normal_prio; } @@ -1721,7 +1726,7 @@ extern void wait_task_inactive(struct task_struct * p); * all we care about is that we have a task with the appropriate * pid, we don't actually care if we have the right task. */ -static inline int has_group_leader_pid(struct task_struct *p) +static inline int has_group_leader_pid(const struct task_struct *p) { return p->pid == p->tgid; } @@ -1738,7 +1743,7 @@ static inline struct task_struct *next_thread(const struct task_struct *p) struct task_struct, thread_group); } -static inline int thread_group_empty(struct task_struct *p) +static inline int thread_group_empty(const struct task_struct *p) { return list_empty(&p->thread_group); } diff --git a/init/Kconfig b/init/Kconfig index b7dffa83792..8b88d0bedcb 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -322,7 +322,6 @@ config CPUSETS config FAIR_GROUP_SCHED bool "Fair group CPU scheduler" default y - depends on EXPERIMENTAL help This feature lets CPU scheduler recognize task groups and control CPU bandwidth allocation to such task groups. diff --git a/kernel/profile.c b/kernel/profile.c index 631b75c25d7..5e95330e512 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -60,6 +60,7 @@ static int __init profile_setup(char * str) int par; if (!strncmp(str, sleepstr, strlen(sleepstr))) { +#ifdef CONFIG_SCHEDSTATS prof_on = SLEEP_PROFILING; if (str[strlen(sleepstr)] == ',') str += strlen(sleepstr) + 1; @@ -68,6 +69,10 @@ static int __init profile_setup(char * str) printk(KERN_INFO "kernel sleep profiling enabled (shift: %ld)\n", prof_shift); +#else + printk(KERN_WARNING + "kernel sleep profiling requires CONFIG_SCHEDSTATS\n"); +#endif /* CONFIG_SCHEDSTATS */ } else if (!strncmp(str, schedstr, strlen(schedstr))) { prof_on = SCHED_PROFILING; if (str[strlen(schedstr)] == ',') diff --git a/kernel/sched.c b/kernel/sched.c index 2810e562a99..b4fbbc44045 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -66,6 +66,7 @@ #include <linux/pagemap.h> #include <asm/tlb.h> +#include <asm/irq_regs.h> /* * Scheduler clock - returns current time in nanosec units. */
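A note on the shape of the sched.c refactoring in the hunks that follow: the old dual-purpose balance_tasks() is split into two per-class hooks. load_balance() keeps moving weighted load up to max_load_move, while active balancing gets a dedicated move_one_task(), which is why the max_nr_move/ULONG_MAX special cases disappear. Both hooks are driven through struct rq_iterator, whose start()/next() callbacks let generic code walk a class's runnable tasks without knowing the run-queue layout. Here is a standalone model of that pattern; only the arg/start/next shape is taken from the patch, every other name is invented for illustration, and it builds as an ordinary userspace C program:

#include <stdio.h>

struct task {
	int load;
	int pinned;
	struct task *next;
};

/* Shape borrowed from struct rq_iterator below: an opaque cursor plus
 * start()/next() callbacks. */
struct rq_iterator {
	void *arg;
	struct task *(*start)(void *);
	struct task *(*next)(void *);
};

/* One "scheduling class" whose runnable tasks form a linked list. */
struct list_cursor {
	struct task *head;
	struct task *cur;
};

static struct task *list_start(void *arg)
{
	struct list_cursor *lc = arg;

	lc->cur = lc->head;
	return lc->cur;
}

static struct task *list_next(void *arg)
{
	struct list_cursor *lc = arg;

	lc->cur = lc->cur ? lc->cur->next : NULL;
	return lc->cur;
}

/* Userspace analogue of iter_move_one_task(): walk the class's tasks
 * and "pull" the first one that isn't pinned to its CPU. */
static int move_one_task(struct rq_iterator *it)
{
	struct task *p;

	for (p = it->start(it->arg); p; p = it->next(it->arg)) {
		if (p->pinned)
			continue;
		printf("pulled task with load %d\n", p->load);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct task t2 = { 3, 0, NULL };
	struct task t1 = { 5, 1, &t2 };	/* pinned: must be skipped */
	struct list_cursor lc = { &t1, NULL };
	struct rq_iterator it = { &lc, list_start, list_next };

	return !move_one_task(&it);	/* exit 0 when one task moved */
}

Each real class supplies its own start/next pair, which is what lets balance_tasks() and iter_move_one_task() in the hunks below stay generic.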
@@ -837,11 +838,18 @@ struct rq_iterator { struct task_struct *(*next)(void *); }; -static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, - struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned, unsigned long *load_moved, - int *this_best_prio, struct rq_iterator *iterator); +#ifdef CONFIG_SMP +static unsigned long +balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, + unsigned long max_load_move, struct sched_domain *sd, + enum cpu_idle_type idle, int *all_pinned, + int *this_best_prio, struct rq_iterator *iterator); + +static int +iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, + struct sched_domain *sd, enum cpu_idle_type idle, + struct rq_iterator *iterator); +#endif #include "sched_stats.h" #include "sched_idletask.c" @@ -2223,17 +2231,17 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, return 1; } -static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, - struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned, unsigned long *load_moved, - int *this_best_prio, struct rq_iterator *iterator) +static unsigned long +balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, + unsigned long max_load_move, struct sched_domain *sd, + enum cpu_idle_type idle, int *all_pinned, + int *this_best_prio, struct rq_iterator *iterator) { int pulled = 0, pinned = 0, skip_for_load; struct task_struct *p; long rem_load_move = max_load_move; - if (max_nr_move == 0 || max_load_move == 0) + if (max_load_move == 0) goto out; pinned = 1; @@ -2266,7 +2274,7 @@ next: * We only want to steal up to the prescribed number of tasks * and the prescribed amount of weighted load. */ - if (pulled < max_nr_move && rem_load_move > 0) { + if (rem_load_move > 0) { if (p->prio < *this_best_prio) *this_best_prio = p->prio; p = iterator->next(iterator->arg); @@ -2274,7 +2282,7 @@ next: } out: /* - * Right now, this is the only place pull_task() is called, + * Right now, this is one of only two places pull_task() is called, * so we can safely collect pull_task() stats here rather than * inside pull_task(). */ @@ -2282,8 +2290,8 @@ out: if (all_pinned) *all_pinned = pinned; - *load_moved = max_load_move - rem_load_move; - return pulled; + + return max_load_move - rem_load_move; } /* @@ -2305,7 +2313,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, do { total_load_moved += class->load_balance(this_rq, this_cpu, busiest, - ULONG_MAX, max_load_move - total_load_moved, + max_load_move - total_load_moved, sd, idle, all_pinned, &this_best_prio); class = class->next; } while (class && max_load_move > total_load_moved); @@ -2313,6 +2321,32 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, return total_load_moved > 0; } +static int +iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, + struct sched_domain *sd, enum cpu_idle_type idle, + struct rq_iterator *iterator) +{ + struct task_struct *p = iterator->start(iterator->arg); + int pinned = 0; + + while (p) { + if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { + pull_task(busiest, p, this_rq, this_cpu); + /* + * Right now, this is only the second place pull_task() + * is called, so we can safely collect pull_task() + * stats here rather than inside pull_task(). 
+ */ + schedstat_inc(sd, lb_gained[idle]); + + return 1; + } + p = iterator->next(iterator->arg); + } + + return 0; +} + /* * move_one_task tries to move exactly one task from busiest to this_rq, as * part of active balancing operations within "domain". @@ -2324,12 +2358,9 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, struct sched_domain *sd, enum cpu_idle_type idle) { const struct sched_class *class; - int this_best_prio = MAX_PRIO; for (class = sched_class_highest; class; class = class->next) - if (class->load_balance(this_rq, this_cpu, busiest, - 1, ULONG_MAX, sd, idle, NULL, - &this_best_prio)) + if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle)) return 1; return 0; @@ -3266,18 +3297,6 @@ static inline void idle_balance(int cpu, struct rq *rq) { } -/* Avoid "used but not defined" warning on UP */ -static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, - struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned, unsigned long *load_moved, - int *this_best_prio, struct rq_iterator *iterator) -{ - *load_moved = 0; - - return 0; -} - #endif DEFINE_PER_CPU(struct kernel_stat, kstat); @@ -3507,12 +3526,19 @@ EXPORT_SYMBOL(sub_preempt_count); */ static noinline void __schedule_bug(struct task_struct *prev) { - printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n", - prev->comm, preempt_count(), task_pid_nr(prev)); + struct pt_regs *regs = get_irq_regs(); + + printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", + prev->comm, prev->pid, preempt_count()); + debug_show_held_locks(prev); if (irqs_disabled()) print_irqtrace_events(prev); - dump_stack(); + + if (regs) + show_regs(regs); + else + dump_stack(); } /* @@ -3820,7 +3846,7 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) } EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ -void fastcall complete(struct completion *x) +void complete(struct completion *x) { unsigned long flags; @@ -3832,7 +3858,7 @@ void fastcall complete(struct completion *x) } EXPORT_SYMBOL(complete); -void fastcall complete_all(struct completion *x) +void complete_all(struct completion *x) { unsigned long flags; @@ -3884,13 +3910,13 @@ wait_for_common(struct completion *x, long timeout, int state) return timeout; } -void fastcall __sched wait_for_completion(struct completion *x) +void __sched wait_for_completion(struct completion *x) { wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(wait_for_completion); -unsigned long fastcall __sched +unsigned long __sched wait_for_completion_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); @@ -3906,7 +3932,7 @@ int __sched wait_for_completion_interruptible(struct completion *x) } EXPORT_SYMBOL(wait_for_completion_interruptible); -unsigned long fastcall __sched +unsigned long __sched wait_for_completion_interruptible_timeout(struct completion *x, unsigned long timeout) { @@ -5461,11 +5487,12 @@ static void register_sched_domain_sysctl(void) struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); char buf[32]; + WARN_ON(sd_ctl_dir[0].child); + sd_ctl_dir[0].child = entry; + if (entry == NULL) return; - sd_ctl_dir[0].child = entry; - for_each_online_cpu(i) { snprintf(buf, 32, "cpu%d", i); entry->procname = kstrdup(buf, GFP_KERNEL); @@ -5473,14 +5500,19 @@ static void register_sched_domain_sysctl(void) entry->child = sd_alloc_ctl_cpu_table(i); 
entry++; } + + WARN_ON(sd_sysctl_header); sd_sysctl_header = register_sysctl_table(sd_ctl_root); } +/* may be called multiple times per register */ static void unregister_sched_domain_sysctl(void) { - unregister_sysctl_table(sd_sysctl_header); + if (sd_sysctl_header) + unregister_sysctl_table(sd_sysctl_header); sd_sysctl_header = NULL; - sd_free_ctl_entry(&sd_ctl_dir[0].child); + if (sd_ctl_dir[0].child) + sd_free_ctl_entry(&sd_ctl_dir[0].child); } #else static void register_sched_domain_sysctl(void) @@ -5611,101 +5643,101 @@ int nr_cpu_ids __read_mostly = NR_CPUS; EXPORT_SYMBOL(nr_cpu_ids); #ifdef CONFIG_SCHED_DEBUG -static void sched_domain_debug(struct sched_domain *sd, int cpu) + +static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level) { - int level = 0; + struct sched_group *group = sd->groups; + cpumask_t groupmask; + char str[NR_CPUS]; - if (!sd) { - printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); - return; + cpumask_scnprintf(str, NR_CPUS, sd->span); + cpus_clear(groupmask); + + printk(KERN_DEBUG "%*s domain %d: ", level, "", level); + + if (!(sd->flags & SD_LOAD_BALANCE)) { + printk("does not load-balance\n"); + if (sd->parent) + printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" + " has parent"); + return -1; } - printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); + printk(KERN_CONT "span %s\n", str); + + if (!cpu_isset(cpu, sd->span)) { + printk(KERN_ERR "ERROR: domain->span does not contain " + "CPU%d\n", cpu); + } + if (!cpu_isset(cpu, group->cpumask)) { + printk(KERN_ERR "ERROR: domain->groups does not contain" + " CPU%d\n", cpu); + } + printk(KERN_DEBUG "%*s groups:", level + 1, ""); do { - int i; - char str[NR_CPUS]; - struct sched_group *group = sd->groups; - cpumask_t groupmask; - - cpumask_scnprintf(str, NR_CPUS, sd->span); - cpus_clear(groupmask); - - printk(KERN_DEBUG); - for (i = 0; i < level + 1; i++) - printk(" "); - printk("domain %d: ", level); - - if (!(sd->flags & SD_LOAD_BALANCE)) { - printk("does not load-balance\n"); - if (sd->parent) - printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" - " has parent"); + if (!group) { + printk("\n"); + printk(KERN_ERR "ERROR: group is NULL\n"); break; } - printk("span %s\n", str); + if (!group->__cpu_power) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: domain->cpu_power not " + "set\n"); + break; + } - if (!cpu_isset(cpu, sd->span)) - printk(KERN_ERR "ERROR: domain->span does not contain " - "CPU%d\n", cpu); - if (!cpu_isset(cpu, group->cpumask)) - printk(KERN_ERR "ERROR: domain->groups does not contain" - " CPU%d\n", cpu); + if (!cpus_weight(group->cpumask)) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: empty group\n"); + break; + } - printk(KERN_DEBUG); - for (i = 0; i < level + 2; i++) - printk(" "); - printk("groups:"); - do { - if (!group) { - printk("\n"); - printk(KERN_ERR "ERROR: group is NULL\n"); - break; - } + if (cpus_intersects(groupmask, group->cpumask)) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: repeated CPUs\n"); + break; + } - if (!group->__cpu_power) { - printk(KERN_CONT "\n"); - printk(KERN_ERR "ERROR: domain->cpu_power not " - "set\n"); - break; - } + cpus_or(groupmask, groupmask, group->cpumask); - if (!cpus_weight(group->cpumask)) { - printk(KERN_CONT "\n"); - printk(KERN_ERR "ERROR: empty group\n"); - break; - } + cpumask_scnprintf(str, NR_CPUS, group->cpumask); + printk(KERN_CONT " %s", str); - if (cpus_intersects(groupmask, group->cpumask)) { - printk(KERN_CONT "\n"); - printk(KERN_ERR "ERROR: repeated CPUs\n"); - break; - } + 
group = group->next; + } while (group != sd->groups); + printk(KERN_CONT "\n"); - cpus_or(groupmask, groupmask, group->cpumask); + if (!cpus_equal(sd->span, groupmask)) + printk(KERN_ERR "ERROR: groups don't span domain->span\n"); - cpumask_scnprintf(str, NR_CPUS, group->cpumask); - printk(KERN_CONT " %s", str); + if (sd->parent && !cpus_subset(groupmask, sd->parent->span)) + printk(KERN_ERR "ERROR: parent span is not a superset " + "of domain->span\n"); + return 0; +} - group = group->next; - } while (group != sd->groups); - printk(KERN_CONT "\n"); +static void sched_domain_debug(struct sched_domain *sd, int cpu) +{ + int level = 0; - if (!cpus_equal(sd->span, groupmask)) - printk(KERN_ERR "ERROR: groups don't span " - "domain->span\n"); + if (!sd) { + printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); + return; + } + + printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); + for (;;) { + if (sched_domain_debug_one(sd, cpu, level)) + break; level++; sd = sd->parent; if (!sd) - continue; - - if (!cpus_subset(groupmask, sd->span)) - printk(KERN_ERR "ERROR: parent span is not a superset " - "of domain->span\n"); - - } while (sd); + break; + } } #else # define sched_domain_debug(sd, cpu) do { } while (0) @@ -6424,13 +6456,17 @@ static cpumask_t fallback_doms; */ static int arch_init_sched_domains(const cpumask_t *cpu_map) { + int err; + ndoms_cur = 1; doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); if (!doms_cur) doms_cur = &fallback_doms; cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); + err = build_sched_domains(doms_cur); register_sched_domain_sysctl(); - return build_sched_domains(doms_cur); + + return err; } static void arch_destroy_sched_domains(const cpumask_t *cpu_map) @@ -6479,6 +6515,9 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new) { int i, j; + /* always unregister in case we don't destroy any domains */ + unregister_sched_domain_sysctl(); + if (doms_new == NULL) { ndoms_new = 1; doms_new = &fallback_doms; @@ -6514,6 +6553,8 @@ match2: kfree(doms_cur); doms_cur = doms_new; ndoms_cur = ndoms_new; + + register_sched_domain_sysctl(); } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) @@ -7101,25 +7142,25 @@ unsigned long sched_group_shares(struct task_group *tg) #ifdef CONFIG_FAIR_CGROUP_SCHED /* return corresponding task_group object of a cgroup */ -static inline struct task_group *cgroup_tg(struct cgroup *cont) +static inline struct task_group *cgroup_tg(struct cgroup *cgrp) { - return container_of(cgroup_subsys_state(cont, cpu_cgroup_subsys_id), - struct task_group, css); + return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id), + struct task_group, css); } static struct cgroup_subsys_state * -cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) +cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) { struct task_group *tg; - if (!cont->parent) { + if (!cgrp->parent) { /* This is early initialization for the top cgroup */ - init_task_group.css.cgroup = cont; + init_task_group.css.cgroup = cgrp; return &init_task_group.css; } /* we support only 1-level deep hierarchical scheduler atm */ - if (cont->parent->parent) + if (cgrp->parent->parent) return ERR_PTR(-EINVAL); tg = sched_create_group(); @@ -7127,21 +7168,21 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) return ERR_PTR(-ENOMEM); /* Bind the cgroup to task_group object we just created */ - tg->css.cgroup = cont; + tg->css.cgroup = cgrp; return &tg->css; } static void cpu_cgroup_destroy(struct cgroup_subsys *ss, - struct 
cgroup *cont) + struct cgroup *cgrp) { - struct task_group *tg = cgroup_tg(cont); + struct task_group *tg = cgroup_tg(cgrp); sched_destroy_group(tg); } static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, - struct cgroup *cont, struct task_struct *tsk) + struct cgroup *cgrp, struct task_struct *tsk) { /* We don't support RT-tasks being in separate groups */ if (tsk->sched_class != &fair_sched_class) @@ -7151,38 +7192,21 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, } static void -cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cont, +cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, struct cgroup *old_cont, struct task_struct *tsk) { sched_move_task(tsk); } -static ssize_t cpu_shares_write(struct cgroup *cont, struct cftype *cftype, - struct file *file, const char __user *userbuf, - size_t nbytes, loff_t *ppos) +static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype, + u64 shareval) { - unsigned long shareval; - struct task_group *tg = cgroup_tg(cont); - char buffer[2*sizeof(unsigned long) + 1]; - int rc; - - if (nbytes > 2*sizeof(unsigned long)) /* safety check */ - return -E2BIG; - - if (copy_from_user(buffer, userbuf, nbytes)) - return -EFAULT; - - buffer[nbytes] = 0; /* nul-terminate */ - shareval = simple_strtoul(buffer, NULL, 10); - - rc = sched_group_set_shares(tg, shareval); - - return (rc < 0 ? rc : nbytes); + return sched_group_set_shares(cgroup_tg(cgrp), shareval); } -static u64 cpu_shares_read_uint(struct cgroup *cont, struct cftype *cft) +static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft) { - struct task_group *tg = cgroup_tg(cont); + struct task_group *tg = cgroup_tg(cgrp); return (u64) tg->shares; } @@ -7190,7 +7214,7 @@ static u64 cpu_shares_read_uint(struct cgroup *cont, struct cftype *cft) static struct cftype cpu_shares = { .name = "shares", .read_uint = cpu_shares_read_uint, - .write = cpu_shares_write, + .write_uint = cpu_shares_write_uint, }; static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 166ed6db600..9971831b560 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -876,6 +876,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) } } +#ifdef CONFIG_SMP /************************************************** * Fair scheduling class load-balancing methods: */ @@ -936,12 +937,11 @@ static int cfs_rq_best_prio(struct cfs_rq *cfs_rq) static unsigned long load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, + unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, int *all_pinned, int *this_best_prio) { struct cfs_rq *busy_cfs_rq; - unsigned long load_moved, total_nr_moved = 0, nr_moved; long rem_load_move = max_load_move; struct rq_iterator cfs_rq_iterator; @@ -969,25 +969,48 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, #else # define maxload rem_load_move #endif - /* pass busy_cfs_rq argument into + /* + * pass busy_cfs_rq argument into * load_balance_[start|next]_fair iterators */ cfs_rq_iterator.arg = busy_cfs_rq; - nr_moved = balance_tasks(this_rq, this_cpu, busiest, - max_nr_move, maxload, sd, idle, all_pinned, - &load_moved, this_best_prio, &cfs_rq_iterator); - - total_nr_moved += nr_moved; - max_nr_move -= nr_moved; - rem_load_move -= load_moved; + rem_load_move -= balance_tasks(this_rq, this_cpu, busiest, + maxload, sd, idle, 
all_pinned, + this_best_prio, + &cfs_rq_iterator); - if (max_nr_move <= 0 || rem_load_move <= 0) + if (rem_load_move <= 0) break; } return max_load_move - rem_load_move; } +static int +move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, + struct sched_domain *sd, enum cpu_idle_type idle) +{ + struct cfs_rq *busy_cfs_rq; + struct rq_iterator cfs_rq_iterator; + + cfs_rq_iterator.start = load_balance_start_fair; + cfs_rq_iterator.next = load_balance_next_fair; + + for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { + /* + * pass busy_cfs_rq argument into + * load_balance_[start|next]_fair iterators + */ + cfs_rq_iterator.arg = busy_cfs_rq; + if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, + &cfs_rq_iterator)) + return 1; + } + + return 0; +} +#endif + /* * scheduler tick hitting a task of our scheduling class: */ @@ -1063,7 +1086,10 @@ static const struct sched_class fair_sched_class = { .pick_next_task = pick_next_task_fair, .put_prev_task = put_prev_task_fair, +#ifdef CONFIG_SMP .load_balance = load_balance_fair, + .move_one_task = move_one_task_fair, +#endif .set_curr_task = set_curr_task_fair, .task_tick = task_tick_fair, diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c index 6e2ead41516..bf9c25c15b8 100644 --- a/kernel/sched_idletask.c +++ b/kernel/sched_idletask.c @@ -37,15 +37,24 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) { } +#ifdef CONFIG_SMP static unsigned long load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, - struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned, int *this_best_prio) + unsigned long max_load_move, + struct sched_domain *sd, enum cpu_idle_type idle, + int *all_pinned, int *this_best_prio) { return 0; } +static int +move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest, + struct sched_domain *sd, enum cpu_idle_type idle) +{ + return 0; +} +#endif + static void task_tick_idle(struct rq *rq, struct task_struct *curr) { } @@ -69,7 +78,10 @@ const struct sched_class idle_sched_class = { .pick_next_task = pick_next_task_idle, .put_prev_task = put_prev_task_idle, +#ifdef CONFIG_SMP .load_balance = load_balance_idle, + .move_one_task = move_one_task_idle, +#endif .set_curr_task = set_curr_task_idle, .task_tick = task_tick_idle, diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index d0097a0634e..8abd752a0eb 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -98,6 +98,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) p->se.exec_start = 0; } +#ifdef CONFIG_SMP /* * Load-balancing iterator. 
Note: while the runqueue stays locked * during the whole iteration, the current task might be @@ -172,13 +173,11 @@ static struct task_struct *load_balance_next_rt(void *arg) static unsigned long load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, - struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned, int *this_best_prio) + unsigned long max_load_move, + struct sched_domain *sd, enum cpu_idle_type idle, + int *all_pinned, int *this_best_prio) { - int nr_moved; struct rq_iterator rt_rq_iterator; - unsigned long load_moved; rt_rq_iterator.start = load_balance_start_rt; rt_rq_iterator.next = load_balance_next_rt; @@ -187,12 +186,24 @@ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, */ rt_rq_iterator.arg = busiest; - nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move, - max_load_move, sd, idle, all_pinned, &load_moved, - this_best_prio, &rt_rq_iterator); + return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd, + idle, all_pinned, this_best_prio, &rt_rq_iterator); +} + +static int +move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, + struct sched_domain *sd, enum cpu_idle_type idle) +{ + struct rq_iterator rt_rq_iterator; + + rt_rq_iterator.start = load_balance_start_rt; + rt_rq_iterator.next = load_balance_next_rt; + rt_rq_iterator.arg = busiest; - return load_moved; + return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, + &rt_rq_iterator); } +#endif static void task_tick_rt(struct rq *rq, struct task_struct *p) { @@ -236,7 +247,10 @@ const struct sched_class rt_sched_class = { .pick_next_task = pick_next_task_rt, .put_prev_task = put_prev_task_rt, +#ifdef CONFIG_SMP .load_balance = load_balance_rt, + .move_one_task = move_one_task_rt, +#endif .set_curr_task = set_curr_task_rt, .task_tick = task_tick_rt, diff --git a/kernel/user.c b/kernel/user.c index e91331c457e..0f3aa023410 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -129,7 +129,7 @@ static inline void uids_mutex_unlock(void) } /* return cpu shares held by the user */ -ssize_t cpu_shares_show(struct kset *kset, char *buffer) +static ssize_t cpu_shares_show(struct kset *kset, char *buffer) { struct user_struct *up = container_of(kset, struct user_struct, kset); @@ -137,7 +137,8 @@ ssize_t cpu_shares_show(struct kset *kset, char *buffer) } /* modify cpu shares held by the user */ -ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size) +static ssize_t cpu_shares_store(struct kset *kset, const char *buffer, + size_t size) { struct user_struct *up = container_of(kset, struct user_struct, kset); unsigned long shares; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1faa5087dc8..1e5f207b907 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -9,6 +9,14 @@ config PRINTK_TIME operations. This is useful for identifying long delays in kernel startup. +config ENABLE_WARN_DEPRECATED + bool "Enable __deprecated logic" + default y + help + Enable the __deprecated logic in the kernel build. + Disable this to suppress the "warning: 'foo' is deprecated + (declared at kernel/power/somefile.c:1234)" messages. 
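
For context on what the new ENABLE_WARN_DEPRECATED option controls: __deprecated expands to a compiler attribute that makes gcc warn at every call site of a marked function, and defining it to nothing suppresses exactly the messages quoted in the help text. A rough standalone illustration of the mechanism; WANT_WARN_DEPRECATED stands in for the kernel's real config plumbing and is not a symbol this patch adds:

/* Build with -DWANT_WARN_DEPRECATED to see the warning. */
#ifdef WANT_WARN_DEPRECATED
# define __deprecated __attribute__((deprecated))
#else
# define __deprecated	/* expands to nothing: warnings suppressed */
#endif

static int old_api(int x) __deprecated;

static int old_api(int x)
{
	return x + 1;
}

int main(void)
{
	return old_api(1);	/* gcc: 'old_api' is deprecated */
}
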
+ config ENABLE_MUST_CHECK bool "Enable __must_check logic" default y diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4e2c84fcf27..7b7c6c44c2d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2045,9 +2045,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) if (copy > 0) { if (copy > len) copy = len; - sg_set_page(&sg[elt], virt_to_page(skb->data + offset)); - sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; - sg[elt].length = copy; + sg_set_buf(sg, skb->data + offset, copy); elt++; if ((len -= copy) == 0) return elt; @@ -2065,9 +2063,8 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) if (copy > len) copy = len; - sg_set_page(&sg[elt], frag->page); - sg[elt].offset = frag->page_offset+offset-start; - sg[elt].length = copy; + sg_set_page(&sg[elt], frag->page, copy, + frag->page_offset+offset-start); elt++; if (!(len -= copy)) return elt; diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index 811777682e2..4cce3534e40 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -25,7 +25,7 @@ #include <net/ieee80211.h> #include <linux/crypto.h> -#include <asm/scatterlist.h> +#include <linux/scatterlist.h> #include <linux/crc32.h> MODULE_AUTHOR("Jouni Malinen"); @@ -537,13 +537,8 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr, return -1; } sg_init_table(sg, 2); - sg_set_page(&sg[0], virt_to_page(hdr)); - sg[0].offset = offset_in_page(hdr); - sg[0].length = 16; - - sg_set_page(&sg[1], virt_to_page(data)); - sg[1].offset = offset_in_page(data); - sg[1].length = data_len; + sg_set_buf(&sg[0], hdr, 16); + sg_set_buf(&sg[1], data, data_len); if (crypto_hash_setkey(tfm_michael, key, 8)) return -1; diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index 9693429489e..866fc04c44f 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c @@ -22,7 +22,7 @@ #include <net/ieee80211.h> #include <linux/crypto.h> -#include <asm/scatterlist.h> +#include <linux/scatterlist.h> #include <linux/crc32.h> MODULE_AUTHOR("Jouni Malinen"); diff --git a/net/sctp/auth.c b/net/sctp/auth.c index cbd64b216cc..621113a109b 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -727,9 +727,7 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc, /* set up scatter list */ end = skb_tail_pointer(skb); sg_init_table(&sg, 1); - sg_set_page(&sg, virt_to_page(auth)); - sg.offset = (unsigned long)(auth) % PAGE_SIZE; - sg.length = end - (unsigned char *)auth; + sg_set_buf(&sg, auth, end - (unsigned char *)auth); desc.tfm = asoc->ep->auth_hmacs[hmac_id]; desc.flags = 0; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 658476c4d58..c055212875f 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1514,9 +1514,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, /* Sign the message. */ sg_init_table(&sg, 1); - sg_set_page(&sg, virt_to_page(&cookie->c)); - sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE; - sg.length = bodysize; + sg_set_buf(&sg, &cookie->c, bodysize); keylen = SCTP_SECRET_SIZE; key = (char *)ep->secret_key[ep->current_key]; desc.tfm = sctp_sk(ep->base.sk)->hmac; @@ -1587,9 +1585,7 @@ struct sctp_association *sctp_unpack_cookie( /* Check the signature. 
*/ keylen = SCTP_SECRET_SIZE; sg_init_table(&sg, 1); - sg_set_page(&sg, virt_to_page(bear_cookie)); - sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE; - sg.length = bodysize; + sg_set_buf(&sg, bear_cookie, bodysize); key = (char *)ep->secret_key[ep->current_key]; desc.tfm = sctp_sk(ep->base.sk)->hmac; desc.flags = 0; diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 32be431affc..24711be4b2d 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -199,7 +199,7 @@ encryptor(struct scatterlist *sg, void *data) } else { in_page = sg_page(sg); } - sg_set_page(&desc->infrags[desc->fragno], in_page); + sg_assign_page(&desc->infrags[desc->fragno], in_page); desc->fragno++; desc->fraglen += sg->length; desc->pos += sg->length; @@ -215,11 +215,10 @@ if (ret) return ret; if (fraglen) { - sg_set_page(&desc->outfrags[0], sg_page(sg)); - desc->outfrags[0].offset = sg->offset + sg->length - fraglen; - desc->outfrags[0].length = fraglen; + sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen, + sg->offset + sg->length - fraglen); desc->infrags[0] = desc->outfrags[0]; - sg_set_page(&desc->infrags[0], in_page); + sg_assign_page(&desc->infrags[0], in_page); desc->fragno = 1; desc->fraglen = fraglen; } else { @@ -287,9 +286,8 @@ decryptor(struct scatterlist *sg, void *data) if (ret) return ret; if (fraglen) { - sg_set_page(&desc->frags[0], sg_page(sg)); - desc->frags[0].offset = sg->offset + sg->length - fraglen; - desc->frags[0].length = fraglen; + sg_set_page(&desc->frags[0], sg_page(sg), fraglen, + sg->offset + sg->length - fraglen); desc->fragno = 1; desc->fraglen = fraglen; } else { diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 3d1f7cdf9dd..f38dac30481 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1059,9 +1059,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, do { if (thislen > page_len) thislen = page_len; - sg_set_page(sg, buf->pages[i]); - sg->offset = page_offset; - sg->length = thislen; + sg_set_page(sg, buf->pages[i], thislen, page_offset); ret = actor(sg, data); if (ret) goto out; diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 313d4bed3aa..fa45989a716 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c @@ -553,9 +553,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, if (copy > len) copy = len; - sg_set_page(&sg, virt_to_page(skb->data + offset)); - sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; - sg.length = copy; + sg_set_buf(&sg, skb->data + offset, copy); err = icv_update(desc, &sg, copy); if (unlikely(err)) @@ -578,9 +576,8 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, if (copy > len) copy = len; - sg_set_page(&sg, frag->page); - sg.offset = frag->page_offset + offset-start; - sg.length = copy; + sg_set_page(&sg, frag->page, copy, + frag->page_offset + offset-start); err = icv_update(desc, &sg, copy); if (unlikely(err))
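
The networking hunks above are all one mechanical conversion: the scatterlist helpers grew length and offset parameters, so the open-coded virt_to_page()/offset/length triples collapse into single calls. A condensed sketch of the three helpers as this patch uses them; the demo function and its parameter names are illustrative, and each call below overwrites the same entry, shown together only for comparison:

#include <linux/scatterlist.h>

static void sg_helpers_demo(struct scatterlist *sg, void *buf,
			    struct page *page, unsigned int len,
			    unsigned int off)
{
	sg_init_table(sg, 1);		/* initialise and terminate one entry */

	/* Kernel-virtual buffer: the page and in-page offset are derived
	 * internally, replacing the manual virt_to_page()/offset/length
	 * triple seen on the removed lines. */
	sg_set_buf(sg, buf, len);

	/* Explicit page: length and offset are now plain arguments. */
	sg_set_page(sg, page, len, off);

	/* Replace only the page, keeping the entry's existing offset and
	 * length; encryptor() above switches to it for in_page for exactly
	 * that reason. */
	sg_assign_page(sg, page);
}
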