-rw-r--r-- Documentation/fb/cmap_xfbdev.txt | 53
-rw-r--r-- Documentation/fb/metronomefb.txt | 38
-rw-r--r-- drivers/hwmon/Kconfig | 5
-rw-r--r-- drivers/hwmon/ibmpex.c | 13
-rw-r--r-- drivers/md/md.c | 12
-rw-r--r-- drivers/md/raid5.c | 6
-rw-r--r-- drivers/memstick/core/memstick.c | 9
-rw-r--r-- drivers/memstick/core/mspro_block.c | 94
-rw-r--r-- drivers/memstick/host/jmb38x_ms.c | 113
-rw-r--r-- drivers/memstick/host/tifm_ms.c | 17
-rw-r--r-- drivers/rtc/Kconfig | 2
-rw-r--r-- drivers/rtc/rtc-at91sam9.c | 6
-rw-r--r-- drivers/video/Kconfig | 14
-rw-r--r-- drivers/video/Makefile | 1
-rw-r--r-- drivers/video/fb_defio.c | 22
-rw-r--r-- drivers/video/i810/i810_main.c | 2
-rw-r--r-- drivers/video/metronomefb.c | 999
-rw-r--r-- fs/aio.c | 8
-rw-r--r-- fs/buffer.c | 4
-rw-r--r-- fs/dquot.c | 2
-rw-r--r-- fs/ecryptfs/dentry.c | 2
-rw-r--r-- fs/ext3/acl.c | 8
-rw-r--r-- fs/ext3/resize.c | 4
-rw-r--r-- fs/ext3/xattr.c | 4
-rw-r--r-- fs/fs-writeback.c | 6
-rw-r--r-- fs/isofs/compress.c | 11
-rw-r--r-- fs/jbd/journal.c | 5
-rw-r--r-- fs/jbd/recovery.c | 2
-rw-r--r-- fs/jbd/transaction.c | 8
-rw-r--r-- fs/jbd2/recovery.c | 2
-rw-r--r-- fs/locks.c | 4
-rw-r--r-- fs/namei.c | 6
-rw-r--r-- fs/romfs/inode.c | 30
-rw-r--r-- fs/super.c | 6
-rw-r--r-- fs/ufs/balloc.c | 4
-rw-r--r-- include/linux/jbd.h | 11
-rw-r--r-- include/linux/memstick.h | 1
-rw-r--r-- include/linux/rcupreempt.h | 4
-rw-r--r-- kernel/time/clocksource.c | 14
-rw-r--r-- mm/filemap.c | 20
-rw-r--r-- mm/fremap.c | 2
-rw-r--r-- mm/highmem.c | 30
-rw-r--r-- mm/memcontrol.c | 2
-rw-r--r-- mm/oom_kill.c | 9
-rw-r--r-- mm/pagewalk.c | 10
-rw-r--r-- mm/readahead.c | 5
-rw-r--r-- mm/rmap.c | 13
-rw-r--r-- mm/shmem.c | 25
-rw-r--r-- mm/slab.c | 5
-rw-r--r-- mm/swap.c | 5
-rw-r--r-- mm/swap_state.c | 2
-rw-r--r-- mm/tiny-shmem.c | 8
-rw-r--r-- mm/vmalloc.c | 6
-rw-r--r-- security/smack/smack_lsm.c | 9
54 files changed, 1456 insertions, 247 deletions
diff --git a/Documentation/fb/cmap_xfbdev.txt b/Documentation/fb/cmap_xfbdev.txt
new file mode 100644
index 00000000000..55e1f0a3d2b
--- /dev/null
+++ b/Documentation/fb/cmap_xfbdev.txt
@@ -0,0 +1,53 @@
+Understanding fbdev's cmap
+--------------------------
+
+These notes explain how X's dix layer uses fbdev's cmap structures.
+
+*. example of relevant structures in fbdev as used for a 3-bit grayscale cmap
+struct fb_var_screeninfo {
+ .bits_per_pixel = 8,
+ .grayscale = 1,
+ .red = { 4, 3, 0 },
+ .green = { 0, 0, 0 },
+ .blue = { 0, 0, 0 },
+}
+struct fb_fix_screeninfo {
+ .visual = FB_VISUAL_STATIC_PSEUDOCOLOR,
+}
+for (i = 0; i < 8; i++)
+ info->cmap.red[i] = (((2*i)+1)*(0xFFFF))/16;
+memcpy(info->cmap.green, info->cmap.red, sizeof(u16)*8);
+memcpy(info->cmap.blue, info->cmap.red, sizeof(u16)*8);
+
+*. X11 apps do something like the following when trying to use grayscale.
+for (i=0; i < 8; i++) {
+ char colorspec[64];
+ memset(colorspec,0,64);
+ sprintf(colorspec, "rgb:%x/%x/%x", i*36,i*36,i*36);
+ if (!XParseColor(outputDisplay, testColormap, colorspec, &wantedColor))
+ printf("Can't get color %s\n",colorspec);
+ XAllocColor(outputDisplay, testColormap, &wantedColor);
+ grays[i] = wantedColor;
+}
+There are also named equivalents like gray1..x, provided you have an rgb.txt.
+
+Somewhere in X's callchain, this results in a call to X code that handles the
+colormap. For example, Xfbdev hits the following:
+
+xc-011010/programs/Xserver/dix/colormap.c:
+
+FindBestPixel(pentFirst, size, prgb, channel)
+
+dr = (long) pent->co.local.red - prgb->red;
+dg = (long) pent->co.local.green - prgb->green;
+db = (long) pent->co.local.blue - prgb->blue;
+sq = dr * dr;
+UnsignedToBigNum (sq, &sum);
+BigNumAdd (&sum, &temp, &sum);
+
+co.local.red holds the entries that were brought in through FBIOGETCMAP, which
+come directly from the info->cmap.red listed above. prgb is the rgb that the
+app wants to match to. The code above is doing what looks like a least-squares
+matching function. That's why the cmap entries can't be set to the left-hand
+boundaries of a color range.
+
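A minimal user-space sketch of the matching behaviour described above (not part
of the patch itself): it mimics FindBestPixel()'s least-squares search against
the 3-bit grayscale midpoint cmap, assuming XParseColor scales a two-digit hex
component to 16 bits by replication.

/* Illustrative sketch only: least-squares match of the app's requested grays
 * (rgb:i*36/i*36/i*36) against the midpoint cmap entries programmed by the
 * driver. Compiled stand-alone, each requested gray settles on the matching
 * cmap index.
 */
#include <stdio.h>

int main(void)
{
	unsigned short cmap_red[8];
	int i, level;

	for (i = 0; i < 8; i++)
		cmap_red[i] = ((2 * i + 1) * 0xFFFF) / 16;	/* midpoints */

	for (level = 0; level < 8; level++) {
		/* assumed: X replicates the 8-bit hex value to 16 bits */
		long want = (level * 36) * 0x101;
		long best_d = -1;
		int best = 0;

		for (i = 0; i < 8; i++) {
			long dr = (long) cmap_red[i] - want;
			long sq = dr * dr;	/* least-squares distance */

			if (best_d < 0 || sq < best_d) {
				best_d = sq;
				best = i;
			}
		}
		printf("gray %d -> cmap index %d\n", level, best);
	}
	return 0;
}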
diff --git a/Documentation/fb/metronomefb.txt b/Documentation/fb/metronomefb.txt
new file mode 100644
index 00000000000..b9a2e7b7e83
--- /dev/null
+++ b/Documentation/fb/metronomefb.txt
@@ -0,0 +1,38 @@
+ Metronomefb
+ -----------
+Maintained by Jaya Kumar <jayakumar.lkml.gmail.com>
+Last revised: Nov 20, 2007
+
+Metronomefb is a driver for the Metronome display controller from E-Ink
+Corporation. It is intended to drive the E-Ink Vizplex display media. E-Ink
+hosts some details of this controller and the display media at
+http://www.e-ink.com/products/matrix/metronome.html .
+
+Metronome is interfaced to the host CPU through the AMLCD interface. The
+host CPU generates the control information and the image in a framebuffer,
+which is then delivered to the AMLCD interface by a host-specific method.
+Currently, that is implemented for the PXA's LCDC controller. The display and
+error status are each pulled through individual GPIOs.
+
+Metronomefb was written for the PXA255/gumstix/lyre combination and
+therefore currently contains board-specific code. If boards based on other
+architectures become available, the host-specific code can be separated out
+and abstracted.
+
+Metronomefb requires waveform information, which is delivered via the AMLCD
+interface to the Metronome controller. The waveform information is expected to
+be delivered from userspace via the firmware class interface. The waveform file
+can be compressed as long as your udev or hotplug script knows to uncompress it
+before delivering it. metronomefb will ask for waveform.wbf, which would
+typically go into /lib/firmware/waveform.wbf depending on your udev/hotplug
+setup. I have only tested with a single waveform file, which was originally
+labeled 23P01201_60_WT0107_MTC; I do not know what the name stands for. Caution
+should be exercised when manipulating the waveform, as it may have permanent
+effects on the display media. I neither have access to nor know exactly what
+the waveform does in terms of the physical media.
+
+Metronomefb uses the deferred IO interface so that it can provide a
+memory-mappable frame buffer. It has been tested with tinyx (Xfbdev). It is
+known to work at this time with xeyes, xclock, xloadimage, and xpdf.
+
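The firmware-class handshake described above boils down to a request_firmware()
call in the driver's probe path. A trimmed sketch, not part of the patch,
mirroring what metronomefb_probe() in the driver below does (error handling
shortened; load_waveform() and struct metronomefb_par come from the driver):

/* Sketch of the waveform fetch: udev/hotplug supplies
 * /lib/firmware/waveform.wbf and the driver decodes it into its DMA buffer.
 * Relies on <linux/firmware.h> and the driver's own helpers.
 */
static int __devinit metronomefb_load_wfm(struct platform_device *dev,
					  struct metronomefb_par *par)
{
	const struct firmware *fw_entry;
	int ret;

	ret = request_firmware(&fw_entry, "waveform.wbf", &dev->dev);
	if (ret < 0)
		return ret;	/* no waveform, no display updates */

	/* mode 3, temperature 31, as assumed by the driver */
	ret = load_waveform((u8 *) fw_entry->data, fw_entry->size,
			    par->metromem_wfm, 3, 31, &par->frame_count);
	release_firmware(fw_entry);
	return ret;
}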
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 368879ff5d8..4dc76bc45c9 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -337,8 +337,9 @@ config SENSORS_IBMPEX
help
If you say yes here you get support for the temperature and
power sensors in various IBM System X servers that support
- PowerExecutive. So far this includes the x3550, x3650, x3655,
- x3755, and certain HS20 blades.
+ PowerExecutive. So far this includes the x3350, x3550, x3650,
+ x3655, and x3755; the x3800, x3850, and x3950 models that have
+ PCI Express; and some of the HS2x, LS2x, and QS2x blades.
This driver can also be built as a module. If so, the module
will be called ibmpex.
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 9c9cdb0685e..4e9b19c6732 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -327,10 +327,14 @@ static int is_temp_sensor(const char *sensor_id, int len)
return 0;
}
-static int power_sensor_multiplier(const char *sensor_id, int len)
+static int power_sensor_multiplier(struct ibmpex_bmc_data *data,
+ const char *sensor_id, int len)
{
int i;
+ if (data->sensor_major == 2)
+ return 1000000;
+
for (i = PEX_SENSOR_TYPE_LEN; i < len - 1; i++)
if (!memcmp(&sensor_id[i], watt_sensor_sig, PEX_MULT_LEN))
return 1000000;
@@ -398,14 +402,15 @@ static int ibmpex_find_sensors(struct ibmpex_bmc_data *data)
num_power++;
sensor_counter = num_power;
data->sensors[i].multiplier =
- power_sensor_multiplier(data->rx_msg_data,
- data->rx_msg_len);
+ power_sensor_multiplier(data,
+ data->rx_msg_data,
+ data->rx_msg_len);
} else if (is_temp_sensor(data->rx_msg_data,
data->rx_msg_len)) {
sensor_type = TEMP_SENSOR;
num_temp++;
sensor_counter = num_temp;
- data->sensors[i].multiplier = 1;
+ data->sensors[i].multiplier = 1000;
} else
continue;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ccbbf63727c..61ccbd2683f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1864,17 +1864,6 @@ static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
-super_show(mdk_rdev_t *rdev, char *page)
-{
- if (rdev->sb_loaded && rdev->sb_size) {
- memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
- return rdev->sb_size;
- } else
- return 0;
-}
-static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
-
-static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
@@ -2060,7 +2049,6 @@ __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
- &rdev_super.attr,
&rdev_errors.attr,
&rdev_slot.attr,
&rdev_offset.attr,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2d6f1a51359..c574cf5efb5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1143,7 +1143,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
rdev = conf->disks[i].rdev;
printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n",
mdname(conf->mddev), STRIPE_SECTORS,
- (unsigned long long)sh->sector + rdev->data_offset,
+ (unsigned long long)(sh->sector + rdev->data_offset),
bdevname(rdev->bdev, b));
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
@@ -1160,13 +1160,13 @@ static void raid5_end_read_request(struct bio * bi, int error)
if (conf->mddev->degraded)
printk(KERN_WARNING "raid5:%s: read error not correctable (sector %llu on %s).\n",
mdname(conf->mddev),
- (unsigned long long)sh->sector + rdev->data_offset,
+ (unsigned long long)(sh->sector + rdev->data_offset),
bdn);
else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
/* Oh, no!!! */
printk(KERN_WARNING "raid5:%s: read error NOT corrected!! (sector %llu on %s).\n",
mdname(conf->mddev),
- (unsigned long long)sh->sector + rdev->data_offset,
+ (unsigned long long)(sh->sector + rdev->data_offset),
bdn);
else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index de80dba12f9..946e3d3506a 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -276,8 +276,6 @@ void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc,
mrq->need_card_int = 1;
else
mrq->need_card_int = 0;
-
- mrq->get_int_reg = 0;
}
EXPORT_SYMBOL(memstick_init_req_sg);
@@ -311,8 +309,6 @@ void memstick_init_req(struct memstick_request *mrq, unsigned char tpc,
mrq->need_card_int = 1;
else
mrq->need_card_int = 0;
-
- mrq->get_int_reg = 0;
}
EXPORT_SYMBOL(memstick_init_req);
@@ -342,6 +338,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
card->id.class = id_reg.class;
}
complete(&card->mrq_complete);
+ dev_dbg(&card->dev, "if_mode = %02x\n", id_reg.if_mode);
return -EAGAIN;
}
}
@@ -422,7 +419,6 @@ static void memstick_power_on(struct memstick_host *host)
{
host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
- msleep(1);
}
static void memstick_check(struct work_struct *work)
@@ -579,7 +575,8 @@ EXPORT_SYMBOL(memstick_suspend_host);
void memstick_resume_host(struct memstick_host *host)
{
mutex_lock(&host->lock);
- host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
+ if (host->card)
+ memstick_power_on(host);
mutex_unlock(&host->lock);
memstick_detect_change(host);
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 1d637e4561d..557dbbba5cb 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -133,6 +133,7 @@ struct mspro_devinfo {
struct mspro_block_data {
struct memstick_dev *card;
unsigned int usage_count;
+ unsigned int caps;
struct gendisk *disk;
struct request_queue *queue;
spinlock_t q_lock;
@@ -577,7 +578,6 @@ static int h_mspro_block_wait_for_ced(struct memstick_dev *card,
static int h_mspro_block_transfer_data(struct memstick_dev *card,
struct memstick_request **mrq)
{
- struct memstick_host *host = card->host;
struct mspro_block_data *msb = memstick_get_drvdata(card);
unsigned char t_val = 0;
struct scatterlist t_sg = { 0 };
@@ -591,12 +591,12 @@ static int h_mspro_block_transfer_data(struct memstick_dev *card,
switch ((*mrq)->tpc) {
case MS_TPC_WRITE_REG:
memstick_init_req(*mrq, MS_TPC_SET_CMD, &msb->transfer_cmd, 1);
- (*mrq)->get_int_reg = 1;
+ (*mrq)->need_card_int = 1;
return 0;
case MS_TPC_SET_CMD:
t_val = (*mrq)->int_reg;
memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1);
- if (host->caps & MEMSTICK_CAP_AUTO_GET_INT)
+ if (msb->caps & MEMSTICK_CAP_AUTO_GET_INT)
goto has_int_reg;
return 0;
case MS_TPC_GET_INT:
@@ -646,12 +646,12 @@ has_int_reg:
? MS_TPC_READ_LONG_DATA
: MS_TPC_WRITE_LONG_DATA,
&t_sg);
- (*mrq)->get_int_reg = 1;
+ (*mrq)->need_card_int = 1;
return 0;
case MS_TPC_READ_LONG_DATA:
case MS_TPC_WRITE_LONG_DATA:
msb->current_page++;
- if (host->caps & MEMSTICK_CAP_AUTO_GET_INT) {
+ if (msb->caps & MEMSTICK_CAP_AUTO_GET_INT) {
t_val = (*mrq)->int_reg;
goto has_int_reg;
} else {
@@ -816,12 +816,13 @@ static int mspro_block_wait_for_ced(struct memstick_dev *card)
return card->current_mrq.error;
}
-static int mspro_block_switch_to_parallel(struct memstick_dev *card)
+static int mspro_block_set_interface(struct memstick_dev *card,
+ unsigned char sys_reg)
{
struct memstick_host *host = card->host;
struct mspro_block_data *msb = memstick_get_drvdata(card);
struct mspro_param_register param = {
- .system = MEMSTICK_SYS_PAR4,
+ .system = sys_reg,
.data_count = 0,
.data_address = 0,
.tpc_param = 0
@@ -833,41 +834,70 @@ static int mspro_block_switch_to_parallel(struct memstick_dev *card)
sizeof(param));
memstick_new_req(host);
wait_for_completion(&card->mrq_complete);
- if (card->current_mrq.error)
- return card->current_mrq.error;
+ return card->current_mrq.error;
+}
+
+static int mspro_block_switch_interface(struct memstick_dev *card)
+{
+ struct memstick_host *host = card->host;
+ struct mspro_block_data *msb = memstick_get_drvdata(card);
+ int rc = 0;
+
+ if (msb->caps & MEMSTICK_CAP_PAR4)
+ rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR4);
+ else
+ return 0;
+
+ if (rc) {
+ printk(KERN_WARNING
+ "%s: could not switch to 4-bit mode, error %d\n",
+ card->dev.bus_id, rc);
+ return 0;
+ }
msb->system = MEMSTICK_SYS_PAR4;
host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
+ printk(KERN_INFO "%s: switching to 4-bit parallel mode\n",
+ card->dev.bus_id);
+
+ if (msb->caps & MEMSTICK_CAP_PAR8) {
+ rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR8);
+
+ if (!rc) {
+ msb->system = MEMSTICK_SYS_PAR8;
+ host->set_param(host, MEMSTICK_INTERFACE,
+ MEMSTICK_PAR8);
+ printk(KERN_INFO
+ "%s: switching to 8-bit parallel mode\n",
+ card->dev.bus_id);
+ } else
+ printk(KERN_WARNING
+ "%s: could not switch to 8-bit mode, error %d\n",
+ card->dev.bus_id, rc);
+ }
card->next_request = h_mspro_block_req_init;
msb->mrq_handler = h_mspro_block_default;
memstick_init_req(&card->current_mrq, MS_TPC_GET_INT, NULL, 1);
memstick_new_req(card->host);
wait_for_completion(&card->mrq_complete);
+ rc = card->current_mrq.error;
- if (card->current_mrq.error) {
+ if (rc) {
+ printk(KERN_WARNING
+ "%s: interface error, trying to fall back to serial\n",
+ card->dev.bus_id);
msb->system = MEMSTICK_SYS_SERIAL;
host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
- msleep(1000);
+ msleep(10);
host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
- if (memstick_set_rw_addr(card))
- return card->current_mrq.error;
-
- param.system = msb->system;
-
- card->next_request = h_mspro_block_req_init;
- msb->mrq_handler = h_mspro_block_default;
- memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param,
- sizeof(param));
- memstick_new_req(host);
- wait_for_completion(&card->mrq_complete);
-
- return -EFAULT;
+ rc = memstick_set_rw_addr(card);
+ if (!rc)
+ rc = mspro_block_set_interface(card, msb->system);
}
-
- return 0;
+ return rc;
}
/* Memory allocated for attributes by this function should be freed by
@@ -1052,16 +1082,18 @@ static int mspro_block_init_card(struct memstick_dev *card)
if (memstick_set_rw_addr(card))
return -EIO;
- if (host->caps & MEMSTICK_CAP_PAR4) {
- if (mspro_block_switch_to_parallel(card))
- printk(KERN_WARNING "%s: could not switch to "
- "parallel interface\n", card->dev.bus_id);
- }
+ msb->caps = host->caps;
+ rc = mspro_block_switch_interface(card);
+ if (rc)
+ return rc;
+ msleep(200);
rc = mspro_block_wait_for_ced(card);
if (rc)
return rc;
dev_dbg(&card->dev, "card activated\n");
+ if (msb->system != MEMSTICK_SYS_SERIAL)
+ msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
card->next_request = h_mspro_block_req_init;
msb->mrq_handler = h_mspro_block_get_ro;
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 03fe8783b1e..8770a5fac3b 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/memstick.h>
@@ -56,8 +57,6 @@ struct jmb38x_ms_host {
unsigned long timeout_jiffies;
struct timer_list timer;
struct memstick_request *req;
- unsigned char eject:1,
- use_dma:1;
unsigned char cmd_flags;
unsigned char io_pos;
unsigned int io_word[2];
@@ -94,9 +93,22 @@ struct jmb38x_ms {
#define HOST_CONTROL_IF_PAR4 0x1
#define HOST_CONTROL_IF_PAR8 0x3
+#define STATUS_BUSY 0x00080000
+#define STATUS_MS_DAT7 0x00040000
+#define STATUS_MS_DAT6 0x00020000
+#define STATUS_MS_DAT5 0x00010000
+#define STATUS_MS_DAT4 0x00008000
+#define STATUS_MS_DAT3 0x00004000
+#define STATUS_MS_DAT2 0x00002000
+#define STATUS_MS_DAT1 0x00001000
+#define STATUS_MS_DAT0 0x00000800
#define STATUS_HAS_MEDIA 0x00000400
#define STATUS_FIFO_EMPTY 0x00000200
#define STATUS_FIFO_FULL 0x00000100
+#define STATUS_MS_CED 0x00000080
+#define STATUS_MS_ERR 0x00000040
+#define STATUS_MS_BRQ 0x00000020
+#define STATUS_MS_CNK 0x00000001
#define INT_STATUS_TPC_ERR 0x00080000
#define INT_STATUS_CRC_ERR 0x00040000
@@ -119,11 +131,17 @@ struct jmb38x_ms {
#define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000
#define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000
+#define CLOCK_CONTROL_40MHZ 0x00000001
+#define CLOCK_CONTROL_50MHZ 0x00000002
+#define CLOCK_CONTROL_60MHZ 0x00000008
+#define CLOCK_CONTROL_62_5MHZ 0x0000000c
+#define CLOCK_CONTROL_OFF 0x00000000
+
enum {
CMD_READY = 0x01,
FIFO_READY = 0x02,
REG_DATA = 0x04,
- AUTO_GET_INT = 0x08
+ DMA_DATA = 0x08
};
static unsigned int jmb38x_ms_read_data(struct jmb38x_ms_host *host,
@@ -273,7 +291,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
{
unsigned int length;
unsigned int off;
- unsigned int t_size, p_off, p_cnt;
+ unsigned int t_size, p_cnt;
unsigned char *buf;
struct page *pg;
unsigned long flags = 0;
@@ -287,6 +305,8 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
}
while (length) {
+ unsigned int uninitialized_var(p_off);
+
if (host->req->long_data) {
pg = nth_page(sg_page(&host->req->sg),
off >> PAGE_SHIFT);
@@ -364,28 +384,27 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
cmd |= TPC_DIR;
if (host->req->need_card_int)
cmd |= TPC_WAIT_INT;
- if (host->req->get_int_reg)
- cmd |= TPC_GET_INT;
data = host->req->data;
- host->use_dma = !no_dma;
+ if (!no_dma)
+ host->cmd_flags |= DMA_DATA;
if (host->req->long_data) {
data_len = host->req->sg.length;
} else {
data_len = host->req->data_len;
- host->use_dma = 0;
+ host->cmd_flags &= ~DMA_DATA;
}
if (data_len <= 8) {
cmd &= ~(TPC_DATA_SEL | 0xf);
host->cmd_flags |= REG_DATA;
cmd |= data_len & 0xf;
- host->use_dma = 0;
+ host->cmd_flags &= ~DMA_DATA;
}
- if (host->use_dma) {
+ if (host->cmd_flags & DMA_DATA) {
if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1,
host->req->data_dir == READ
? PCI_DMA_FROMDEVICE
@@ -448,13 +467,12 @@ static void jmb38x_ms_complete_cmd(struct memstick_host *msh, int last)
readl(host->addr + INT_STATUS));
dev_dbg(msh->cdev.dev, "c hstatus %08x\n", readl(host->addr + STATUS));
- if (host->req->get_int_reg) {
- t_val = readl(host->addr + TPC_P0);
- host->req->int_reg = (t_val & 0xff);
- }
+ host->req->int_reg = readl(host->addr + STATUS) & 0xff;
+
+ writel(0, host->addr + BLOCK);
+ writel(0, host->addr + DMA_CONTROL);
- if (host->use_dma) {
- writel(0, host->addr + DMA_CONTROL);
+ if (host->cmd_flags & DMA_DATA) {
pci_unmap_sg(host->chip->pdev, &host->req->sg, 1,
host->req->data_dir == READ
? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
@@ -506,7 +524,7 @@ static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id)
else
host->req->error = -ETIME;
} else {
- if (host->use_dma) {
+ if (host->cmd_flags & DMA_DATA) {
if (irq_status & INT_STATUS_EOTRAN)
host->cmd_flags |= FIFO_READY;
} else {
@@ -595,19 +613,18 @@ static void jmb38x_ms_reset(struct jmb38x_ms_host *host)
{
unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
- writel(host_ctl | HOST_CONTROL_RESET_REQ | HOST_CONTROL_RESET,
- host->addr + HOST_CONTROL);
+ writel(HOST_CONTROL_RESET_REQ, host->addr + HOST_CONTROL);
while (HOST_CONTROL_RESET_REQ
& (host_ctl = readl(host->addr + HOST_CONTROL))) {
- ndelay(100);
- dev_dbg(&host->chip->pdev->dev, "reset\n");
+ ndelay(20);
+ dev_dbg(&host->chip->pdev->dev, "reset %08x\n", host_ctl);
}
- writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE);
+ writel(HOST_CONTROL_RESET, host->addr + HOST_CONTROL);
+ mmiowb();
writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE);
-
- dev_dbg(&host->chip->pdev->dev, "reset\n");
+ writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE);
}
static void jmb38x_ms_set_param(struct memstick_host *msh,
@@ -615,10 +632,8 @@ static void jmb38x_ms_set_param(struct memstick_host *msh,
int value)
{
struct jmb38x_ms_host *host = memstick_priv(msh);
- unsigned int host_ctl;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
+ unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
+ unsigned int clock_ctl = CLOCK_CONTROL_40MHZ, clock_delay = 0;
switch (param) {
case MEMSTICK_POWER:
@@ -626,60 +641,57 @@ static void jmb38x_ms_set_param(struct memstick_host *msh,
jmb38x_ms_reset(host);
writel(host->id ? PAD_PU_PD_ON_MS_SOCK1
- : PAD_PU_PD_ON_MS_SOCK0,
+ : PAD_PU_PD_ON_MS_SOCK0,
host->addr + PAD_PU_PD);
writel(PAD_OUTPUT_ENABLE_MS,
host->addr + PAD_OUTPUT_ENABLE);
- host_ctl = readl(host->addr + HOST_CONTROL);
- host_ctl |= 7;
- writel(host_ctl | (HOST_CONTROL_POWER_EN
- | HOST_CONTROL_CLOCK_EN),
- host->addr + HOST_CONTROL);
+ host_ctl = 7;
+ host_ctl |= HOST_CONTROL_POWER_EN
+ | HOST_CONTROL_CLOCK_EN;
+ writel(host_ctl, host->addr + HOST_CONTROL);
dev_dbg(&host->chip->pdev->dev, "power on\n");
} else if (value == MEMSTICK_POWER_OFF) {
- writel(readl(host->addr + HOST_CONTROL)
- & ~(HOST_CONTROL_POWER_EN
- | HOST_CONTROL_CLOCK_EN),
- host->addr + HOST_CONTROL);
+ host_ctl &= ~(HOST_CONTROL_POWER_EN
+ | HOST_CONTROL_CLOCK_EN);
+ writel(host_ctl, host->addr + HOST_CONTROL);
writel(0, host->addr + PAD_OUTPUT_ENABLE);
writel(PAD_PU_PD_OFF, host->addr + PAD_PU_PD);
dev_dbg(&host->chip->pdev->dev, "power off\n");
}
break;
case MEMSTICK_INTERFACE:
- /* jmb38x_ms_reset(host); */
-
- host_ctl = readl(host->addr + HOST_CONTROL);
host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT);
- /* host_ctl |= 7; */
if (value == MEMSTICK_SERIAL) {
host_ctl &= ~HOST_CONTROL_FAST_CLK;
host_ctl |= HOST_CONTROL_IF_SERIAL
<< HOST_CONTROL_IF_SHIFT;
host_ctl |= HOST_CONTROL_REI;
- writel(0, host->addr + CLOCK_DELAY);
+ clock_ctl = CLOCK_CONTROL_40MHZ;
+ clock_delay = 0;
} else if (value == MEMSTICK_PAR4) {
host_ctl |= HOST_CONTROL_FAST_CLK;
host_ctl |= HOST_CONTROL_IF_PAR4
<< HOST_CONTROL_IF_SHIFT;
host_ctl &= ~HOST_CONTROL_REI;
- writel(4, host->addr + CLOCK_DELAY);
+ clock_ctl = CLOCK_CONTROL_40MHZ;
+ clock_delay = 4;
} else if (value == MEMSTICK_PAR8) {
host_ctl |= HOST_CONTROL_FAST_CLK;
host_ctl |= HOST_CONTROL_IF_PAR8
<< HOST_CONTROL_IF_SHIFT;
host_ctl &= ~HOST_CONTROL_REI;
- writel(4, host->addr + CLOCK_DELAY);
+ clock_ctl = CLOCK_CONTROL_60MHZ;
+ clock_delay = 0;
}
writel(host_ctl, host->addr + HOST_CONTROL);
+ writel(clock_ctl, host->addr + CLOCK_CONTROL);
+ writel(clock_delay, host->addr + CLOCK_DELAY);
break;
};
-
- spin_unlock_irqrestore(&host->lock, flags);
}
#ifdef CONFIG_PM
@@ -772,13 +784,10 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt)
snprintf(host->host_id, DEVICE_ID_SIZE, DRIVER_NAME ":slot%d",
host->id);
host->irq = jm->pdev->irq;
- host->timeout_jiffies = msecs_to_jiffies(4000);
+ host->timeout_jiffies = msecs_to_jiffies(1000);
msh->request = jmb38x_ms_request;
msh->set_param = jmb38x_ms_set_param;
- /*
- msh->caps = MEMSTICK_CAP_AUTO_GET_INT | MEMSTICK_CAP_PAR4
- | MEMSTICK_CAP_PAR8;
- */
+
msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8;
setup_timer(&host->timer, jmb38x_ms_abort, (unsigned long)msh);
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index 2b5bf52a830..eb150dfb637 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -340,11 +340,20 @@ static void tifm_ms_complete_cmd(struct tifm_ms *host)
del_timer(&host->timer);
- if (host->use_dma)
+ host->req->int_reg = readl(sock->addr + SOCK_MS_STATUS) & 0xff;
+ host->req->int_reg = (host->req->int_reg & 1)
+ | ((host->req->int_reg << 4) & 0xe0);
+
+ writel(TIFM_FIFO_INT_SETALL,
+ sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
+ writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
+
+ if (host->use_dma) {
tifm_unmap_sg(sock, &host->req->sg, 1,
host->req->data_dir == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
+ }
writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
sock->addr + SOCK_CONTROL);
@@ -424,12 +433,6 @@ static void tifm_ms_card_event(struct tifm_dev *sock)
else if (host_status & TIFM_MS_STAT_CRC)
host->req->error = -EILSEQ;
- if (host->req->error) {
- writel(TIFM_FIFO_INT_SETALL,
- sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
- writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
- }
-
if (host_status & TIFM_MS_STAT_RDY)
host->cmd_flags |= CMD_READY;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 82f5ad9c3af..9e7de63b26e 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -16,7 +16,7 @@ menuconfig RTC_CLASS
probably want to enable one or more of the interfaces below.
This driver can also be built as a module. If so, the module
- will be called rtc-class.
+ will be called rtc-core.
if RTC_CLASS
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index bbf10ecf416..56728a2a338 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -274,7 +274,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
* SR clears it, so we must only read it in this irq handler!
*/
mr = rtt_readl(rtc, MR) & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
- sr = rtt_readl(rtc, SR) & mr;
+ sr = rtt_readl(rtc, SR) & (mr >> 16);
if (!sr)
return IRQ_NONE;
@@ -321,6 +321,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
+ /* platform setup code should have handled this; sigh */
+ if (!device_can_wakeup(&pdev->dev))
+ device_init_wakeup(&pdev->dev, 1);
+
platform_set_drvdata(pdev, rtc);
rtc->rtt = (void __force __iomem *) (AT91_VA_BASE_SYS - AT91_BASE_SYS);
rtc->rtt += r->start;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index e0b0580705e..1bd5fb30237 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1893,6 +1893,20 @@ config FB_XILINX
framebuffer. ML300 carries a 640*480 LCD display on the board,
ML403 uses a standard DB15 VGA connector.
+config FB_METRONOME
+ tristate "Metronome display controller support"
+ depends on FB && ARCH_PXA && MMU
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select FB_DEFERRED_IO
+ help
+ This enables support for the Metronome display controller. Tested
+ with an E-Ink 800x600 display and Gumstix Connex through an AMLCD
+ interface. Please read <file:Documentation/fb/metronomefb.txt>
+ for more information.
+
config FB_VIRTUAL
tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 03371c78903..11c0e5e05f2 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_FB_PMAG_AA) += pmag-aa-fb.o
obj-$(CONFIG_FB_PMAG_BA) += pmag-ba-fb.o
obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
obj-$(CONFIG_FB_MAXINE) += maxinefb.o
+obj-$(CONFIG_FB_METRONOME) += metronomefb.o
obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
obj-$(CONFIG_FB_IMX) += imxfb.o
obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 0f8cfb988c9..24843fdd539 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -4,7 +4,7 @@
* Copyright (C) 2006 Jaya Kumar
*
* This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
+ * License. See the file COPYING in the main directory of this archive
* for more details.
*/
@@ -31,7 +31,7 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
unsigned long offset;
struct page *page;
struct fb_info *info = vma->vm_private_data;
- /* info->screen_base is in System RAM */
+ /* info->screen_base is virtual memory */
void *screen_base = (void __force *) info->screen_base;
offset = vmf->pgoff << PAGE_SHIFT;
@@ -43,6 +43,15 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
return VM_FAULT_SIGBUS;
get_page(page);
+
+ if (vma->vm_file)
+ page->mapping = vma->vm_file->f_mapping;
+ else
+ printk(KERN_ERR "no mapping available\n");
+
+ BUG_ON(!page->mapping);
+ page->index = vmf->pgoff;
+
vmf->page = page;
return 0;
}
@@ -138,11 +147,20 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_init);
void fb_deferred_io_cleanup(struct fb_info *info)
{
+ void *screen_base = (void __force *) info->screen_base;
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct page *page;
+ int i;
BUG_ON(!fbdefio);
cancel_delayed_work(&info->deferred_work);
flush_scheduled_work();
+
+ /* clear out the mapping that we setup */
+ for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+ page = vmalloc_to_page(screen_base + i);
+ page->mapping = NULL;
+ }
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 1d13dd099af..a24e680d2b9 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1476,7 +1476,7 @@ static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
- if (!(par->dev_flags & LOCKUP))
+ if (par->dev_flags & LOCKUP)
return -ENXIO;
if (cursor->image.width > 64 || cursor->image.height > 64)
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
new file mode 100644
index 00000000000..e9a89fd8275
--- /dev/null
+++ b/drivers/video/metronomefb.c
@@ -0,0 +1,999 @@
+/*
+ * linux/drivers/video/metronomefb.c -- FB driver for Metronome controller
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This work was made possible by help and equipment support from E-Ink
+ * Corporation. http://support.eink.com/community
+ *
+ * This driver is written to be used with the Metronome display controller.
+ * It was tested with an E-Ink 800x600 Vizplex EPD on a Gumstix Connex board
+ * using the Lyre interface board.
+ *
+ * General notes:
+ * - User must set metronomefb_enable=1 to enable it.
+ * - See Documentation/fb/metronomefb.txt for how metronome works.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+#include <linux/irq.h>
+
+#include <asm/arch/pxa-regs.h>
+#include <asm/unaligned.h>
+
+#define DEBUG 1
+#ifdef DEBUG
+#define DPRINTK(f, a...) printk(KERN_DEBUG "%s: " f, __func__ , ## a)
+#else
+#define DPRINTK(f, a...)
+#endif
+
+
+/* Display specific information */
+#define DPY_W 832
+#define DPY_H 622
+
+struct metromem_desc {
+ u32 mFDADR0;
+ u32 mFSADR0;
+ u32 mFIDR0;
+ u32 mLDCMD0;
+};
+
+struct metromem_cmd {
+ u16 opcode;
+ u16 args[((64-2)/2)];
+ u16 csum;
+};
+
+struct metronomefb_par {
+ unsigned char *metromem;
+ struct metromem_desc *metromem_desc;
+ struct metromem_cmd *metromem_cmd;
+ unsigned char *metromem_wfm;
+ unsigned char *metromem_img;
+ u16 *metromem_img_csum;
+ u16 *csum_table;
+ int metromemsize;
+ dma_addr_t metromem_dma;
+ dma_addr_t metromem_desc_dma;
+ struct fb_info *info;
+ wait_queue_head_t waitq;
+ u8 frame_count;
+};
+
+/* frame differs from image. frame includes non-visible pixels */
+struct epd_frame {
+ int fw; /* frame width */
+ int fh; /* frame height */
+};
+
+static struct epd_frame epd_frame_table[] = {
+ {
+ .fw = 832,
+ .fh = 622
+ },
+};
+
+static struct fb_fix_screeninfo metronomefb_fix __devinitdata = {
+ .id = "metronomefb",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_STATIC_PSEUDOCOLOR,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = DPY_W,
+ .accel = FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo metronomefb_var __devinitdata = {
+ .xres = DPY_W,
+ .yres = DPY_H,
+ .xres_virtual = DPY_W,
+ .yres_virtual = DPY_H,
+ .bits_per_pixel = 8,
+ .grayscale = 1,
+ .nonstd = 1,
+ .red = { 4, 3, 0 },
+ .green = { 0, 0, 0 },
+ .blue = { 0, 0, 0 },
+ .transp = { 0, 0, 0 },
+};
+
+static unsigned int metronomefb_enable;
+
+struct waveform_hdr {
+ u8 stuff[32];
+
+ u8 wmta[3];
+ u8 fvsn;
+
+ u8 luts;
+ u8 mc;
+ u8 trc;
+ u8 stuff3;
+
+ u8 endb;
+ u8 swtb;
+ u8 stuff2a[2];
+
+ u8 stuff2b[3];
+ u8 wfm_cs;
+} __attribute__ ((packed));
+
+/* main metronomefb functions */
+static u8 calc_cksum(int start, int end, u8 *mem)
+{
+ u8 tmp = 0;
+ int i;
+
+ for (i = start; i < end; i++)
+ tmp += mem[i];
+
+ return tmp;
+}
+
+static u16 calc_img_cksum(u16 *start, int length)
+{
+ u16 tmp = 0;
+
+ while (length--)
+ tmp += *start++;
+
+ return tmp;
+}
+
+/* here we decode the incoming waveform file and populate metromem */
+#define EXP_WFORM_SIZE 47001
+static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t,
+ u8 *frame_count)
+{
+ int tta;
+ int wmta;
+ int trn = 0;
+ int i;
+ unsigned char v;
+ u8 cksum;
+ int cksum_idx;
+ int wfm_idx, owfm_idx;
+ int mem_idx = 0;
+ struct waveform_hdr *wfm_hdr;
+
+ if (size != EXP_WFORM_SIZE) {
+ printk(KERN_ERR "Error: unexpected size %d != %d\n", size,
+ EXP_WFORM_SIZE);
+ return -EINVAL;
+ }
+
+ wfm_hdr = (struct waveform_hdr *) mem;
+
+ if (wfm_hdr->fvsn != 1) {
+ printk(KERN_ERR "Error: bad fvsn %x\n", wfm_hdr->fvsn);
+ return -EINVAL;
+ }
+ if (wfm_hdr->luts != 0) {
+ printk(KERN_ERR "Error: bad luts %x\n", wfm_hdr->luts);
+ return -EINVAL;
+ }
+ cksum = calc_cksum(32, 47, mem);
+ if (cksum != wfm_hdr->wfm_cs) {
+ printk(KERN_ERR "Error: bad cksum %x != %x\n", cksum,
+ wfm_hdr->wfm_cs);
+ return -EINVAL;
+ }
+ wfm_hdr->mc += 1;
+ wfm_hdr->trc += 1;
+ for (i = 0; i < 5; i++) {
+ if (*(wfm_hdr->stuff2a + i) != 0) {
+ printk(KERN_ERR "Error: unexpected value in padding\n");
+ return -EINVAL;
+ }
+ }
+
+ /* calculating trn. trn is something used to index into
+ the waveform. presumably selecting the right one for the
+ desired temperature. it works out the offset of the first
+ v that exceeds the specified temperature */
+ if ((sizeof(*wfm_hdr) + wfm_hdr->trc) > size)
+ return -EINVAL;
+
+ for (i = sizeof(*wfm_hdr); i <= sizeof(*wfm_hdr) + wfm_hdr->trc; i++) {
+ if (mem[i] > t) {
+ trn = i - sizeof(*wfm_hdr) - 1;
+ break;
+ }
+ }
+
+ /* check temperature range table checksum */
+ cksum_idx = sizeof(*wfm_hdr) + wfm_hdr->trc + 1;
+ if (cksum_idx > size)
+ return -EINVAL;
+ cksum = calc_cksum(sizeof(*wfm_hdr), cksum_idx, mem);
+ if (cksum != mem[cksum_idx]) {
+ printk(KERN_ERR "Error: bad temperature range table cksum"
+ " %x != %x\n", cksum, mem[cksum_idx]);
+ return -EINVAL;
+ }
+
+ /* check waveform mode table address checksum */
+ wmta = le32_to_cpu(get_unaligned((__le32 *) wfm_hdr->wmta));
+ wmta &= 0x00FFFFFF;
+ cksum_idx = wmta + m*4 + 3;
+ if (cksum_idx > size)
+ return -EINVAL;
+ cksum = calc_cksum(cksum_idx - 3, cksum_idx, mem);
+ if (cksum != mem[cksum_idx]) {
+ printk(KERN_ERR "Error: bad mode table address cksum"
+ " %x != %x\n", cksum, mem[cksum_idx]);
+ return -EINVAL;
+ }
+
+ /* check waveform temperature table address checksum */
+ tta = le32_to_cpu(get_unaligned((int *) (mem + wmta + m*4)));
+ tta &= 0x00FFFFFF;
+ cksum_idx = tta + trn*4 + 3;
+ if (cksum_idx > size)
+ return -EINVAL;
+ cksum = calc_cksum(cksum_idx - 3, cksum_idx, mem);
+ if (cksum != mem[cksum_idx]) {
+ printk(KERN_ERR "Error: bad temperature table address cksum"
+ " %x != %x\n", cksum, mem[cksum_idx]);
+ return -EINVAL;
+ }
+
+ /* here we do the real work of putting the waveform into the
+ metromem buffer. this does runlength decoding of the waveform */
+ wfm_idx = le32_to_cpu(get_unaligned((__le32 *) (mem + tta + trn*4)));
+ wfm_idx &= 0x00FFFFFF;
+ owfm_idx = wfm_idx;
+ if (wfm_idx > size)
+ return -EINVAL;
+ while (wfm_idx < size) {
+ unsigned char rl;
+ v = mem[wfm_idx++];
+ if (v == wfm_hdr->swtb) {
+ while (((v = mem[wfm_idx++]) != wfm_hdr->swtb) &&
+ wfm_idx < size)
+ metromem[mem_idx++] = v;
+
+ continue;
+ }
+
+ if (v == wfm_hdr->endb)
+ break;
+
+ rl = mem[wfm_idx++];
+ for (i = 0; i <= rl; i++)
+ metromem[mem_idx++] = v;
+ }
+
+ cksum_idx = wfm_idx;
+ if (cksum_idx > size)
+ return -EINVAL;
+ cksum = calc_cksum(owfm_idx, cksum_idx, mem);
+ if (cksum != mem[cksum_idx]) {
+ printk(KERN_ERR "Error: bad waveform data cksum"
+ " %x != %x\n", cksum, mem[cksum_idx]);
+ return -EINVAL;
+ }
+ *frame_count = (mem_idx/64);
+
+ return 0;
+}
+
+/* register offsets for gpio control */
+#define LED_GPIO_PIN 51
+#define STDBY_GPIO_PIN 48
+#define RST_GPIO_PIN 49
+#define RDY_GPIO_PIN 32
+#define ERR_GPIO_PIN 17
+#define PCBPWR_GPIO_PIN 16
+
+#define AF_SEL_GPIO_N 0x3
+#define GAFR0_U_OFFSET(pin) ((pin - 16) * 2)
+#define GAFR1_L_OFFSET(pin) ((pin - 32) * 2)
+#define GAFR1_U_OFFSET(pin) ((pin - 48) * 2)
+#define GPDR1_OFFSET(pin) (pin - 32)
+#define GPCR1_OFFSET(pin) (pin - 32)
+#define GPSR1_OFFSET(pin) (pin - 32)
+#define GPCR0_OFFSET(pin) (pin)
+#define GPSR0_OFFSET(pin) (pin)
+
+static void metronome_set_gpio_output(int pin, int val)
+{
+ u8 index;
+
+ index = pin >> 4;
+
+ switch (index) {
+ case 1:
+ if (val)
+ GPSR0 |= (1 << GPSR0_OFFSET(pin));
+ else
+ GPCR0 |= (1 << GPCR0_OFFSET(pin));
+ break;
+ case 2:
+ break;
+ case 3:
+ if (val)
+ GPSR1 |= (1 << GPSR1_OFFSET(pin));
+ else
+ GPCR1 |= (1 << GPCR1_OFFSET(pin));
+ break;
+ default:
+ printk(KERN_ERR "unimplemented\n");
+ }
+}
+
+static void __devinit metronome_init_gpio_pin(int pin, int dir)
+{
+ u8 index;
+ /* dir 0 is output, 1 is input
+ - do 2 things here:
+ - set gpio alternate function to standard gpio
+ - set gpio direction to input or output */
+
+ index = pin >> 4;
+ switch (index) {
+ case 1:
+ GAFR0_U &= ~(AF_SEL_GPIO_N << GAFR0_U_OFFSET(pin));
+
+ if (dir)
+ GPDR0 &= ~(1 << pin);
+ else
+ GPDR0 |= (1 << pin);
+ break;
+ case 2:
+ GAFR1_L &= ~(AF_SEL_GPIO_N << GAFR1_L_OFFSET(pin));
+
+ if (dir)
+ GPDR1 &= ~(1 << GPDR1_OFFSET(pin));
+ else
+ GPDR1 |= (1 << GPDR1_OFFSET(pin));
+ break;
+ case 3:
+ GAFR1_U &= ~(AF_SEL_GPIO_N << GAFR1_U_OFFSET(pin));
+
+ if (dir)
+ GPDR1 &= ~(1 << GPDR1_OFFSET(pin));
+ else
+ GPDR1 |= (1 << GPDR1_OFFSET(pin));
+ break;
+ default:
+ printk(KERN_ERR "unimplemented\n");
+ }
+}
+
+static void __devinit metronome_init_gpio_regs(void)
+{
+ metronome_init_gpio_pin(LED_GPIO_PIN, 0);
+ metronome_set_gpio_output(LED_GPIO_PIN, 0);
+
+ metronome_init_gpio_pin(STDBY_GPIO_PIN, 0);
+ metronome_set_gpio_output(STDBY_GPIO_PIN, 0);
+
+ metronome_init_gpio_pin(RST_GPIO_PIN, 0);
+ metronome_set_gpio_output(RST_GPIO_PIN, 0);
+
+ metronome_init_gpio_pin(RDY_GPIO_PIN, 1);
+
+ metronome_init_gpio_pin(ERR_GPIO_PIN, 1);
+
+ metronome_init_gpio_pin(PCBPWR_GPIO_PIN, 0);
+ metronome_set_gpio_output(PCBPWR_GPIO_PIN, 0);
+}
+
+static void metronome_disable_lcd_controller(struct metronomefb_par *par)
+{
+ LCSR = 0xffffffff; /* Clear LCD Status Register */
+ LCCR0 |= LCCR0_DIS; /* Disable LCD Controller */
+
+ /* we reset and just wait for things to settle */
+ msleep(200);
+}
+
+static void metronome_enable_lcd_controller(struct metronomefb_par *par)
+{
+ LCSR = 0xffffffff;
+ FDADR0 = par->metromem_desc_dma;
+ LCCR0 |= LCCR0_ENB;
+}
+
+static void __devinit metronome_init_lcdc_regs(struct metronomefb_par *par)
+{
+ /* here we do:
+ - disable the lcd controller
+ - setup lcd control registers
+ - setup dma descriptor
+ - reenable lcd controller
+ */
+
+ /* disable the lcd controller */
+ metronome_disable_lcd_controller(par);
+
+ /* setup lcd control registers */
+ LCCR0 = LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | LCCR0_PAS
+ | LCCR0_QDM | LCCR0_BM | LCCR0_OUM;
+
+ LCCR1 = (epd_frame_table[0].fw/2 - 1) /* pixels per line */
+ | (27 << 10) /* hsync pulse width - 1 */
+ | (33 << 16) /* eol pixel count */
+ | (33 << 24); /* bol pixel count */
+
+ LCCR2 = (epd_frame_table[0].fh - 1) /* lines per panel */
+ | (24 << 10) /* vsync pulse width - 1 */
+ | (2 << 16) /* eof pixel count */
+ | (0 << 24); /* bof pixel count */
+
+ LCCR3 = 2 /* pixel clock divisor */
+ | (24 << 8) /* AC Bias pin freq */
+ | LCCR3_16BPP /* BPP */
+ | LCCR3_PCP; /* PCP falling edge */
+
+ /* setup dma descriptor */
+ par->metromem_desc->mFDADR0 = par->metromem_desc_dma;
+ par->metromem_desc->mFSADR0 = par->metromem_dma;
+ par->metromem_desc->mFIDR0 = 0;
+ par->metromem_desc->mLDCMD0 = epd_frame_table[0].fw
+ * epd_frame_table[0].fh;
+ /* reenable lcd controller */
+ metronome_enable_lcd_controller(par);
+}
+
+static int metronome_display_cmd(struct metronomefb_par *par)
+{
+ int i;
+ u16 cs;
+ u16 opcode;
+ static u8 borderval;
+ u8 *ptr;
+
+ /* setup display command
+ we can't immediately set the opcode since the controller
+ will try parse the command before we've set it all up
+ so we just set cs here and set the opcode at the end */
+
+ ptr = par->metromem;
+
+ if (par->metromem_cmd->opcode == 0xCC40)
+ opcode = cs = 0xCC41;
+ else
+ opcode = cs = 0xCC40;
+
+ /* set the args ( 2 bytes ) for display */
+ i = 0;
+ par->metromem_cmd->args[i] = 1 << 3 /* border update */
+ | ((borderval++ % 4) & 0x0F) << 4
+ | (par->frame_count - 1) << 8;
+ cs += par->metromem_cmd->args[i++];
+
+ /* the rest are 0 */
+ memset((u8 *) (par->metromem_cmd->args + i), 0, (32-i)*2);
+
+ par->metromem_cmd->csum = cs;
+ par->metromem_cmd->opcode = opcode; /* display cmd */
+
+ i = wait_event_interruptible_timeout(par->waitq, (GPLR1 & 0x01), HZ);
+ return i;
+}
+
+static int __devinit metronome_powerup_cmd(struct metronomefb_par *par)
+{
+ int i;
+ u16 cs;
+
+ /* setup power up command */
+ par->metromem_cmd->opcode = 0x1234; /* pwr up pseudo cmd */
+ cs = par->metromem_cmd->opcode;
+
+ /* set pwr1,2,3 to 1024 */
+ for (i = 0; i < 3; i++) {
+ par->metromem_cmd->args[i] = 1024;
+ cs += par->metromem_cmd->args[i];
+ }
+
+ /* the rest are 0 */
+ memset((u8 *) (par->metromem_cmd->args + i), 0, (32-i)*2);
+
+ par->metromem_cmd->csum = cs;
+
+ msleep(1);
+ metronome_set_gpio_output(RST_GPIO_PIN, 1);
+
+ msleep(1);
+ metronome_set_gpio_output(STDBY_GPIO_PIN, 1);
+
+ i = wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
+ return i;
+}
+
+static int __devinit metronome_config_cmd(struct metronomefb_par *par)
+{
+ int i;
+ u16 cs;
+
+ /* setup config command
+ we can't immediately set the opcode since the controller
+ will try parse the command before we've set it all up
+ so we just set cs here and set the opcode at the end */
+
+ cs = 0xCC10;
+
+ /* set the 12 args ( 8 bytes ) for config. see spec for meanings */
+ i = 0;
+ par->metromem_cmd->args[i] = 15 /* sdlew */
+ | 2 << 8 /* sdosz */
+ | 0 << 11 /* sdor */
+ | 0 << 12 /* sdces */
+ | 0 << 15; /* sdcer */
+ cs += par->metromem_cmd->args[i++];
+
+ par->metromem_cmd->args[i] = 42 /* gdspl */
+ | 1 << 8 /* gdr1 */
+ | 1 << 9 /* sdshr */
+ | 0 << 15; /* gdspp */
+ cs += par->metromem_cmd->args[i++];
+
+ par->metromem_cmd->args[i] = 18 /* gdspw */
+ | 0 << 15; /* dispc */
+ cs += par->metromem_cmd->args[i++];
+
+ par->metromem_cmd->args[i] = 599 /* vdlc */
+ | 0 << 11 /* dsi */
+ | 0 << 12; /* dsic */
+ cs += par->metromem_cmd->args[i++];
+
+ /* the rest are 0 */
+ memset((u8 *) (par->metromem_cmd->args + i), 0, (32-i)*2);
+
+ par->metromem_cmd->csum = cs;
+ par->metromem_cmd->opcode = 0xCC10; /* config cmd */
+
+ i = wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
+ return i;
+}
+
+static int __devinit metronome_init_cmd(struct metronomefb_par *par)
+{
+ int i;
+ u16 cs;
+
+ /* setup init command
+ we can't immediately set the opcode since the controller
+ will try parse the command before we've set it all up
+ so we just set cs here and set the opcode at the end */
+
+ cs = 0xCC20;
+
+ /* set the args ( 2 bytes ) for init */
+ i = 0;
+ par->metromem_cmd->args[i] = 0;
+ cs += par->metromem_cmd->args[i++];
+
+ /* the rest are 0 */
+ memset((u8 *) (par->metromem_cmd->args + i), 0, (32-i)*2);
+
+ par->metromem_cmd->csum = cs;
+ par->metromem_cmd->opcode = 0xCC20; /* init cmd */
+
+ i = wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
+ return i;
+}
+
+static int __devinit metronome_init_regs(struct metronomefb_par *par)
+{
+ int res;
+
+ metronome_init_gpio_regs();
+ metronome_init_lcdc_regs(par);
+
+ res = metronome_powerup_cmd(par);
+ if (res)
+ return res;
+
+ res = metronome_config_cmd(par);
+ if (res)
+ return res;
+
+ res = metronome_init_cmd(par);
+ if (res)
+ return res;
+
+ return res;
+}
+
+static void metronomefb_dpy_update(struct metronomefb_par *par)
+{
+ u16 cksum;
+ unsigned char *buf = (unsigned char __force *)par->info->screen_base;
+
+ /* copy from vm to metromem */
+ memcpy(par->metromem_img, buf, DPY_W*DPY_H);
+
+ cksum = calc_img_cksum((u16 *) par->metromem_img,
+ (epd_frame_table[0].fw * DPY_H)/2);
+ *((u16 *) (par->metromem_img) +
+ (epd_frame_table[0].fw * DPY_H)/2) = cksum;
+ metronome_display_cmd(par);
+}
+
+static u16 metronomefb_dpy_update_page(struct metronomefb_par *par, int index)
+{
+ int i;
+ u16 csum = 0;
+ u16 *buf = (u16 __force *) (par->info->screen_base + index);
+ u16 *img = (u16 *) (par->metromem_img + index);
+
+ /* swizzle from vm to metromem and recalc cksum at the same time*/
+ for (i = 0; i < PAGE_SIZE/2; i++) {
+ *(img + i) = (buf[i] << 5) & 0xE0E0;
+ csum += *(img + i);
+ }
+ return csum;
+}
+
+/* this is called back from the deferred io workqueue */
+static void metronomefb_dpy_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ u16 cksum;
+ struct page *cur;
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct metronomefb_par *par = info->par;
+
+ /* walk the written page list and swizzle the data */
+ list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+ cksum = metronomefb_dpy_update_page(par,
+ (cur->index << PAGE_SHIFT));
+ par->metromem_img_csum -= par->csum_table[cur->index];
+ par->csum_table[cur->index] = cksum;
+ par->metromem_img_csum += cksum;
+ }
+
+ metronome_display_cmd(par);
+}
+
+static void metronomefb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct metronomefb_par *par = info->par;
+
+ cfb_fillrect(info, rect);
+ metronomefb_dpy_update(par);
+}
+
+static void metronomefb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct metronomefb_par *par = info->par;
+
+ cfb_copyarea(info, area);
+ metronomefb_dpy_update(par);
+}
+
+static void metronomefb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct metronomefb_par *par = info->par;
+
+ cfb_imageblit(info, image);
+ metronomefb_dpy_update(par);
+}
+
+/*
+ * this is the slow path from userspace. they can seek and write to
+ * the fb. it is based on fb_sys_write
+ */
+static ssize_t metronomefb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct metronomefb_par *par = info->par;
+ unsigned long p = *ppos;
+ void *dst;
+ int err = 0;
+ unsigned long total_size;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return -EPERM;
+
+ total_size = info->fix.smem_len;
+
+ if (p > total_size)
+ return -EFBIG;
+
+ if (count > total_size) {
+ err = -EFBIG;
+ count = total_size;
+ }
+
+ if (count + p > total_size) {
+ if (!err)
+ err = -ENOSPC;
+
+ count = total_size - p;
+ }
+
+ dst = (void __force *) (info->screen_base + p);
+
+ if (copy_from_user(dst, buf, count))
+ err = -EFAULT;
+
+ if (!err)
+ *ppos += count;
+
+ metronomefb_dpy_update(par);
+
+ return (err) ? err : count;
+}
+
+static struct fb_ops metronomefb_ops = {
+ .owner = THIS_MODULE,
+ .fb_write = metronomefb_write,
+ .fb_fillrect = metronomefb_fillrect,
+ .fb_copyarea = metronomefb_copyarea,
+ .fb_imageblit = metronomefb_imageblit,
+};
+
+static struct fb_deferred_io metronomefb_defio = {
+ .delay = HZ,
+ .deferred_io = metronomefb_dpy_deferred_io,
+};
+
+static irqreturn_t metronome_handle_irq(int irq, void *dev_id)
+{
+ struct fb_info *info = dev_id;
+ struct metronomefb_par *par = info->par;
+
+ wake_up_interruptible(&par->waitq);
+ return IRQ_HANDLED;
+}
+
+static int __devinit metronomefb_probe(struct platform_device *dev)
+{
+ struct fb_info *info;
+ int retval = -ENOMEM;
+ int videomemorysize;
+ unsigned char *videomemory;
+ struct metronomefb_par *par;
+ const struct firmware *fw_entry;
+ int cmd_size, wfm_size, img_size, padding_size, totalsize;
+ int i;
+
+ /* we have two blocks of memory.
+ info->screen_base which is vm, and is the fb used by apps.
+ par->metromem which is physically contiguous memory and
+ contains the display controller commands, waveform,
+ processed image data and padding. this is the data pulled
+ by the pxa255's LCD controller and pushed to Metronome */
+
+ videomemorysize = (DPY_W*DPY_H);
+ videomemory = vmalloc(videomemorysize);
+ if (!videomemory)
+ return retval;
+
+ memset(videomemory, 0, videomemorysize);
+
+ info = framebuffer_alloc(sizeof(struct metronomefb_par), &dev->dev);
+ if (!info)
+ goto err_vfree;
+
+ info->screen_base = (char __iomem *) videomemory;
+ info->fbops = &metronomefb_ops;
+
+ info->var = metronomefb_var;
+ info->fix = metronomefb_fix;
+ info->fix.smem_len = videomemorysize;
+ par = info->par;
+ par->info = info;
+ init_waitqueue_head(&par->waitq);
+
+ /* this table caches per page csum values. */
+ par->csum_table = vmalloc(videomemorysize/PAGE_SIZE);
+ if (!par->csum_table)
+ goto err_csum_table;
+
+ /* the metromem buffer is divided as follows:
+ command | CRC | padding
+ 16kb waveform data | CRC | padding
+ image data | CRC
+ and an extra 256 bytes for dma descriptors
+ eg: IW=832 IH=622 WS=128
+ */
+
+ cmd_size = 1 * epd_frame_table[0].fw;
+ wfm_size = ((16*1024 + 2 + epd_frame_table[0].fw - 1)
+ / epd_frame_table[0].fw) * epd_frame_table[0].fw;
+ img_size = epd_frame_table[0].fh * epd_frame_table[0].fw;
+ padding_size = 4 * epd_frame_table[0].fw;
+ totalsize = cmd_size + wfm_size + img_size + padding_size;
+ par->metromemsize = PAGE_ALIGN(totalsize + 256);
+ DPRINTK("desired memory size = %d\n", par->metromemsize);
+ dev->dev.coherent_dma_mask = 0xffffffffull;
+ par->metromem = dma_alloc_writecombine(&dev->dev, par->metromemsize,
+ &par->metromem_dma, GFP_KERNEL);
+ if (!par->metromem) {
+ printk(KERN_ERR
+ "metronomefb: unable to allocate dma buffer\n");
+ goto err_vfree;
+ }
+
+ info->fix.smem_start = par->metromem_dma;
+ par->metromem_cmd = (struct metromem_cmd *) par->metromem;
+ par->metromem_wfm = par->metromem + cmd_size;
+ par->metromem_img = par->metromem + cmd_size + wfm_size;
+ par->metromem_img_csum = (u16 *) (par->metromem_img +
+ (epd_frame_table[0].fw * DPY_H));
+ DPRINTK("img offset=0x%x\n", cmd_size + wfm_size);
+ par->metromem_desc = (struct metromem_desc *) (par->metromem + cmd_size
+ + wfm_size + img_size + padding_size);
+ par->metromem_desc_dma = par->metromem_dma + cmd_size + wfm_size
+ + img_size + padding_size;
+
+ /* load the waveform in. assume mode 3, temp 31 for now */
+ /* a) request the waveform file from userspace
+ b) process waveform and decode into metromem */
+
+ retval = request_firmware(&fw_entry, "waveform.wbf", &dev->dev);
+ if (retval < 0) {
+ printk(KERN_ERR "metronomefb: couldn't get waveform\n");
+ goto err_dma_free;
+ }
+
+ retval = load_waveform((u8 *) fw_entry->data, fw_entry->size,
+ par->metromem_wfm, 3, 31, &par->frame_count);
+ if (retval < 0) {
+ printk(KERN_ERR "metronomefb: couldn't process waveform\n");
+ goto err_ld_wfm;
+ }
+ release_firmware(fw_entry);
+
+ retval = request_irq(IRQ_GPIO(RDY_GPIO_PIN), metronome_handle_irq,
+ IRQF_DISABLED, "Metronome", info);
+ if (retval) {
+ dev_err(&dev->dev, "request_irq failed: %d\n", retval);
+ goto err_ld_wfm;
+ }
+ set_irq_type(IRQ_GPIO(RDY_GPIO_PIN), IRQT_FALLING);
+
+ retval = metronome_init_regs(par);
+ if (retval < 0)
+ goto err_free_irq;
+
+ info->flags = FBINFO_FLAG_DEFAULT;
+
+ info->fbdefio = &metronomefb_defio;
+ fb_deferred_io_init(info);
+
+ retval = fb_alloc_cmap(&info->cmap, 8, 0);
+ if (retval < 0) {
+ printk(KERN_ERR "Failed to allocate colormap\n");
+ goto err_fb_rel;
+ }
+
+ /* set cmap */
+ for (i = 0; i < 8; i++)
+ info->cmap.red[i] = (((2*i)+1)*(0xFFFF))/16;
+ memcpy(info->cmap.green, info->cmap.red, sizeof(u16)*8);
+ memcpy(info->cmap.blue, info->cmap.red, sizeof(u16)*8);
+
+ retval = register_framebuffer(info);
+ if (retval < 0)
+ goto err_cmap;
+
+ platform_set_drvdata(dev, info);
+
+ printk(KERN_INFO
+ "fb%d: Metronome frame buffer device, using %dK of video"
+ " memory\n", info->node, videomemorysize >> 10);
+
+ return 0;
+
+err_cmap:
+ fb_dealloc_cmap(&info->cmap);
+err_fb_rel:
+ framebuffer_release(info);
+err_free_irq:
+ free_irq(IRQ_GPIO(RDY_GPIO_PIN), info);
+err_ld_wfm:
+ release_firmware(fw_entry);
+err_dma_free:
+ dma_free_writecombine(&dev->dev, par->metromemsize, par->metromem,
+ par->metromem_dma);
+err_csum_table:
+ vfree(par->csum_table);
+err_vfree:
+ vfree(videomemory);
+ return retval;
+}
+
+static int __devexit metronomefb_remove(struct platform_device *dev)
+{
+ struct fb_info *info = platform_get_drvdata(dev);
+
+ if (info) {
+ struct metronomefb_par *par = info->par;
+ fb_deferred_io_cleanup(info);
+ dma_free_writecombine(&dev->dev, par->metromemsize,
+ par->metromem, par->metromem_dma);
+ fb_dealloc_cmap(&info->cmap);
+ vfree(par->csum_table);
+ unregister_framebuffer(info);
+ vfree((void __force *)info->screen_base);
+ free_irq(IRQ_GPIO(RDY_GPIO_PIN), info);
+ framebuffer_release(info);
+ }
+ return 0;
+}
+
+static struct platform_driver metronomefb_driver = {
+ .probe = metronomefb_probe,
+ .remove = metronomefb_remove,
+ .driver = {
+ .name = "metronomefb",
+ },
+};
+
+static struct platform_device *metronomefb_device;
+
+static int __init metronomefb_init(void)
+{
+ int ret;
+
+ if (!metronomefb_enable) {
+ printk(KERN_ERR
+ "Use metronomefb_enable to enable the device\n");
+ return -ENXIO;
+ }
+
+ ret = platform_driver_register(&metronomefb_driver);
+ if (!ret) {
+ metronomefb_device = platform_device_alloc("metronomefb", 0);
+ if (metronomefb_device)
+ ret = platform_device_add(metronomefb_device);
+ else
+ ret = -ENOMEM;
+
+ if (ret) {
+ platform_device_put(metronomefb_device);
+ platform_driver_unregister(&metronomefb_driver);
+ }
+ }
+ return ret;
+
+}
+
+static void __exit metronomefb_exit(void)
+{
+ platform_device_unregister(metronomefb_device);
+ platform_driver_unregister(&metronomefb_driver);
+}
+
+module_param(metronomefb_enable, uint, 0);
+MODULE_PARM_DESC(metronomefb_enable, "Enable communication with Metronome");
+
+module_init(metronomefb_init);
+module_exit(metronomefb_exit);
+
+MODULE_DESCRIPTION("fbdev driver for Metronome controller");
+MODULE_AUTHOR("Jaya Kumar");
+MODULE_LICENSE("GPL");
diff --git a/fs/aio.c b/fs/aio.c
index b74c567383b..6af92194062 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -996,6 +996,14 @@ put_rq:
/* everything turned out well, dispose of the aiocb. */
ret = __aio_put_req(ctx, iocb);
+ /*
+ * We have to order our ring_info tail store above and test
+ * of the wait list below outside the wait lock. This is
+ * like in wake_up_bit() where clearing a bit has to be
+ * ordered with the unlocked test.
+ */
+ smp_mb();
+
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
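The comment added above describes a store/test pairing; the sketch below, using illustrative names rather than the real aio structures, shows the two sides that the smp_mb() keeps ordered.

	/* Waker (completion path): publish the new tail, then test for
	 * sleepers.  The barrier keeps the store ordered before the test. */
	ring->tail = new_tail;
	smp_mb();
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	/* Sleeper: wait_event() queues the task and then re-checks the
	 * condition, so a waker that saw an empty queue is guaranteed the
	 * sleeper will observe the updated tail. */
	wait_event(ctx->wait, ring->tail != old_tail);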
diff --git a/fs/buffer.c b/fs/buffer.c
index ddfdd2c80bf..7ba58386bee 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3213,7 +3213,7 @@ static int buffer_cpu_notify(struct notifier_block *self,
}
/**
- * bh_uptodate_or_lock: Test whether the buffer is uptodate
+ * bh_uptodate_or_lock - Test whether the buffer is uptodate
* @bh: struct buffer_head
*
* Return true if the buffer is up-to-date and false,
@@ -3232,7 +3232,7 @@ int bh_uptodate_or_lock(struct buffer_head *bh)
EXPORT_SYMBOL(bh_uptodate_or_lock);
/**
- * bh_submit_read: Submit a locked buffer for reading
+ * bh_submit_read - Submit a locked buffer for reading
* @bh: struct buffer_head
*
* Returns zero on success and -EIO on error.
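This hunk, and the many similar one-character hunks below, exist because the kernel-doc tooling expects the short description to be written as "name - text"; with "name:" it is not picked up as the one-line summary. For reference, the expected shape of such a comment (hypothetical function) is:

	/**
	 * foo_frobnicate - one-line summary of what foo_frobnicate() does
	 * @dev: device being frobnicated
	 * @flags: behaviour modifiers
	 *
	 * Longer description goes here.  Document the return value with a
	 * "Returns:" paragraph rather than an @returns: line.
	 */
	int foo_frobnicate(struct device *dev, unsigned long flags);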
diff --git a/fs/dquot.c b/fs/dquot.c
index 9c7feb62eed..41b9dbd68b0 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -1522,8 +1522,8 @@ int vfs_quota_off(struct super_block *sb, int type)
truncate_inode_pages(&toputinode[cnt]->i_data, 0);
mutex_unlock(&toputinode[cnt]->i_mutex);
mark_inode_dirty(toputinode[cnt]);
- iput(toputinode[cnt]);
}
+ iput(toputinode[cnt]);
mutex_unlock(&dqopt->dqonoff_mutex);
}
if (sb->s_bdev)
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 841a032050a..5e596583946 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -80,8 +80,8 @@ static void ecryptfs_d_release(struct dentry *dentry)
{
if (ecryptfs_dentry_to_private(dentry)) {
if (ecryptfs_dentry_to_lower(dentry)) {
- mntput(ecryptfs_dentry_to_lower_mnt(dentry));
dput(ecryptfs_dentry_to_lower(dentry));
+ mntput(ecryptfs_dentry_to_lower_mnt(dentry));
}
kmem_cache_free(ecryptfs_dentry_info_cache,
ecryptfs_dentry_to_private(dentry));
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index d34e9967430..a754d184817 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -37,7 +37,7 @@ ext3_acl_from_disk(const void *value, size_t size)
return ERR_PTR(-EINVAL);
if (count == 0)
return NULL;
- acl = posix_acl_alloc(count, GFP_KERNEL);
+ acl = posix_acl_alloc(count, GFP_NOFS);
if (!acl)
return ERR_PTR(-ENOMEM);
for (n=0; n < count; n++) {
@@ -91,7 +91,7 @@ ext3_acl_to_disk(const struct posix_acl *acl, size_t *size)
*size = ext3_acl_size(acl->a_count);
ext_acl = kmalloc(sizeof(ext3_acl_header) + acl->a_count *
- sizeof(ext3_acl_entry), GFP_KERNEL);
+ sizeof(ext3_acl_entry), GFP_NOFS);
if (!ext_acl)
return ERR_PTR(-ENOMEM);
ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION);
@@ -187,7 +187,7 @@ ext3_get_acl(struct inode *inode, int type)
}
retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
if (retval > 0) {
- value = kmalloc(retval, GFP_KERNEL);
+ value = kmalloc(retval, GFP_NOFS);
if (!value)
return ERR_PTR(-ENOMEM);
retval = ext3_xattr_get(inode, name_index, "", value, retval);
@@ -335,7 +335,7 @@ ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
if (error)
goto cleanup;
}
- clone = posix_acl_clone(acl, GFP_KERNEL);
+ clone = posix_acl_clone(acl, GFP_NOFS);
error = -ENOMEM;
if (!clone)
goto cleanup;
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 9397d779c43..0e97b6e07cb 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -485,7 +485,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
goto exit_dindj;
n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
- GFP_KERNEL);
+ GFP_NOFS);
if (!n_group_desc) {
err = -ENOMEM;
ext3_warning (sb, __FUNCTION__,
@@ -568,7 +568,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
int res, i;
int err;
- primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_KERNEL);
+ primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
if (!primary)
return -ENOMEM;
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index fb89c299bec..a6ea4d6a8bb 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -728,7 +728,7 @@ ext3_xattr_block_set(handle_t *handle, struct inode *inode,
ce = NULL;
}
ea_bdebug(bs->bh, "cloning");
- s->base = kmalloc(bs->bh->b_size, GFP_KERNEL);
+ s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
error = -ENOMEM;
if (s->base == NULL)
goto cleanup;
@@ -740,7 +740,7 @@ ext3_xattr_block_set(handle_t *handle, struct inode *inode,
}
} else {
/* Allocate a buffer where we construct the new block. */
- s->base = kzalloc(sb->s_blocksize, GFP_KERNEL);
+ s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
/* assert(header == s->base) */
error = -ENOMEM;
if (s->base == NULL)
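The ext3 conversions above all follow one rule: while the filesystem is inside a transaction or holds xattr/resize locks, an allocation must not let direct reclaim re-enter the filesystem, so GFP_NOFS is used instead of GFP_KERNEL. A minimal sketch of the pattern, with assumed credit counts and most error handling elided:

	handle_t *handle = ext3_journal_start(inode, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* GFP_KERNEL here could recurse into ext3 via writeback during
	 * reclaim and deadlock against the transaction we already hold;
	 * GFP_NOFS forbids that re-entry. */
	value = kmalloc(size, GFP_NOFS);
	if (!value) {
		ext3_journal_stop(handle);
		return -ENOMEM;
	}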
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c0076077d33..06557679ca4 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -751,7 +751,7 @@ int generic_osync_inode(struct inode *inode, struct address_space *mapping, int
EXPORT_SYMBOL(generic_osync_inode);
/**
- * writeback_acquire: attempt to get exclusive writeback access to a device
+ * writeback_acquire - attempt to get exclusive writeback access to a device
* @bdi: the device's backing_dev_info structure
*
* It is a waste of resources to have more than one pdflush thread blocked on
@@ -768,7 +768,7 @@ int writeback_acquire(struct backing_dev_info *bdi)
}
/**
- * writeback_in_progress: determine whether there is writeback in progress
+ * writeback_in_progress - determine whether there is writeback in progress
* @bdi: the device's backing_dev_info structure.
*
* Determine whether there is writeback in progress against a backing device.
@@ -779,7 +779,7 @@ int writeback_in_progress(struct backing_dev_info *bdi)
}
/**
- * writeback_release: relinquish exclusive writeback access against a device.
+ * writeback_release - relinquish exclusive writeback access against a device.
* @bdi: the device's backing_dev_info structure
*/
void writeback_release(struct backing_dev_info *bdi)
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 37dbd640478..defb932eee9 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -72,6 +72,17 @@ static int zisofs_readpage(struct file *file, struct page *page)
offset = index & ~zisofs_block_page_mask;
blockindex = offset >> zisofs_block_page_shift;
maxpage = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+ /*
+ * If this page is wholly outside i_size we just return zero;
+ * do_generic_file_read() will handle this for us
+ */
+ if (page->index >= maxpage) {
+ SetPageUptodate(page);
+ unlock_page(page);
+ return 0;
+ }
+
maxpage = min(zisofs_block_pages, maxpage-offset);
for ( i = 0 ; i < maxpage ; i++, offset++ ) {
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 3943a8905eb..9816293442a 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -697,13 +697,14 @@ fail:
*/
/**
- * journal_t * journal_init_dev() - creates an initialises a journal structure
+ * journal_t * journal_init_dev() - creates and initialises a journal structure
* @bdev: Block device on which to create the journal
* @fs_dev: Device which hold journalled filesystem for this journal.
* @start: Block nr Start of journal.
* @len: Length of the journal in blocks.
* @blocksize: blocksize of journalling device
- * @returns: a newly created journal_t *
+ *
+ * Returns: a newly created journal_t *
*
* journal_init_dev creates a journal which maps a fixed contiguous
* range of blocks on an arbitrary block device.
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 2b8edf4d6ea..43bc5e5ed06 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -478,7 +478,7 @@ static int do_one_pass(journal_t *journal,
memcpy(nbh->b_data, obh->b_data,
journal->j_blocksize);
if (flags & JFS_FLAG_ESCAPE) {
- *((__be32 *)bh->b_data) =
+ *((__be32 *)nbh->b_data) =
cpu_to_be32(JFS_MAGIC_NUMBER);
}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index c6cbb6cd59b..2c9e8f5d13a 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1426,7 +1426,8 @@ int journal_stop(handle_t *handle)
return err;
}
-/**int journal_force_commit() - force any uncommitted transactions
+/**
+ * int journal_force_commit() - force any uncommitted transactions
* @journal: journal to force
*
* For synchronous operations: force any uncommitted transactions
@@ -1903,13 +1904,12 @@ zap_buffer_unlocked:
}
/**
- * void journal_invalidatepage()
- * @journal: journal to use for flush...
+ * void journal_invalidatepage() - invalidate a journal page
+ * @journal: journal to use for flush
* @page: page to flush
* @offset: length of page to invalidate.
*
* Reap page buffers containing data after offset in page.
- *
*/
void journal_invalidatepage(journal_t *journal,
struct page *page,
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 146411387ad..5d0405a9e7c 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -535,7 +535,7 @@ static int do_one_pass(journal_t *journal,
memcpy(nbh->b_data, obh->b_data,
journal->j_blocksize);
if (flags & JBD2_FLAG_ESCAPE) {
- *((__be32 *)bh->b_data) =
+ *((__be32 *)nbh->b_data) =
cpu_to_be32(JBD2_MAGIC_NUMBER);
}
diff --git a/fs/locks.c b/fs/locks.c
index f36f0e61558..d83fab1b77b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1275,13 +1275,13 @@ out:
EXPORT_SYMBOL(__break_lease);
/**
- * lease_get_mtime
+ * lease_get_mtime - get the last modified time of an inode
* @inode: the inode
* @time: pointer to a timespec which will contain the last modified time
*
* This is to force NFS clients to flush their caches for files with
* exclusive leases. The justification is that if someone has an
- * exclusive lease, then they could be modifiying it.
+ * exclusive lease, then they could be modifying it.
*/
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
diff --git a/fs/namei.c b/fs/namei.c
index 941c8e8228c..6b7a0eef409 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1364,13 +1364,13 @@ static int __lookup_one_len(const char *name, struct qstr *this,
}
/**
- * lookup_one_len: filesystem helper to lookup single pathname component
+ * lookup_one_len - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
* @len: maximum length @len should be interpreted to
*
- * Note that this routine is purely a helper for filesystem useage and should
- * not be called by generic code. Also note that by using this function to
+ * Note that this routine is purely a helper for filesystem usage and should
+ * not be called by generic code. Also note that by using this function the
* nameidata argument is passed to the filesystem methods and a filesystem
* using this helper needs to be prepared for that.
*/
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index 00b6f0a518c..3f13d491c7c 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -340,8 +340,9 @@ static struct dentry *
romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
unsigned long offset, maxoff;
- int fslen, res;
- struct inode *inode;
+ long res;
+ int fslen;
+ struct inode *inode = NULL;
char fsname[ROMFS_MAXFN]; /* XXX dynamic? */
struct romfs_inode ri;
const char *name; /* got from dentry */
@@ -351,7 +352,7 @@ romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
offset = dir->i_ino & ROMFH_MASK;
lock_kernel();
if (romfs_copyfrom(dir, &ri, offset, ROMFH_SIZE) <= 0)
- goto out;
+ goto error;
maxoff = romfs_maxsize(dir->i_sb);
offset = be32_to_cpu(ri.spec) & ROMFH_MASK;
@@ -364,9 +365,9 @@ romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
for(;;) {
if (!offset || offset >= maxoff)
- goto out0;
+ goto success; /* negative success */
if (romfs_copyfrom(dir, &ri, offset, ROMFH_SIZE) <= 0)
- goto out;
+ goto error;
/* try to match the first 16 bytes of name */
fslen = romfs_strnlen(dir, offset+ROMFH_SIZE, ROMFH_SIZE);
@@ -397,23 +398,14 @@ romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
inode = romfs_iget(dir->i_sb, offset);
if (IS_ERR(inode)) {
res = PTR_ERR(inode);
- goto out;
+ goto error;
}
- /*
- * it's a bit funky, _lookup needs to return an error code
- * (negative) or a NULL, both as a dentry. ENOENT should not
- * be returned, instead we need to create a negative dentry by
- * d_add(dentry, NULL); and return 0 as no error.
- * (Although as I see, it only matters on writable file
- * systems).
- */
-
-out0: inode = NULL;
+success:
+ d_add(dentry, inode);
res = 0;
- d_add (dentry, inode);
-
-out: unlock_kernel();
+error:
+ unlock_kernel();
return ERR_PTR(res);
}
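The rewrite above keeps the convention the deleted comment spelled out: a name that is not found is reported as a negative dentry rather than as -ENOENT. Reduced to its essentials (illustrative only, not the exact romfs code):

	/* name not found: attach a NULL inode and report success */
	if (!inode) {
		d_add(dentry, NULL);
		return NULL;		/* same as ERR_PTR(0) above */
	}

	/* name found: attach the inode obtained from romfs_iget() */
	d_add(dentry, inode);
	return NULL;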
diff --git a/fs/super.c b/fs/super.c
index 010446d8c40..d0a941a4e62 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -556,11 +556,11 @@ out:
}
/**
- * mark_files_ro
+ * mark_files_ro - mark all files read-only
* @sb: superblock in question
*
- * All files are marked read/only. We don't care about pending
- * delete files so this should be used in 'force' mode only
+ * All files are marked read-only. We don't care about pending
+ * delete files so this should be used in 'force' mode only.
*/
static void mark_files_ro(struct super_block *sb)
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 1fca381f0ce..1e7598fb978 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -315,8 +315,8 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
}
UFSD(" change from %llu to %llu, pos %u\n",
- (unsigned long long)pos + oldb,
- (unsigned long long)pos + newb, pos);
+ (unsigned long long)(pos + oldb),
+ (unsigned long long)(pos + newb), pos);
bh->b_blocknr = newb + pos;
unmap_underlying_metadata(bh->b_bdev,
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index b18fd3b9b83..423f5827218 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -348,8 +348,7 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
struct jbd_revoke_table_s;
/**
- * struct handle_s - The handle_s type is the concrete type associated with
- * handle_t.
+ * struct handle_s - this is the concrete type associated with handle_t.
* @h_transaction: Which compound transaction is this update a part of?
* @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
* @h_ref: Reference count on this handle
@@ -358,12 +357,7 @@ struct jbd_revoke_table_s;
* @h_jdata: flag to force data journaling
* @h_aborted: flag indicating fatal error on handle
* @h_lockdep_map: lockdep info for debugging lock problems
- **/
-
-/* Docbook can't yet cope with the bit fields, but will leave the documentation
- * in so it can be fixed later.
*/
-
struct handle_s
{
/* Which compound transaction is this update a part of? */
@@ -558,8 +552,7 @@ struct transaction_s
};
/**
- * struct journal_s - The journal_s type is the concrete type associated with
- * journal_t.
+ * struct journal_s - this is the concrete type associated with journal_t.
* @j_flags: General journaling state flags
* @j_errno: Is there an outstanding uncleared error on the journal (from a
* prior abort)?
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index b7ee2588883..3e686ec6a96 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -239,7 +239,6 @@ struct memstick_request {
unsigned char tpc;
unsigned char data_dir:1,
need_card_int:1,
- get_int_reg:1,
long_data:1;
unsigned char int_reg;
int error;
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 01152ed532c..d038aa6e5ee 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -87,15 +87,15 @@ DECLARE_PER_CPU(long, dynticks_progress_counter);
static inline void rcu_enter_nohz(void)
{
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
__get_cpu_var(dynticks_progress_counter)++;
WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1);
- mb();
}
static inline void rcu_exit_nohz(void)
{
- mb();
__get_cpu_var(dynticks_progress_counter)++;
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1));
}
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 548c436a776..278534bbca9 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -141,13 +141,8 @@ static void clocksource_watchdog(unsigned long data)
}
if (!list_empty(&watchdog_list)) {
- /* Cycle through CPUs to check if the CPUs stay synchronized to
- * each other. */
- int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
- if (next_cpu >= NR_CPUS)
- next_cpu = first_cpu(cpu_online_map);
- watchdog_timer.expires += WATCHDOG_INTERVAL;
- add_timer_on(&watchdog_timer, next_cpu);
+ __mod_timer(&watchdog_timer,
+ watchdog_timer.expires + WATCHDOG_INTERVAL);
}
spin_unlock(&watchdog_lock);
}
@@ -169,7 +164,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
if (!started && watchdog) {
watchdog_last = watchdog->read();
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
- add_timer_on(&watchdog_timer, first_cpu(cpu_online_map));
+ add_timer(&watchdog_timer);
}
} else {
if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
@@ -190,8 +185,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
watchdog_last = watchdog->read();
watchdog_timer.expires =
jiffies + WATCHDOG_INTERVAL;
- add_timer_on(&watchdog_timer,
- first_cpu(cpu_online_map));
+ add_timer(&watchdog_timer);
}
}
}
diff --git a/mm/filemap.c b/mm/filemap.c
index df343d1e634..07e9d9258b4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -343,7 +343,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
EXPORT_SYMBOL(sync_page_range);
/**
- * sync_page_range_nolock
+ * sync_page_range_nolock - write & wait on all pages in the passed range without taking i_mutex
* @inode: target inode
* @mapping: target address_space
* @pos: beginning offset in pages to write
@@ -611,7 +611,10 @@ int __lock_page_killable(struct page *page)
sync_page_killable, TASK_KILLABLE);
}
-/*
+/**
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
+ * @page: the page to lock
+ *
* Variant of lock_page that does not require the caller to hold a reference
* on the page's mapping.
*/
@@ -1538,9 +1541,20 @@ repeat:
return page;
}
-/*
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: first argument passed to @filler, often NULL
+ *
* Same as read_cache_page, but don't wait for page to become unlocked
* after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
*/
struct page *read_cache_page_async(struct address_space *mapping,
pgoff_t index,
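As the new comment says, read_cache_page_async() returns without waiting for the filler to finish; a caller that needs the data immediately has to wait itself, roughly like this (illustrative, error paths abbreviated):

	page = read_cache_page_async(mapping, index, filler, data);
	if (IS_ERR(page))
		return page;

	wait_on_page_locked(page);
	if (!PageUptodate(page)) {
		page_cache_release(page);
		return ERR_PTR(-EIO);
	}
	return page;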
diff --git a/mm/fremap.c b/mm/fremap.c
index 69a37c2bdf8..07a9c82ce1a 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -113,7 +113,7 @@ static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
* mmap()/mremap() it does not create any new vmas. The new mappings are
* also safe across swapout.
*
- * NOTE: the 'prot' parameter right now is ignored (but must be zero),
+ * NOTE: the @prot parameter right now is ignored (but must be zero),
* and the vma's default protection is used. Arbitrary protections
* might be implemented in the future.
*/
diff --git a/mm/highmem.c b/mm/highmem.c
index 35d47733cde..7da4a7b6af1 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -104,8 +104,9 @@ static void flush_all_zero_pkmaps(void)
flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
-/* Flush all unused kmap mappings in order to remove stray
- mappings. */
+/**
+ * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
+ */
void kmap_flush_unused(void)
{
spin_lock(&kmap_lock);
@@ -163,6 +164,14 @@ start:
return vaddr;
}
+/**
+ * kmap_high - map a highmem page into memory
+ * @page: &struct page to map
+ *
+ * Returns the page's virtual memory address.
+ *
+ * We cannot call this from interrupts, as it may block.
+ */
void *kmap_high(struct page *page)
{
unsigned long vaddr;
@@ -170,8 +179,6 @@ void *kmap_high(struct page *page)
/*
* For highmem pages, we can't trust "virtual" until
* after we have the lock.
- *
- * We cannot call this from interrupts, as it may block
*/
spin_lock(&kmap_lock);
vaddr = (unsigned long)page_address(page);
@@ -185,6 +192,10 @@ void *kmap_high(struct page *page)
EXPORT_SYMBOL(kmap_high);
+/**
+ * kunmap_high - unmap a highmem page
+ * @page: &struct page to unmap
+ */
void kunmap_high(struct page *page)
{
unsigned long vaddr;
@@ -259,6 +270,12 @@ static struct page_address_slot *page_slot(struct page *page)
return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}
+/**
+ * page_address - get the mapped virtual address of a page
+ * @page: &struct page to get the virtual address of
+ *
+ * Returns the page's virtual address.
+ */
void *page_address(struct page *page)
{
unsigned long flags;
@@ -288,6 +305,11 @@ done:
EXPORT_SYMBOL(page_address);
+/**
+ * set_page_address - set a page's virtual address
+ * @page: &struct page to set
+ * @virtual: virtual address to use
+ */
void set_page_address(struct page *page, void *virtual)
{
unsigned long flags;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8b9f6cae938..9b648bd6345 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1079,7 +1079,7 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
* Only thread group leaders are allowed to migrate, the mm_struct is
* in effect owned by the leader
*/
- if (p->tgid != p->pid)
+ if (!thread_group_leader(p))
goto out;
css_get(&mem->css);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 44b2da11bf4..f255eda693b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -37,6 +37,7 @@ static DEFINE_SPINLOCK(zone_scan_mutex);
* badness - calculate a numeric value for how bad this task has been
* @p: task struct of which task we should calculate
* @uptime: current uptime in seconds
+ * @mem: target memory controller
*
* The formula used is relatively simple and documented inline in the
* function. The main rationale is that we want to select a good task
@@ -264,6 +265,9 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
}
/**
+ * dump_tasks - dump current memory state of all system tasks
+ * @mem: target memory controller
+ *
* Dumps the current memory state of all system tasks, excluding kernel threads.
* State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
* score, and name.
@@ -298,7 +302,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
} while_each_thread(g, p);
}
-/**
+/*
* Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO
* flag though it's unlikely that we select a process with CAP_SYS_RAW_IO
* set.
@@ -504,6 +508,9 @@ void clear_zonelist_oom(struct zonelist *zonelist)
/**
* out_of_memory - kill the "best" process when we run out of memory
+ * @zonelist: zonelist pointer
+ * @gfp_mask: memory allocation flags
+ * @order: amount of memory being requested as a power of 2
*
* If we run out of memory, we have the choice between either
* killing a random task (bad), letting the system crash (worse)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index b4f27d22da9..1cf1417ef8b 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -77,11 +77,11 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
/**
* walk_page_range - walk a memory map's page tables with a callback
- * @mm - memory map to walk
- * @addr - starting address
- * @end - ending address
- * @walk - set of callbacks to invoke for each level of the tree
- * @private - private data passed to the callback function
+ * @mm: memory map to walk
+ * @addr: starting address
+ * @end: ending address
+ * @walk: set of callbacks to invoke for each level of the tree
+ * @private: private data passed to the callback function
*
* Recursively walk the page table for the memory area in a VMA,
* calling supplied callbacks. Callbacks are called in-order (first
diff --git a/mm/readahead.c b/mm/readahead.c
index c9c50ca1ec3..8762e898897 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -443,9 +443,10 @@ EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
* pagecache pages
*
* page_cache_async_ondemand() should be called when a page is used which
- * has the PG_readahead flag: this is a marker to suggest that the application
+ * has the PG_readahead flag; this is a marker to suggest that the application
* has used up enough of the readahead window that we should start pulling in
- * more pages. */
+ * more pages.
+ */
void
page_cache_async_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *filp,
diff --git a/mm/rmap.c b/mm/rmap.c
index 0c9a2df06c3..997f06907b6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -335,6 +335,7 @@ static int page_referenced_anon(struct page *page,
/**
* page_referenced_file - referenced check for object-based rmap
* @page: the page we're checking references on.
+ * @mem_cont: target memory controller
*
* For an object-based mapped page, find all the places it is mapped and
* check/clear the referenced flag. This is done by following the page->mapping
@@ -402,6 +403,7 @@ static int page_referenced_file(struct page *page,
* page_referenced - test if the page was referenced
* @page: the page to test
* @is_locked: caller holds lock on the page
+ * @mem_cont: target memory controller
*
* Quick test_and_clear_referenced for all mappings to a page,
* returns the number of ptes which referenced the page.
@@ -506,7 +508,7 @@ int page_mkclean(struct page *page)
EXPORT_SYMBOL_GPL(page_mkclean);
/**
- * page_set_anon_rmap - setup new anonymous rmap
+ * __page_set_anon_rmap - setup new anonymous rmap
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
@@ -530,7 +532,7 @@ static void __page_set_anon_rmap(struct page *page,
}
/**
- * page_set_anon_rmap - sanity check anonymous rmap addition
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
@@ -583,7 +585,7 @@ void page_add_anon_rmap(struct page *page,
}
}
-/*
+/**
* page_add_new_anon_rmap - add pte mapping to a new anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
@@ -623,6 +625,8 @@ void page_add_file_rmap(struct page *page)
/**
* page_dup_rmap - duplicate pte mapping to a page
* @page: the page to add the mapping to
+ * @vma: the vm area being duplicated
+ * @address: the user virtual address mapped
*
* For copy_page_range only: minimal extract from page_add_file_rmap /
* page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
@@ -642,6 +646,7 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
+ * @vma: the vm area in which the mapping is removed
*
* The caller needs to hold the pte lock.
*/
@@ -890,6 +895,7 @@ static int try_to_unmap_anon(struct page *page, int migration)
/**
* try_to_unmap_file - unmap file page using the object-based rmap method
* @page: the page to unmap
+ * @migration: migration flag
*
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
@@ -986,6 +992,7 @@ out:
/**
* try_to_unmap - try to remove all page table mappings to a page
* @page: the page to get unmapped
+ * @migration: migration flag
*
* Tries to remove all the page table entries which are mapping this
* page, used in the pageout path. Caller must hold the page lock.
diff --git a/mm/shmem.c b/mm/shmem.c
index 3372bc579e8..f514dd392cd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -244,9 +244,8 @@ static void shmem_free_inode(struct super_block *sb)
}
}
-/*
+/**
* shmem_recalc_inode - recalculate the size of an inode
- *
* @inode: inode to recalc
*
* We have to calculate the free blocks since the mm can drop
@@ -270,9 +269,8 @@ static void shmem_recalc_inode(struct inode *inode)
}
}
-/*
+/**
* shmem_swp_entry - find the swap vector position in the info structure
- *
* @info: info structure for the inode
* @index: index of the page to find
* @page: optional page to add to the structure. Has to be preset to
@@ -374,13 +372,13 @@ static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, uns
}
}
-/*
+/**
* shmem_swp_alloc - get the position of the swap entry for the page.
- * If it does not exist allocate the entry.
- *
* @info: info structure for the inode
* @index: index of the page to find
* @sgp: check and recheck i_size? skip allocation?
+ *
+ * If the entry does not exist, allocate it.
*/
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
@@ -440,9 +438,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
return entry;
}
-/*
+/**
* shmem_free_swp - free some swap entries in a directory
- *
* @dir: pointer to the directory
* @edir: pointer after last entry of the directory
* @punch_lock: pointer to spinlock when needed for the holepunch case
@@ -2022,7 +2019,7 @@ static const struct inode_operations shmem_symlink_inode_operations = {
};
#ifdef CONFIG_TMPFS_POSIX_ACL
-/**
+/*
* Superblocks without xattr inode operations will get security.* xattr
* support from the VFS "for free". As soon as we have any other xattrs
* like ACLs, we also need to implement the security.* handlers at
@@ -2561,12 +2558,11 @@ out4:
}
module_init(init_tmpfs)
-/*
+/**
* shmem_file_setup - get an unlinked file living in tmpfs
- *
* @name: name for dentry (to be seen in /proc/<pid>/maps
* @size: size to be set for the file
- *
+ * @flags: vm_flags
*/
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
@@ -2621,9 +2617,8 @@ put_memory:
return ERR_PTR(error);
}
-/*
+/**
* shmem_zero_setup - setup a shared anonymous mapping
- *
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
*/
int shmem_zero_setup(struct vm_area_struct *vma)
diff --git a/mm/slab.c b/mm/slab.c
index e6c698f5567..bb4070e1079 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3624,12 +3624,11 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
EXPORT_SYMBOL(kmem_cache_alloc);
/**
- * kmem_ptr_validate - check if an untrusted pointer might
- * be a slab entry.
+ * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
* @cachep: the cache we're checking against
* @ptr: pointer to validate
*
- * This verifies that the untrusted pointer looks sane:
+ * This verifies that the untrusted pointer looks sane;
* it is _not_ a guarantee that the pointer is actually
* part of the slab cache in question, but it at least
* validates that the pointer can be dereferenced and
diff --git a/mm/swap.c b/mm/swap.c
index d4ec59aa5c4..aa1139ccf3a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -78,12 +78,11 @@ void put_page(struct page *page)
EXPORT_SYMBOL(put_page);
/**
- * put_pages_list(): release a list of pages
+ * put_pages_list() - release a list of pages
+ * @pages: list of pages threaded on page->lru
*
* Release a list of pages which are strung together on page.lru. Currently
* used by read_cache_pages() and related error recovery code.
- *
- * @pages: list of pages threaded on page->lru
*/
void put_pages_list(struct list_head *pages)
{
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ec42f01a8d0..50757ee3f9f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -115,6 +115,7 @@ void __delete_from_swap_cache(struct page *page)
/**
* add_to_swap - allocate swap space for a page
* @page: page we want to move to swap
+ * @gfp_mask: memory allocation flags
*
* Allocate swap space for the page and add the page to the
* swap cache. Caller needs to hold the page lock.
@@ -315,6 +316,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/**
* swapin_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
* @vma: user vma this address belongs to
* @addr: target address for mempolicy
*
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index 702083638c1..f0f55875dd6 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -39,12 +39,11 @@ static int __init init_tmpfs(void)
}
module_init(init_tmpfs)
-/*
+/**
* shmem_file_setup - get an unlinked file living in tmpfs
- *
* @name: name for dentry (to be seen in /proc/<pid>/maps
* @size: size to be set for the file
- *
+ * @flags: vm_flags
*/
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
@@ -95,9 +94,8 @@ put_memory:
return ERR_PTR(error);
}
-/*
+/**
* shmem_zero_setup - setup a shared anonymous mapping
- *
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
*/
int shmem_zero_setup(struct vm_area_struct *vma)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 950c0be9ca8..ecf91f8034b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -757,7 +757,8 @@ finished:
* @vma: vma to cover (map full range of vma)
* @addr: vmalloc memory
* @pgoff: number of pages into addr before first page to map
- * @returns: 0 for success, -Exxx on failure
+ *
+ * Returns: 0 for success, -Exxx on failure
*
* This function checks that addr is a valid vmalloc'ed area, and
* that it is big enough to cover the vma. Will return failure if
@@ -829,7 +830,8 @@ static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
/**
* alloc_vm_area - allocate a range of kernel address space
* @size: size of the area
- * @returns: NULL on failure, vm_struct on success
+ *
+ * Returns: NULL on failure, vm_struct on success
*
* This function reserves a range of kernel address space, and
* allocates pagetables to map that range. No actual mappings
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 0241fd35967..38d707593b3 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1508,7 +1508,7 @@ static int smack_shm_associate(struct shmid_kernel *shp, int shmflg)
*/
static int smack_shm_shmctl(struct shmid_kernel *shp, int cmd)
{
- char *ssp = smack_of_shm(shp);
+ char *ssp;
int may;
switch (cmd) {
@@ -1532,6 +1532,7 @@ static int smack_shm_shmctl(struct shmid_kernel *shp, int cmd)
return -EINVAL;
}
+ ssp = smack_of_shm(shp);
return smk_curacc(ssp, may);
}
@@ -1616,7 +1617,7 @@ static int smack_sem_associate(struct sem_array *sma, int semflg)
*/
static int smack_sem_semctl(struct sem_array *sma, int cmd)
{
- char *ssp = smack_of_sem(sma);
+ char *ssp;
int may;
switch (cmd) {
@@ -1645,6 +1646,7 @@ static int smack_sem_semctl(struct sem_array *sma, int cmd)
return -EINVAL;
}
+ ssp = smack_of_sem(sma);
return smk_curacc(ssp, may);
}
@@ -1730,7 +1732,7 @@ static int smack_msg_queue_associate(struct msg_queue *msq, int msqflg)
*/
static int smack_msg_queue_msgctl(struct msg_queue *msq, int cmd)
{
- char *msp = smack_of_msq(msq);
+ char *msp;
int may;
switch (cmd) {
@@ -1752,6 +1754,7 @@ static int smack_msg_queue_msgctl(struct msg_queue *msq, int cmd)
return -EINVAL;
}
+ msp = smack_of_msq(msq);
return smk_curacc(msp, may);
}
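The three smack hunks share one fix: the object's Smack label was fetched before the command had been validated, even for commands that never reach the access check. In outline (label_of() stands in for smack_of_shm/sem/msq; the structure, not the names, is the point):

	static int example_ctl(struct example_object *obj, int cmd)
	{
		char *label;
		int may;

		switch (cmd) {
		case CMD_STAT:
			may = MAY_READ;
			break;
		case CMD_SET:
			may = MAY_READWRITE;
			break;
		default:
			return -EINVAL;
		}

		label = label_of(obj);		/* only once cmd is known good */
		return smk_curacc(label, may);
	}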