Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libata-core.c | 10
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/base/memory.c | 9
-rw-r--r--  drivers/block/DAC960.c | 2
-rw-r--r--  drivers/block/cciss.c | 4
-rw-r--r--  drivers/block/cpqarray.c | 3
-rw-r--r--  drivers/block/cryptoloop.c | 12
-rw-r--r--  drivers/block/sunvdc.c | 1
-rw-r--r--  drivers/block/sx8.c | 1
-rw-r--r--  drivers/block/ub.c | 11
-rw-r--r--  drivers/block/viodasd.c | 2
-rw-r--r--  drivers/firewire/fw-ohci.c | 13
-rw-r--r--  drivers/ide/cris/ide-cris.c | 4
-rw-r--r--  drivers/ide/ide-probe.c | 4
-rw-r--r--  drivers/ide/ide-taskfile.c | 2
-rw-r--r--  drivers/ide/mips/au1xxx-ide.c | 6
-rw-r--r--  drivers/ieee1394/dma.c | 2
-rw-r--r--  drivers/ieee1394/sbp2.c | 2
-rw-r--r--  drivers/infiniband/core/umem.c | 11
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_dma.c | 4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mr.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 24
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 8
-rw-r--r--  drivers/md/dm-crypt.c | 21
-rw-r--r--  drivers/media/common/ir-keymaps.c | 70
-rw-r--r--  drivers/media/common/saa7146_core.c | 3
-rw-r--r--  drivers/media/dvb/cinergyT2/cinergyT2.c | 42
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_ca_en50221.c | 2
-rw-r--r--  drivers/media/dvb/dvb-usb/dib0700_devices.c | 2
-rw-r--r--  drivers/media/radio/miropcm20-radio.c | 1
-rw-r--r--  drivers/media/radio/radio-gemtek.c | 1
-rw-r--r--  drivers/media/video/arv.c | 1
-rw-r--r--  drivers/media/video/bt8xx/bttv-driver.c | 3
-rw-r--r--  drivers/media/video/bw-qcam.c | 1
-rw-r--r--  drivers/media/video/c-qcam.c | 1
-rw-r--r--  drivers/media/video/cpia.c | 5
-rw-r--r--  drivers/media/video/cpia2/cpia2_v4l.c | 5
-rw-r--r--  drivers/media/video/cx23885/cx23885-core.c | 6
-rw-r--r--  drivers/media/video/cx88/cx88-alsa.c | 86
-rw-r--r--  drivers/media/video/cx88/cx88-blackbird.c | 57
-rw-r--r--  drivers/media/video/cx88/cx88-dvb.c | 3
-rw-r--r--  drivers/media/video/cx88/cx88-mpeg.c | 133
-rw-r--r--  drivers/media/video/cx88/cx88-video.c | 1
-rw-r--r--  drivers/media/video/cx88/cx88-vp3054-i2c.c | 16
-rw-r--r--  drivers/media/video/cx88/cx88.h | 24
-rw-r--r--  drivers/media/video/em28xx/em28xx-core.c | 3
-rw-r--r--  drivers/media/video/em28xx/em28xx-video.c | 2
-rw-r--r--  drivers/media/video/et61x251/et61x251_core.c | 1
-rw-r--r--  drivers/media/video/ir-kbd-i2c.c | 1
-rw-r--r--  drivers/media/video/ivtv/ivtv-driver.c | 11
-rw-r--r--  drivers/media/video/ivtv/ivtv-fileops.c | 8
-rw-r--r--  drivers/media/video/ivtv/ivtv-ioctl.c | 13
-rw-r--r--  drivers/media/video/ivtv/ivtv-streams.c | 116
-rw-r--r--  drivers/media/video/ivtv/ivtv-streams.h | 1
-rw-r--r--  drivers/media/video/ivtv/ivtv-udma.c | 4
-rw-r--r--  drivers/media/video/ivtv/ivtv-yuv.c | 160
-rw-r--r--  drivers/media/video/ivtv/ivtv-yuv.h | 1
-rw-r--r--  drivers/media/video/ivtv/ivtvfb.c | 92
-rw-r--r--  drivers/media/video/meye.c | 1
-rw-r--r--  drivers/media/video/ov511.c | 1
-rw-r--r--  drivers/media/video/planb.c | 1
-rw-r--r--  drivers/media/video/pms.c | 1
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-encoder.c | 6
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h | 11
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-hdw.c | 3
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-v4l2.c | 3
-rw-r--r--  drivers/media/video/pwc/pwc-if.c | 1
-rw-r--r--  drivers/media/video/saa7134/saa6752hs.c | 111
-rw-r--r--  drivers/media/video/saa7134/saa7134-core.c | 44
-rw-r--r--  drivers/media/video/saa7134/saa7134-empress.c | 12
-rw-r--r--  drivers/media/video/saa7134/saa7134-input.c | 29
-rw-r--r--  drivers/media/video/saa7134/saa7134-tvaudio.c | 32
-rw-r--r--  drivers/media/video/saa7134/saa7134-video.c | 28
-rw-r--r--  drivers/media/video/saa7134/saa7134.h | 7
-rw-r--r--  drivers/media/video/se401.c | 1
-rw-r--r--  drivers/media/video/sn9c102/sn9c102_core.c | 1
-rw-r--r--  drivers/media/video/stradis.c | 1
-rw-r--r--  drivers/media/video/stv680.c | 1
-rw-r--r--  drivers/media/video/tuner-core.c | 2
-rw-r--r--  drivers/media/video/usbvideo/usbvideo.c | 1
-rw-r--r--  drivers/media/video/usbvideo/vicam.c | 1
-rw-r--r--  drivers/media/video/usbvision/usbvision-video.c | 3
-rw-r--r--  drivers/media/video/v4l2-common.c | 2
-rw-r--r--  drivers/media/video/videobuf-core.c | 2
-rw-r--r--  drivers/media/video/videobuf-dma-sg.c | 8
-rw-r--r--  drivers/media/video/videocodec.c | 4
-rw-r--r--  drivers/media/video/videodev.c | 42
-rw-r--r--  drivers/media/video/vivi.c | 1
-rw-r--r--  drivers/media/video/w9966.c | 1
-rw-r--r--  drivers/media/video/w9968cf.c | 1
-rw-r--r--  drivers/media/video/zc0301/zc0301_core.c | 1
-rw-r--r--  drivers/media/video/zoran_card.c | 10
-rw-r--r--  drivers/media/video/zoran_driver.c | 2
-rw-r--r--  drivers/mmc/card/queue.c | 15
-rw-r--r--  drivers/mmc/host/at91_mci.c | 8
-rw-r--r--  drivers/mmc/host/au1xmmc.c | 11
-rw-r--r--  drivers/mmc/host/imxmmc.c | 2
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 8
-rw-r--r--  drivers/mmc/host/omap.c | 4
-rw-r--r--  drivers/mmc/host/sdhci.c | 2
-rw-r--r--  drivers/mmc/host/tifm_sd.c | 8
-rw-r--r--  drivers/mmc/host/wbsd.c | 6
-rw-r--r--  drivers/net/mlx4/icm.c | 14
-rw-r--r--  drivers/net/ppp_mppe.c | 6
-rw-r--r--  drivers/pci/Makefile | 3
-rw-r--r--  drivers/pci/dmar.c | 329
-rw-r--r--  drivers/pci/intel-iommu.c | 2271
-rw-r--r--  drivers/pci/intel-iommu.h | 325
-rw-r--r--  drivers/pci/iova.c | 394
-rw-r--r--  drivers/pci/iova.h | 63
-rw-r--r--  drivers/pci/pci.h | 1
-rw-r--r--  drivers/pci/probe.c | 14
-rw-r--r--  drivers/pci/search.c | 34
-rw-r--r--  drivers/power/apm_power.c | 141
-rw-r--r--  drivers/scsi/3w-9xxx.c | 4
-rw-r--r--  drivers/scsi/3w-xxxx.c | 2
-rw-r--r--  drivers/scsi/NCR5380.c | 6
-rw-r--r--  drivers/scsi/NCR53C9x.c | 4
-rw-r--r--  drivers/scsi/NCR53c406a.c | 6
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 2
-rw-r--r--  drivers/scsi/aha152x.c | 2
-rw-r--r--  drivers/scsi/aha1542.c | 8
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 4
-rw-r--r--  drivers/scsi/atari_NCR5380.c | 6
-rw-r--r--  drivers/scsi/eata_pio.c | 4
-rw-r--r--  drivers/scsi/fd_mcs.c | 6
-rw-r--r--  drivers/scsi/fdomain.c | 7
-rw-r--r--  drivers/scsi/gdth.c | 6
-rw-r--r--  drivers/scsi/ibmmca.c | 2
-rw-r--r--  drivers/scsi/ide-scsi.c | 12
-rw-r--r--  drivers/scsi/imm.c | 8
-rw-r--r--  drivers/scsi/in2000.c | 4
-rw-r--r--  drivers/scsi/ipr.c | 19
-rw-r--r--  drivers/scsi/ips.c | 6
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 15
-rw-r--r--  drivers/scsi/megaraid.c | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 12
-rw-r--r--  drivers/scsi/oktagon_esp.c | 6
-rw-r--r--  drivers/scsi/osst.c | 32
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.h | 2
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 6
-rw-r--r--  drivers/scsi/ppa.c | 7
-rw-r--r--  drivers/scsi/ps3rom.c | 6
-rw-r--r--  drivers/scsi/qlogicfas408.c | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 4
-rw-r--r--  drivers/scsi/scsi_lib.c | 13
-rw-r--r--  drivers/scsi/seagate.c | 8
-rw-r--r--  drivers/scsi/sg.c | 30
-rw-r--r--  drivers/scsi/st.c | 8
-rw-r--r--  drivers/scsi/sun3_NCR5380.c | 3
-rw-r--r--  drivers/scsi/sym53c416.c | 2
-rw-r--r--  drivers/scsi/tmscsim.c | 5
-rw-r--r--  drivers/scsi/ultrastor.c | 2
-rw-r--r--  drivers/scsi/wd33c93.c | 6
-rw-r--r--  drivers/scsi/wd7000.c | 2
-rw-r--r--  drivers/usb/core/message.c | 6
-rw-r--r--  drivers/usb/image/microtek.c | 5
-rw-r--r--  drivers/usb/misc/usbtest.c | 4
-rw-r--r--  drivers/usb/storage/protocol.c | 2
159 files changed, 4379 insertions, 1006 deletions
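
Most of the per-driver hunks below make the same conversion: with scatterlist chaining, drivers may no longer touch sg->page directly, so tables are initialized with sg_init_table() and pages are set and read through sg_set_page()/sg_page() (or the sg_virt()/sg_phys() helpers). The following is a minimal sketch of that before/after shape, assuming the interim two-argument sg_set_page() used in this tree and a hypothetical driver buffer, not any specific file in this series:

#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch only: hypothetical helper mirroring the conversion pattern above. */
static void example_fill_sg(struct scatterlist *sgl, int nents,
                            struct page **pages, unsigned int len)
{
        int i;

        sg_init_table(sgl, nents);              /* was: memset(sgl, 0, ...) */
        for (i = 0; i < nents; i++) {
                sg_set_page(&sgl[i], pages[i]); /* was: sgl[i].page = pages[i]; */
                sgl[i].offset = 0;
                sgl[i].length = len;
        }
}

static void example_touch_sg(struct scatterlist *sg)
{
        void *addr = kmap_atomic(sg_page(sg), KM_IRQ0); /* was: sg->page */

        memset(addr + sg->offset, 0, sg->length);
        kunmap_atomic(addr, KM_IRQ0);
}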
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 629eadbd0ec..69092bce1ad 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4296,7 +4296,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
if (pad_buf) {
struct scatterlist *psg = &qc->pad_sgent;
- void *addr = kmap_atomic(psg->page, KM_IRQ0);
+ void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
memcpy(addr + psg->offset, pad_buf, qc->pad_len);
kunmap_atomic(addr, KM_IRQ0);
}
@@ -4686,11 +4686,11 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
* data in this function or read data in ata_sg_clean.
*/
offset = lsg->offset + lsg->length - qc->pad_len;
- psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+ sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT));
psg->offset = offset_in_page(offset);
if (qc->tf.flags & ATA_TFLAG_WRITE) {
- void *addr = kmap_atomic(psg->page, KM_IRQ0);
+ void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
memcpy(pad_buf, addr + psg->offset, qc->pad_len);
kunmap_atomic(addr, KM_IRQ0);
}
@@ -4836,7 +4836,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
if (qc->curbytes == qc->nbytes - qc->sect_size)
ap->hsm_task_state = HSM_ST_LAST;
- page = qc->cursg->page;
+ page = sg_page(qc->cursg);
offset = qc->cursg->offset + qc->cursg_ofs;
/* get the current page and offset */
@@ -4988,7 +4988,7 @@ next_sg:
sg = qc->cursg;
- page = sg->page;
+ page = sg_page(sg);
offset = sg->offset + qc->cursg_ofs;
/* get the current page and offset */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9fbb39cd0f5..5b758b9ad0b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1544,7 +1544,7 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
struct scatterlist *sg = scsi_sglist(cmd);
if (sg) {
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
buflen = sg->length;
} else {
buf = NULL;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c41d0728efe..7868707c7ed 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -137,7 +137,7 @@ static ssize_t show_mem_state(struct sys_device *dev, char *buf)
return len;
}
-static inline int memory_notify(unsigned long val, void *v)
+int memory_notify(unsigned long val, void *v)
{
return blocking_notifier_call_chain(&memory_chain, val, v);
}
@@ -183,7 +183,6 @@ memory_block_action(struct memory_block *mem, unsigned long action)
break;
case MEM_OFFLINE:
mem->state = MEM_GOING_OFFLINE;
- memory_notify(MEM_GOING_OFFLINE, NULL);
start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
ret = remove_memory(start_paddr,
PAGES_PER_SECTION << PAGE_SHIFT);
@@ -191,7 +190,6 @@ memory_block_action(struct memory_block *mem, unsigned long action)
mem->state = old_state;
break;
}
- memory_notify(MEM_MAPPING_INVALID, NULL);
break;
default:
printk(KERN_WARNING "%s(%p, %ld) unknown action: %ld\n",
@@ -199,11 +197,6 @@ memory_block_action(struct memory_block *mem, unsigned long action)
WARN_ON(1);
ret = -EINVAL;
}
- /*
- * For now, only notify on successful memory operations
- */
- if (!ret)
- memory_notify(action, NULL);
return ret;
}
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 84d6aa500e2..53505422867 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -345,6 +345,7 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
Command->V1.ScatterGatherList =
(DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU;
Command->V1.ScatterGatherListDMA = ScatterGatherDMA;
+ sg_init_table(Command->cmd_sglist, DAC960_V1_ScatterGatherLimit);
} else {
Command->cmd_sglist = Command->V2.ScatterList;
Command->V2.ScatterGatherList =
@@ -353,6 +354,7 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
Command->V2.RequestSense =
(DAC960_SCSI_RequestSense_T *)RequestSenseCPU;
Command->V2.RequestSenseDMA = RequestSenseDMA;
+ sg_init_table(Command->cmd_sglist, DAC960_V2_ScatterGatherLimit);
}
}
return true;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 7c2cfde08f1..5a6fe17fc63 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2610,7 +2610,7 @@ static void do_cciss_request(struct request_queue *q)
(int)creq->nr_sectors);
#endif /* CCISS_DEBUG */
- memset(tmp_sg, 0, sizeof(tmp_sg));
+ sg_init_table(tmp_sg, MAXSGENTRIES);
seg = blk_rq_map_sg(q, creq, tmp_sg);
/* get the DMA records for the setup */
@@ -2621,7 +2621,7 @@ static void do_cciss_request(struct request_queue *q)
for (i = 0; i < seg; i++) {
c->SG[i].Len = tmp_sg[i].length;
- temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
+ temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
tmp_sg[i].offset,
tmp_sg[i].length, dir);
c->SG[i].Addr.lower = temp64.val32.lower;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 568603d3043..efab27fa108 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -918,6 +918,7 @@ queue_next:
DBGPX(
printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
);
+ sg_init_table(tmp_sg, SG_MAX);
seg = blk_rq_map_sg(q, creq, tmp_sg);
/* Now do all the DMA Mappings */
@@ -929,7 +930,7 @@ DBGPX(
{
c->req.sg[i].size = tmp_sg[i].length;
c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
- tmp_sg[i].page,
+ sg_page(&tmp_sg[i]),
tmp_sg[i].offset,
tmp_sg[i].length, dir);
}
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 40535036e89..1b58b010797 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -26,6 +26,7 @@
#include <linux/crypto.h>
#include <linux/blkdev.h>
#include <linux/loop.h>
+#include <linux/scatterlist.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@@ -119,14 +120,17 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
.tfm = tfm,
.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
};
- struct scatterlist sg_out = { NULL, };
- struct scatterlist sg_in = { NULL, };
+ struct scatterlist sg_out;
+ struct scatterlist sg_in;
encdec_cbc_t encdecfunc;
struct page *in_page, *out_page;
unsigned in_offs, out_offs;
int err;
+ sg_init_table(&sg_out, 1);
+ sg_init_table(&sg_in, 1);
+
if (cmd == READ) {
in_page = raw_page;
in_offs = raw_off;
@@ -146,11 +150,11 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
u32 iv[4] = { 0, };
iv[0] = cpu_to_le32(IV & 0xffffffff);
- sg_in.page = in_page;
+ sg_set_page(&sg_in, in_page);
sg_in.offset = in_offs;
sg_in.length = sz;
- sg_out.page = out_page;
+ sg_set_page(&sg_out, out_page);
sg_out.offset = out_offs;
sg_out.length = sz;
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 317a790c153..7276f7d207c 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -388,6 +388,7 @@ static int __send_request(struct request *req)
op = VD_OP_BWRITE;
}
+ sg_init_table(sg, port->ring_cookies);
nsg = blk_rq_map_sg(req->q, req, sg);
len = 0;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 402209fec59..282a69558e8 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -522,6 +522,7 @@ static struct carm_request *carm_get_request(struct carm_host *host)
host->n_msgs++;
assert(host->n_msgs <= CARM_MAX_REQ);
+ sg_init_table(crq->sg, CARM_MAX_REQ_SG);
return crq;
}
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index c57dd2b3a0c..14143f2c484 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -25,6 +25,7 @@
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#define DRV_NAME "ub"
@@ -656,6 +657,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
if ((cmd = ub_get_cmd(lun)) == NULL)
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
+ sg_init_table(cmd->sgv, UB_MAX_REQ_SG);
blkdev_dequeue_request(rq);
@@ -1309,9 +1311,8 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
else
pipe = sc->send_bulk_pipe;
sc->last_pipe = pipe;
- usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
- page_address(sg->page) + sg->offset, sg->length,
- ub_urb_complete, sc);
+ usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
+ sg->length, ub_urb_complete, sc);
sc->work_urb.actual_length = 0;
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
@@ -1427,7 +1428,7 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
scmd->state = UB_CMDST_INIT;
scmd->nsg = 1;
sg = &scmd->sgv[0];
- sg->page = virt_to_page(sc->top_sense);
+ sg_set_page(sg, virt_to_page(sc->top_sense));
sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
sg->length = UB_SENSE_SIZE;
scmd->len = UB_SENSE_SIZE;
@@ -1863,7 +1864,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
cmd->state = UB_CMDST_INIT;
cmd->nsg = 1;
sg = &cmd->sgv[0];
- sg->page = virt_to_page(p);
+ sg_set_page(sg, virt_to_page(p));
sg->offset = (unsigned long)p & (PAGE_SIZE-1);
sg->length = 8;
cmd->len = 8;
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index e824b672e05..ab5d404faa1 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -41,6 +41,7 @@
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/scatterlist.h>
#include <asm/uaccess.h>
#include <asm/vio.h>
@@ -270,6 +271,7 @@ static int send_request(struct request *req)
d = req->rq_disk->private_data;
/* Now build the scatter-gather list */
+ sg_init_table(sg, VIOMAXBLOCKDMA);
nsg = blk_rq_map_sg(req->q, req, sg);
nsg = dma_map_sg(d->dev, sg, nsg, direction);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 2f307c4df33..67588326ae5 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -606,7 +606,7 @@ static int
at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
{
struct fw_ohci *ohci = ctx->ohci;
- dma_addr_t d_bus, payload_bus;
+ dma_addr_t d_bus, uninitialized_var(payload_bus);
struct driver_data *driver_data;
struct descriptor *d, *last;
__le32 *header;
@@ -1459,7 +1459,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
/* FIXME: We need a fallback for pre 1.1 OHCI. */
if (callback == handle_ir_dualbuffer_packet &&
ohci->version < OHCI_VERSION_1_1)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOSYS);
spin_lock_irqsave(&ohci->lock, flags);
index = ffs(*mask) - 1;
@@ -1778,7 +1778,7 @@ ohci_queue_iso(struct fw_iso_context *base,
buffer, payload);
else
/* FIXME: Implement fallback for OHCI 1.0 controllers. */
- return -EINVAL;
+ return -ENOSYS;
}
static const struct fw_card_driver ohci_driver = {
@@ -1898,7 +1898,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
-
+ if (ohci->version < OHCI_VERSION_1_1) {
+ fw_notify(" Isochronous I/O is not yet implemented for "
+ "OHCI 1.0 chips.\n");
+ fw_notify(" Cameras, audio devices etc. won't work on "
+ "this controller with this driver version.\n");
+ }
return 0;
fail_self_id:
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index ff20377b4c8..e196aefa207 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -935,11 +935,11 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
* than two possibly non-adjacent physical 4kB pages.
*/
/* group sequential buffers into one large buffer */
- addr = page_to_phys(sg->page) + sg->offset;
+ addr = sg_phys(sg);
size = sg_dma_len(sg);
while (--i) {
sg = sg_next(sg);
- if ((addr + size) != page_to_phys(sg->page) + sg->offset)
+ if ((addr + size) != sg_phys(sg))
break;
size += sg_dma_len(sg);
}
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index d5146c57e5b..ec55a173c08 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1317,12 +1317,14 @@ static int hwif_init(ide_hwif_t *hwif)
if (!hwif->sg_max_nents)
hwif->sg_max_nents = PRD_ENTRIES;
- hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
+ hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
GFP_KERNEL);
if (!hwif->sg_table) {
printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
goto out;
}
+
+ sg_init_table(hwif->sg_table, hwif->sg_max_nents);
if (init_irq(hwif) == 0)
goto done;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 73ef6bf5fbc..d066546f283 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -261,7 +261,7 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
hwif->cursg = sg;
}
- page = cursg->page;
+ page = sg_page(cursg);
offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
/* get the current page and offset */
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 1de58566e5b..a4ce3ba15d6 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -276,8 +276,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
if (iswrite) {
if(!put_source_flags(ahwif->tx_chan,
- (void*)(page_address(sg->page)
- + sg->offset),
+ (void*) sg_virt(sg),
tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
__FUNCTION__, __LINE__);
@@ -285,8 +284,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
} else
{
if(!put_dest_flags(ahwif->rx_chan,
- (void*)(page_address(sg->page)
- + sg->offset),
+ (void*) sg_virt(sg),
tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
__FUNCTION__, __LINE__);
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index 45d60558192..25e113b50d8 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -111,7 +111,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
unsigned long va =
(unsigned long)dma->kvirt + (i << PAGE_SHIFT);
- dma->sglist[i].page = vmalloc_to_page((void *)va);
+ sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va));
dma->sglist[i].length = PAGE_SIZE;
}
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 1b353b964b3..d5dfe11aa5c 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -1466,7 +1466,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
cmd->dma_size = sgpnt[0].length;
cmd->dma_type = CMD_DMA_PAGE;
cmd->cmd_dma = dma_map_page(hi->host->device.parent,
- sgpnt[0].page, sgpnt[0].offset,
+ sg_page(&sgpnt[0]), sgpnt[0].offset,
cmd->dma_size, cmd->dma_dir);
orb->data_descriptor_lo = cmd->cmd_dma;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2f54e29dc7a..14159ff2940 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -55,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
ib_dma_unmap_sg(dev, chunk->page_list,
chunk->nents, DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->nents; ++i) {
+ struct page *page = sg_page(&chunk->page_list[i]);
+
if (umem->writable && dirty)
- set_page_dirty_lock(chunk->page_list[i].page);
- put_page(chunk->page_list[i].page);
+ set_page_dirty_lock(page);
+ put_page(page);
}
kfree(chunk);
@@ -164,11 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
}
chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+ sg_init_table(chunk->page_list, chunk->nents);
for (i = 0; i < chunk->nents; ++i) {
if (vma_list &&
!is_vm_hugetlb_page(vma_list[i + off]))
umem->hugetlb = 0;
- chunk->page_list[i].page = page_list[i + off];
+ sg_set_page(&chunk->page_list[i], page_list[i + off]);
chunk->page_list[i].offset = 0;
chunk->page_list[i].length = PAGE_SIZE;
}
@@ -179,7 +182,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
DMA_BIDIRECTIONAL);
if (chunk->nmap <= 0) {
for (i = 0; i < chunk->nents; ++i)
- put_page(chunk->page_list[i].page);
+ put_page(sg_page(&chunk->page_list[i]));
kfree(chunk);
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
index 22709a4f8fc..e90a0ea538a 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -108,7 +108,7 @@ static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
BUG_ON(!valid_dma_direction(direction));
for_each_sg(sgl, sg, nents, i) {
- addr = (u64) page_address(sg->page);
+ addr = (u64) page_address(sg_page(sg));
/* TODO: handle highmem pages */
if (!addr) {
ret = 0;
@@ -127,7 +127,7 @@ static void ipath_unmap_sg(struct ib_device *dev,
static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
- u64 addr = (u64) page_address(sg->page);
+ u64 addr = (u64) page_address(sg_page(sg));
if (addr)
addr += sg->offset;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index e442470a237..db4ba92f79f 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -225,7 +225,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
for (i = 0; i < chunk->nents; i++) {
void *vaddr;
- vaddr = page_address(chunk->page_list[i].page);
+ vaddr = page_address(sg_page(&chunk->page_list[i]));
if (!vaddr) {
ret = ERR_PTR(-EINVAL);
goto bail;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index e61f3e62698..007b38157fc 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -71,7 +71,7 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
PCI_DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
- __free_pages(chunk->mem[i].page,
+ __free_pages(sg_page(&chunk->mem[i]),
get_order(chunk->mem[i].length));
}
@@ -81,7 +81,7 @@ static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chun
for (i = 0; i < chunk->npages; ++i) {
dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
- lowmem_page_address(chunk->mem[i].page),
+ lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
}
}
@@ -107,10 +107,13 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
- mem->page = alloc_pages(gfp_mask, order);
- if (!mem->page)
+ struct page *page;
+
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
return -ENOMEM;
+ sg_set_page(mem, page);
mem->length = PAGE_SIZE << order;
mem->offset = 0;
return 0;
@@ -157,6 +160,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
if (!chunk)
goto fail;
+ sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
list_add_tail(&chunk->list, &icm->chunk_list);
@@ -304,7 +308,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_h
* so if we found the page, dma_handle has already
* been assigned to. */
if (chunk->mem[i].length > offset) {
- page = chunk->mem[i].page;
+ page = sg_page(&chunk->mem[i]);
goto out;
}
offset -= chunk->mem[i].length;
@@ -445,6 +449,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
+ struct page *pages[1];
int ret = 0;
u8 status;
int i;
@@ -472,16 +477,17 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
}
ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
- &db_tab->page[i].mem.page, NULL);
+ pages, NULL);
if (ret < 0)
goto out;
+ sg_set_page(&db_tab->page[i].mem, pages[0]);
db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
if (ret < 0) {
- put_page(db_tab->page[i].mem.page);
+ put_page(pages[0]);
goto out;
}
@@ -491,7 +497,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
ret = -EINVAL;
if (ret) {
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
- put_page(db_tab->page[i].mem.page);
+ put_page(sg_page(&db_tab->page[i].mem));
goto out;
}
@@ -557,7 +563,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
if (db_tab->page[i].uvirt) {
mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
- put_page(db_tab->page[i].mem.page);
+ put_page(sg_page(&db_tab->page[i].mem));
}
}
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index f3529b6f0a3..d6879806179 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -131,7 +131,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
p = mem;
for_each_sg(sgl, sg, data->size, i) {
- from = kmap_atomic(sg->page, KM_USER0);
+ from = kmap_atomic(sg_page(sg), KM_USER0);
memcpy(p,
from + sg->offset,
sg->length);
@@ -191,7 +191,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
p = mem;
for_each_sg(sgl, sg, sg_size, i) {
- to = kmap_atomic(sg->page, KM_SOFTIRQ0);
+ to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
memcpy(to + sg->offset,
p,
sg->length);
@@ -300,7 +300,7 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
for_each_sg(sgl, sg, data->dma_nents, i) {
/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
"offset: %ld sz: %ld\n", i,
- (unsigned long)page_to_phys(sg->page),
+ (unsigned long)sg_phys(sg),
(unsigned long)sg->offset,
(unsigned long)sg->length); */
end_addr = ib_sg_dma_address(ibdev, sg) +
@@ -336,7 +336,7 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
"off:0x%x sz:0x%x dma_len:0x%x\n",
i, (unsigned long)ib_sg_dma_address(ibdev, sg),
- sg->page, sg->offset,
+ sg_page(sg), sg->offset,
sg->length, ib_sg_dma_len(ibdev, sg));
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0eb5416798b..ac54f697c50 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -348,16 +348,17 @@ static int crypt_convert(struct crypt_config *cc,
ctx->idx_out < ctx->bio_out->bi_vcnt) {
struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
- struct scatterlist sg_in = {
- .page = bv_in->bv_page,
- .offset = bv_in->bv_offset + ctx->offset_in,
- .length = 1 << SECTOR_SHIFT
- };
- struct scatterlist sg_out = {
- .page = bv_out->bv_page,
- .offset = bv_out->bv_offset + ctx->offset_out,
- .length = 1 << SECTOR_SHIFT
- };
+ struct scatterlist sg_in, sg_out;
+
+ sg_init_table(&sg_in, 1);
+ sg_set_page(&sg_in, bv_in->bv_page);
+ sg_in.offset = bv_in->bv_offset + ctx->offset_in;
+ sg_in.length = 1 << SECTOR_SHIFT;
+
+ sg_init_table(&sg_out, 1);
+ sg_set_page(&sg_out, bv_out->bv_page);
+ sg_out.offset = bv_out->bv_offset + ctx->offset_out;
+ sg_out.length = 1 << SECTOR_SHIFT;
ctx->offset_in += sg_in.length;
if (ctx->offset_in >= bv_in->bv_len) {
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index aefcf28da1c..185e8a860c1 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -1074,41 +1074,41 @@ EXPORT_SYMBOL_GPL(ir_codes_manli);
/* Mike Baikov <mike@baikov.com> */
IR_KEYTAB_TYPE ir_codes_gotview7135[IR_KEYTAB_SIZE] = {
- [ 0x21 ] = KEY_POWER,
- [ 0x69 ] = KEY_TV,
- [ 0x33 ] = KEY_0,
- [ 0x51 ] = KEY_1,
- [ 0x31 ] = KEY_2,
- [ 0x71 ] = KEY_3,
- [ 0x3b ] = KEY_4,
- [ 0x58 ] = KEY_5,
- [ 0x41 ] = KEY_6,
- [ 0x48 ] = KEY_7,
- [ 0x30 ] = KEY_8,
- [ 0x53 ] = KEY_9,
- [ 0x73 ] = KEY_AGAIN, /* LOOP */
- [ 0x0a ] = KEY_AUDIO,
- [ 0x61 ] = KEY_PRINT, /* PREVIEW */
- [ 0x7a ] = KEY_VIDEO,
- [ 0x20 ] = KEY_CHANNELUP,
- [ 0x40 ] = KEY_CHANNELDOWN,
- [ 0x18 ] = KEY_VOLUMEDOWN,
- [ 0x50 ] = KEY_VOLUMEUP,
- [ 0x10 ] = KEY_MUTE,
- [ 0x4a ] = KEY_SEARCH,
- [ 0x7b ] = KEY_SHUFFLE, /* SNAPSHOT */
- [ 0x22 ] = KEY_RECORD,
- [ 0x62 ] = KEY_STOP,
- [ 0x78 ] = KEY_PLAY,
- [ 0x39 ] = KEY_REWIND,
- [ 0x59 ] = KEY_PAUSE,
- [ 0x19 ] = KEY_FORWARD,
- [ 0x09 ] = KEY_ZOOM,
-
- [ 0x52 ] = KEY_F21, /* LIVE TIMESHIFT */
- [ 0x1a ] = KEY_F22, /* MIN TIMESHIFT */
- [ 0x3a ] = KEY_F23, /* TIMESHIFT */
- [ 0x70 ] = KEY_F24, /* NORMAL TIMESHIFT */
+ [ 0x11 ] = KEY_POWER,
+ [ 0x35 ] = KEY_TV,
+ [ 0x1b ] = KEY_0,
+ [ 0x29 ] = KEY_1,
+ [ 0x19 ] = KEY_2,
+ [ 0x39 ] = KEY_3,
+ [ 0x1f ] = KEY_4,
+ [ 0x2c ] = KEY_5,
+ [ 0x21 ] = KEY_6,
+ [ 0x24 ] = KEY_7,
+ [ 0x18 ] = KEY_8,
+ [ 0x2b ] = KEY_9,
+ [ 0x3b ] = KEY_AGAIN, /* LOOP */
+ [ 0x06 ] = KEY_AUDIO,
+ [ 0x31 ] = KEY_PRINT, /* PREVIEW */
+ [ 0x3e ] = KEY_VIDEO,
+ [ 0x10 ] = KEY_CHANNELUP,
+ [ 0x20 ] = KEY_CHANNELDOWN,
+ [ 0x0c ] = KEY_VOLUMEDOWN,
+ [ 0x28 ] = KEY_VOLUMEUP,
+ [ 0x08 ] = KEY_MUTE,
+ [ 0x26 ] = KEY_SEARCH, /*SCAN*/
+ [ 0x3f ] = KEY_SHUFFLE, /* SNAPSHOT */
+ [ 0x12 ] = KEY_RECORD,
+ [ 0x32 ] = KEY_STOP,
+ [ 0x3c ] = KEY_PLAY,
+ [ 0x1d ] = KEY_REWIND,
+ [ 0x2d ] = KEY_PAUSE,
+ [ 0x0d ] = KEY_FORWARD,
+ [ 0x05 ] = KEY_ZOOM, /*FULL*/
+
+ [ 0x2a ] = KEY_F21, /* LIVE TIMESHIFT */
+ [ 0x0e ] = KEY_F22, /* MIN TIMESHIFT */
+ [ 0x1e ] = KEY_F23, /* TIMESHIFT */
+ [ 0x38 ] = KEY_F24, /* NORMAL TIMESHIFT */
};
EXPORT_SYMBOL_GPL(ir_codes_gotview7135);
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 365a22118a0..2b1f8b4be00 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -112,12 +112,13 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
+ sg_init_table(sglist, nr_pages);
for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
pg = vmalloc_to_page(virt);
if (NULL == pg)
goto err;
BUG_ON(PageHighMem(pg));
- sglist[i].page = pg;
+ sg_set_page(&sglist[i], pg);
sglist[i].length = PAGE_SIZE;
}
return sglist;
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index a05e5c18228..db08b0a8888 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -345,7 +345,9 @@ static int cinergyt2_start_feed(struct dvb_demux_feed *dvbdmxfeed)
struct dvb_demux *demux = dvbdmxfeed->demux;
struct cinergyt2 *cinergyt2 = demux->priv;
- if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending)
+ return -EAGAIN;
+ if (mutex_lock_interruptible(&cinergyt2->sem))
return -ERESTARTSYS;
if (cinergyt2->streaming == 0)
@@ -361,7 +363,9 @@ static int cinergyt2_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
struct dvb_demux *demux = dvbdmxfeed->demux;
struct cinergyt2 *cinergyt2 = demux->priv;
- if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending)
+ return -EAGAIN;
+ if (mutex_lock_interruptible(&cinergyt2->sem))
return -ERESTARTSYS;
if (--cinergyt2->streaming == 0)
@@ -481,12 +485,16 @@ static int cinergyt2_open (struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
struct cinergyt2 *cinergyt2 = dvbdev->priv;
- int err = -ERESTARTSYS;
+ int err = -EAGAIN;
- if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->wq_sem))
+ if (cinergyt2->disconnect_pending)
+ goto out;
+ err = mutex_lock_interruptible(&cinergyt2->wq_sem);
+ if (err)
goto out;
- if (mutex_lock_interruptible(&cinergyt2->sem))
+ err = mutex_lock_interruptible(&cinergyt2->sem);
+ if (err)
goto out_unlock1;
if ((err = dvb_generic_open(inode, file)))
@@ -550,7 +558,9 @@ static unsigned int cinergyt2_poll (struct file *file, struct poll_table_struct
struct cinergyt2 *cinergyt2 = dvbdev->priv;
unsigned int mask = 0;
- if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending)
+ return -EAGAIN;
+ if (mutex_lock_interruptible(&cinergyt2->sem))
return -ERESTARTSYS;
poll_wait(file, &cinergyt2->poll_wq, wait);
@@ -625,7 +635,9 @@ static int cinergyt2_ioctl (struct inode *inode, struct file *file,
if (copy_from_user(&p, (void __user*) arg, sizeof(p)))
return -EFAULT;
- if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending)
+ return -EAGAIN;
+ if (mutex_lock_interruptible(&cinergyt2->sem))
return -ERESTARTSYS;
param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
@@ -996,7 +1008,9 @@ static int cinergyt2_suspend (struct usb_interface *intf, pm_message_t state)
{
struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf);
- if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->wq_sem))
+ if (cinergyt2->disconnect_pending)
+ return -EAGAIN;
+ if (mutex_lock_interruptible(&cinergyt2->wq_sem))
return -ERESTARTSYS;
cinergyt2_suspend_rc(cinergyt2);
@@ -1017,16 +1031,18 @@ static int cinergyt2_resume (struct usb_interface *intf)
{
struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf);
struct dvbt_set_parameters_msg *param = &cinergyt2->param;
- int err = -ERESTARTSYS;
+ int err = -EAGAIN;
- if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->wq_sem))
+ if (cinergyt2->disconnect_pending)
+ goto out;
+ err = mutex_lock_interruptible(&cinergyt2->wq_sem);
+ if (err)
goto out;
- if (mutex_lock_interruptible(&cinergyt2->sem))
+ err = mutex_lock_interruptible(&cinergyt2->sem);
+ if (err)
goto out_unlock1;
- err = 0;
-
if (!cinergyt2->sleeping) {
cinergyt2_sleep(cinergyt2, 0);
cinergyt2_command(cinergyt2, (char *) param, sizeof(*param), NULL, 0);
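
The cinergyT2 hunks above all apply one pattern: the combined if (cinergyt2->disconnect_pending || mutex_lock_interruptible(...)) test is split so that a pending disconnect returns -EAGAIN while a signal-interrupted lock propagates its own error. A minimal sketch of that shape, using a hypothetical device structure rather than the cinergyt2 one:

#include <linux/mutex.h>
#include <linux/errno.h>

struct demo_dev {
        int disconnect_pending;
        struct mutex sem;
};

static int demo_op(struct demo_dev *d)
{
        int err;

        if (d->disconnect_pending)      /* device going away: not a signal */
                return -EAGAIN;

        err = mutex_lock_interruptible(&d->sem);
        if (err)                        /* interrupted by a signal */
                return err;

        /* ... do the work that needs d->sem held ... */

        mutex_unlock(&d->sem);
        return 0;
}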
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index 084a508a03d..89437fdab8b 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -972,7 +972,7 @@ static int dvb_ca_en50221_thread(void *data)
/* main loop */
while (!kthread_should_stop()) {
/* sleep for a bit */
- while (!ca->wakeup) {
+ if (!ca->wakeup) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(ca->delay);
if (kthread_should_stop())
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index e8c4a869453..58452b52002 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -828,7 +828,7 @@ MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
#define DIB0700_DEFAULT_DEVICE_PROPERTIES \
.caps = DVB_USB_IS_AN_I2C_ADAPTER, \
.usb_ctrl = DEVICE_SPECIFIC, \
- .firmware = "dvb-usb-dib0700-03-pre1.fw", \
+ .firmware = "dvb-usb-dib0700-1.10.fw", \
.download_firmware = dib0700_download_firmware, \
.no_reconnect = 1, \
.size_of_priv = sizeof(struct dib0700_state), \
diff --git a/drivers/media/radio/miropcm20-radio.c b/drivers/media/radio/miropcm20-radio.c
index c7c9d1dc069..3ae56fef8c9 100644
--- a/drivers/media/radio/miropcm20-radio.c
+++ b/drivers/media/radio/miropcm20-radio.c
@@ -229,7 +229,6 @@ static struct video_device pcm20_radio = {
.owner = THIS_MODULE,
.name = "Miro PCM 20 radio",
.type = VID_TYPE_TUNER,
- .hardware = VID_HARDWARE_RTRACK,
.fops = &pcm20_fops,
.priv = &pcm20_unit
};
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 0c963db0361..5e4b9ddb23c 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -554,7 +554,6 @@ static struct video_device gemtek_radio = {
.owner = THIS_MODULE,
.name = "GemTek Radio card",
.type = VID_TYPE_TUNER,
- .hardware = VID_HARDWARE_GEMTEK,
.fops = &gemtek_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index 19e9929ffa0..c94a4d0f280 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -755,7 +755,6 @@ static struct video_device ar_template = {
.owner = THIS_MODULE,
.name = "Colour AR VGA",
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_ARV,
.fops = &ar_fops,
.release = ar_release,
.minor = -1,
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 7a332b3efe5..9feeb636ff9 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -3877,7 +3877,6 @@ static struct video_device bttv_video_template =
.name = "UNSET",
.type = VID_TYPE_CAPTURE|VID_TYPE_TUNER|
VID_TYPE_CLIPPING|VID_TYPE_SCALES,
- .hardware = VID_HARDWARE_BT848,
.fops = &bttv_fops,
.minor = -1,
};
@@ -3886,7 +3885,6 @@ static struct video_device bttv_vbi_template =
{
.name = "bt848/878 vbi",
.type = VID_TYPE_TUNER|VID_TYPE_TELETEXT,
- .hardware = VID_HARDWARE_BT848,
.fops = &bttv_fops,
.minor = -1,
};
@@ -4034,7 +4032,6 @@ static struct video_device radio_template =
{
.name = "bt848/878 radio",
.type = VID_TYPE_TUNER,
- .hardware = VID_HARDWARE_BT848,
.fops = &radio_fops,
.minor = -1,
};
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 7f7e3d3398d..58423525591 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -899,7 +899,6 @@ static struct video_device qcam_template=
.owner = THIS_MODULE,
.name = "Connectix Quickcam",
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_QCAM_BW,
.fops = &qcam_fops,
};
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index f76c6a6c376..cf1546b5a7f 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -699,7 +699,6 @@ static struct video_device qcam_template=
.owner = THIS_MODULE,
.name = "Colour QuickCam",
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_QCAM_C,
.fops = &qcam_fops,
};
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index a1d02e5ce0f..7c630f5ee72 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -65,10 +65,6 @@ MODULE_PARM_DESC(colorspace_conv,
#define ABOUT "V4L-Driver for Vision CPiA based cameras"
-#ifndef VID_HARDWARE_CPIA
-#define VID_HARDWARE_CPIA 24 /* FIXME -> from linux/videodev.h */
-#endif
-
#define CPIA_MODULE_CPIA (0<<5)
#define CPIA_MODULE_SYSTEM (1<<5)
#define CPIA_MODULE_VP_CTRL (5<<5)
@@ -3804,7 +3800,6 @@ static struct video_device cpia_template = {
.owner = THIS_MODULE,
.name = "CPiA Camera",
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_CPIA,
.fops = &cpia_fops,
};
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index e3aaba1e0e0..e378abec806 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -86,10 +86,6 @@ MODULE_LICENSE("GPL");
#define ABOUT "V4L-Driver for Vision CPiA2 based cameras"
-#ifndef VID_HARDWARE_CPIA2
-#error "VID_HARDWARE_CPIA2 should have been defined in linux/videodev.h"
-#endif
-
struct control_menu_info {
int value;
char name[32];
@@ -1942,7 +1938,6 @@ static struct video_device cpia2_template = {
.type= VID_TYPE_CAPTURE,
.type2 = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING,
- .hardware= VID_HARDWARE_CPIA2,
.minor= -1,
.fops= &fops_template,
.release= video_device_release,
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index af16505bd2e..3cdd136477e 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -793,7 +793,7 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
dev->pci->subsystem_device);
cx23885_devcount--;
- goto fail_free;
+ return -ENODEV;
}
/* PCIe stuff */
@@ -835,10 +835,6 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
}
return 0;
-
-fail_free:
- kfree(dev);
- return -ENODEV;
}
void cx23885_dev_unregister(struct cx23885_dev *dev)
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 141dadf7cf1..40ffd7a5579 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -39,6 +39,7 @@
#include <sound/pcm_params.h>
#include <sound/control.h>
#include <sound/initval.h>
+#include <sound/tlv.h>
#include "cx88.h"
#include "cx88-reg.h"
@@ -82,6 +83,7 @@ typedef struct cx88_audio_dev snd_cx88_card_t;
+
/****************************************************************************
Module global static vars
****************************************************************************/
@@ -545,8 +547,8 @@ static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, char *name)
/****************************************************************************
CONTROL INTERFACE
****************************************************************************/
-static int snd_cx88_capture_volume_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *info)
+static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
{
info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
info->count = 2;
@@ -556,9 +558,8 @@ static int snd_cx88_capture_volume_info(struct snd_kcontrol *kcontrol,
return 0;
}
-/* OK - TODO: test it */
-static int snd_cx88_capture_volume_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core=chip->core;
@@ -573,8 +574,8 @@ static int snd_cx88_capture_volume_get(struct snd_kcontrol *kcontrol,
}
/* OK - TODO: test it */
-static int snd_cx88_capture_volume_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *value)
+static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core=chip->core;
@@ -605,14 +606,67 @@ static int snd_cx88_capture_volume_put(struct snd_kcontrol *kcontrol,
return changed;
}
-static struct snd_kcontrol_new snd_cx88_capture_volume = {
+static const DECLARE_TLV_DB_SCALE(snd_cx88_db_scale, -6300, 100, 0);
+
+static struct snd_kcontrol_new snd_cx88_volume = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .name = "Playback Volume",
+ .info = snd_cx88_volume_info,
+ .get = snd_cx88_volume_get,
+ .put = snd_cx88_volume_put,
+ .tlv.p = snd_cx88_db_scale,
+};
+
+static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core = chip->core;
+ u32 bit = kcontrol->private_value;
+
+ value->value.integer.value[0] = !(cx_read(AUD_VOL_CTL) & bit);
+ return 0;
+}
+
+static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core = chip->core;
+ u32 bit = kcontrol->private_value;
+ int ret = 0;
+ u32 vol;
+
+ spin_lock_irq(&chip->reg_lock);
+ vol = cx_read(AUD_VOL_CTL);
+ if (value->value.integer.value[0] != !(vol & bit)) {
+ vol ^= bit;
+ cx_write(AUD_VOL_CTL, vol);
+ ret = 1;
+ }
+ spin_unlock_irq(&chip->reg_lock);
+ return ret;
+}
+
+static struct snd_kcontrol_new snd_cx88_dac_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Volume",
- .info = snd_cx88_capture_volume_info,
- .get = snd_cx88_capture_volume_get,
- .put = snd_cx88_capture_volume_put,
+ .name = "Playback Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = snd_cx88_switch_get,
+ .put = snd_cx88_switch_put,
+ .private_value = (1<<8),
};
+static struct snd_kcontrol_new snd_cx88_source_switch = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Capture Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = snd_cx88_switch_get,
+ .put = snd_cx88_switch_put,
+ .private_value = (1<<6),
+};
/****************************************************************************
Basic Flow for Sound Devices
@@ -762,7 +816,13 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
if (err < 0)
goto error;
- err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_capture_volume, chip));
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_volume, chip));
+ if (err < 0)
+ goto error;
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_dac_switch, chip));
+ if (err < 0)
+ goto error;
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_source_switch, chip));
if (err < 0)
goto error;
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 6d6f5048d76..f33f0b47142 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -527,44 +527,6 @@ static void blackbird_codec_settings(struct cx8802_dev *dev)
cx2341x_update(dev, blackbird_mbox_func, NULL, &dev->params);
}
-static struct v4l2_mpeg_compression default_mpeg_params = {
- .st_type = V4L2_MPEG_PS_2,
- .st_bitrate = {
- .mode = V4L2_BITRATE_CBR,
- .min = 0,
- .target = 0,
- .max = 0
- },
- .ts_pid_pmt = 16,
- .ts_pid_audio = 260,
- .ts_pid_video = 256,
- .ts_pid_pcr = 259,
- .ps_size = 0,
- .au_type = V4L2_MPEG_AU_2_II,
- .au_bitrate = {
- .mode = V4L2_BITRATE_CBR,
- .min = 224,
- .target = 224,
- .max = 224
- },
- .au_sample_rate = 48000,
- .au_pesid = 0,
- .vi_type = V4L2_MPEG_VI_2,
- .vi_aspect_ratio = V4L2_MPEG_ASPECT_4_3,
- .vi_bitrate = {
- .mode = V4L2_BITRATE_CBR,
- .min = 4000,
- .target = 4500,
- .max = 6000
- },
- .vi_frame_rate = 25,
- .vi_frames_per_gop = 12,
- .vi_bframes_count = 2,
- .vi_pesid = 0,
- .closed_gops = 1,
- .pulldown = 0
-};
-
static int blackbird_initialize_codec(struct cx8802_dev *dev)
{
struct cx88_core *core = dev->core;
@@ -852,23 +814,6 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
return videobuf_streamoff(&fh->mpegq);
}
-static int vidioc_g_mpegcomp (struct file *file, void *fh,
- struct v4l2_mpeg_compression *f)
-{
- printk(KERN_WARNING "VIDIOC_G_MPEGCOMP is obsolete. "
- "Replace with VIDIOC_G_EXT_CTRLS!");
- memcpy(f,&default_mpeg_params,sizeof(*f));
- return 0;
-}
-
-static int vidioc_s_mpegcomp (struct file *file, void *fh,
- struct v4l2_mpeg_compression *f)
-{
- printk(KERN_WARNING "VIDIOC_S_MPEGCOMP is obsolete. "
- "Replace with VIDIOC_S_EXT_CTRLS!");
- return 0;
-}
-
static int vidioc_g_ext_ctrls (struct file *file, void *priv,
struct v4l2_ext_controls *f)
{
@@ -1216,8 +1161,6 @@ static struct video_device cx8802_mpeg_template =
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
- .vidioc_g_mpegcomp = vidioc_g_mpegcomp,
- .vidioc_s_mpegcomp = vidioc_s_mpegcomp,
.vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
.vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
.vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index d16e5c6d21c..fce19caf9d0 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -475,8 +475,9 @@ static int dvb_register(struct cx8802_dev *dev)
break;
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
+ /* MT352 is on a secondary I2C bus made from some GPIO lines */
dev->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
- &((struct vp3054_i2c_state *)dev->card_priv)->adap);
+ &dev->vp3054->adap);
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
&dev->core->i2c_adap, DVB_PLL_FMD1216ME);
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index a652f294d23..448c6738094 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -79,7 +79,8 @@ static int cx8802_start_dma(struct cx8802_dev *dev,
{
struct cx88_core *core = dev->core;
- dprintk(1, "cx8802_start_dma w: %d, h: %d, f: %d\n", dev->width, dev->height, buf->vb.field);
+ dprintk(1, "cx8802_start_dma w: %d, h: %d, f: %d\n",
+ buf->vb.width, buf->vb.height, buf->vb.field);
/* setup fifo + format */
cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28],
@@ -177,7 +178,6 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
struct cx88_dmaqueue *q)
{
struct cx88_buffer *buf;
- struct list_head *item;
dprintk( 1, "cx8802_restart_queue\n" );
if (list_empty(&q->active))
@@ -223,10 +223,8 @@ static int cx8802_restart_queue(struct cx8802_dev *dev,
dprintk(2,"restart_queue [%p/%d]: restart dma\n",
buf, buf->vb.i);
cx8802_start_dma(dev, q, buf);
- list_for_each(item,&q->active) {
- buf = list_entry(item, struct cx88_buffer, vb.queue);
+ list_for_each_entry(buf, &q->active, vb.queue)
buf->count = q->count++;
- }
mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
return 0;
}
@@ -572,42 +570,29 @@ int cx8802_resume_common(struct pci_dev *pci_dev)
return 0;
}
+#if defined(CONFIG_VIDEO_CX88_BLACKBIRD) || \
+ defined(CONFIG_VIDEO_CX88_BLACKBIRD_MODULE)
struct cx8802_dev * cx8802_get_device(struct inode *inode)
{
int minor = iminor(inode);
- struct cx8802_dev *h = NULL;
- struct list_head *list;
+ struct cx8802_dev *dev;
- list_for_each(list,&cx8802_devlist) {
- h = list_entry(list, struct cx8802_dev, devlist);
- if (h->mpeg_dev && h->mpeg_dev->minor == minor)
- return h;
- }
+ list_for_each_entry(dev, &cx8802_devlist, devlist)
+ if (dev->mpeg_dev && dev->mpeg_dev->minor == minor)
+ return dev;
return NULL;
}
+EXPORT_SYMBOL(cx8802_get_device);
+#endif
struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype)
{
- struct cx8802_dev *h = NULL;
- struct cx8802_driver *d = NULL;
- struct list_head *list;
- struct list_head *list2;
-
- list_for_each(list,&cx8802_devlist) {
- h = list_entry(list, struct cx8802_dev, devlist);
- if (h != dev)
- continue;
-
- list_for_each(list2, &h->drvlist.devlist) {
- d = list_entry(list2, struct cx8802_driver, devlist);
+ struct cx8802_driver *d;
- /* only unregister the correct driver type */
- if (d->type_id == btype) {
- return d;
- }
- }
- }
+ list_for_each_entry(d, &dev->drvlist, drvlist)
+ if (d->type_id == btype)
+ return d;
return NULL;
}
@@ -671,10 +656,9 @@ static int cx8802_check_driver(struct cx8802_driver *drv)
int cx8802_register_driver(struct cx8802_driver *drv)
{
- struct cx8802_dev *h;
+ struct cx8802_dev *dev;
struct cx8802_driver *driver;
- struct list_head *list;
- int err = 0, i = 0;
+ int err, i = 0;
printk(KERN_INFO
"cx88/2: registering cx8802 driver, type: %s access: %s\n",
@@ -686,14 +670,12 @@ int cx8802_register_driver(struct cx8802_driver *drv)
return err;
}
- list_for_each(list,&cx8802_devlist) {
- h = list_entry(list, struct cx8802_dev, devlist);
-
+ list_for_each_entry(dev, &cx8802_devlist, devlist) {
printk(KERN_INFO
"%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n",
- h->core->name, h->pci->subsystem_vendor,
- h->pci->subsystem_device, h->core->board.name,
- h->core->boardnr);
+ dev->core->name, dev->pci->subsystem_vendor,
+ dev->pci->subsystem_device, dev->core->board.name,
+ dev->core->boardnr);
/* Bring up a new struct for each driver instance */
driver = kzalloc(sizeof(*drv),GFP_KERNEL);
@@ -701,7 +683,7 @@ int cx8802_register_driver(struct cx8802_driver *drv)
return -ENOMEM;
/* Snapshot of the driver registration data */
- drv->core = h->core;
+ drv->core = dev->core;
drv->suspend = cx8802_suspend_common;
drv->resume = cx8802_resume_common;
drv->request_acquire = cx8802_request_acquire;
@@ -712,49 +694,38 @@ int cx8802_register_driver(struct cx8802_driver *drv)
if (err == 0) {
i++;
mutex_lock(&drv->core->lock);
- list_add_tail(&driver->devlist,&h->drvlist.devlist);
+ list_add_tail(&driver->drvlist, &dev->drvlist);
mutex_unlock(&drv->core->lock);
} else {
printk(KERN_ERR
"%s/2: cx8802 probe failed, err = %d\n",
- h->core->name, err);
+ dev->core->name, err);
}
}
- if (i == 0)
- err = -ENODEV;
- else
- err = 0;
- return err;
+ return i ? 0 : -ENODEV;
}
int cx8802_unregister_driver(struct cx8802_driver *drv)
{
- struct cx8802_dev *h;
- struct cx8802_driver *d;
- struct list_head *list;
- struct list_head *list2, *q;
- int err = 0, i = 0;
+ struct cx8802_dev *dev;
+ struct cx8802_driver *d, *dtmp;
+ int err = 0;
printk(KERN_INFO
"cx88/2: unregistering cx8802 driver, type: %s access: %s\n",
drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
drv->hw_access == CX8802_DRVCTL_SHARED ? "shared" : "exclusive");
- list_for_each(list,&cx8802_devlist) {
- i++;
- h = list_entry(list, struct cx8802_dev, devlist);
-
+ list_for_each_entry(dev, &cx8802_devlist, devlist) {
printk(KERN_INFO
"%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n",
- h->core->name, h->pci->subsystem_vendor,
- h->pci->subsystem_device, h->core->board.name,
- h->core->boardnr);
-
- list_for_each_safe(list2, q, &h->drvlist.devlist) {
- d = list_entry(list2, struct cx8802_driver, devlist);
+ dev->core->name, dev->pci->subsystem_vendor,
+ dev->pci->subsystem_device, dev->core->board.name,
+ dev->core->boardnr);
+ list_for_each_entry_safe(d, dtmp, &dev->drvlist, drvlist) {
/* only unregister the correct driver type */
if (d->type_id != drv->type_id)
continue;
@@ -762,12 +733,12 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
err = d->remove(d);
if (err == 0) {
mutex_lock(&drv->core->lock);
- list_del(list2);
+ list_del(&d->drvlist);
mutex_unlock(&drv->core->lock);
+ kfree(d);
} else
printk(KERN_ERR "%s/2: cx8802 driver remove "
- "failed (%d)\n", h->core->name, err);
-
+ "failed (%d)\n", dev->core->name, err);
}
}
@@ -805,7 +776,7 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
if (err != 0)
goto fail_free;
- INIT_LIST_HEAD(&dev->drvlist.devlist);
+ INIT_LIST_HEAD(&dev->drvlist);
list_add_tail(&dev->devlist,&cx8802_devlist);
/* Maintain a reference so cx88-video can query the 8802 device. */
@@ -825,23 +796,30 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
static void __devexit cx8802_remove(struct pci_dev *pci_dev)
{
struct cx8802_dev *dev;
- struct cx8802_driver *h;
- struct list_head *list;
dev = pci_get_drvdata(pci_dev);
dprintk( 1, "%s\n", __FUNCTION__);
- list_for_each(list,&dev->drvlist.devlist) {
- h = list_entry(list, struct cx8802_driver, devlist);
- dprintk( 1, " ->driver\n");
- if (h->remove == NULL) {
- printk(KERN_ERR "%s .. skipping driver, no probe function\n", __FUNCTION__);
- continue;
+ if (!list_empty(&dev->drvlist)) {
+ struct cx8802_driver *drv, *tmp;
+ int err;
+
+ printk(KERN_WARNING "%s/2: Trying to remove cx8802 driver "
+ "while cx8802 sub-drivers still loaded?!\n",
+ dev->core->name);
+
+ list_for_each_entry_safe(drv, tmp, &dev->drvlist, drvlist) {
+ err = drv->remove(drv);
+ if (err == 0) {
+ mutex_lock(&drv->core->lock);
+ list_del(&drv->drvlist);
+ mutex_unlock(&drv->core->lock);
+ } else
+ printk(KERN_ERR "%s/2: cx8802 driver remove "
+ "failed (%d)\n", dev->core->name, err);
+ kfree(drv);
}
- printk(KERN_INFO "%s .. Removing driver type %d\n", __FUNCTION__, h->type_id);
- cx8802_unregister_driver(h);
- list_del(&dev->drvlist.devlist);
}
/* Destroy any 8802 reference. */
@@ -901,7 +879,6 @@ EXPORT_SYMBOL(cx8802_fini_common);
EXPORT_SYMBOL(cx8802_register_driver);
EXPORT_SYMBOL(cx8802_unregister_driver);
-EXPORT_SYMBOL(cx8802_get_device);
EXPORT_SYMBOL(cx8802_get_driver);
/* ----------------------------------------------------------- */
/*
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 231ae6c4dd2..5ee05f8f3fa 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1675,7 +1675,6 @@ static struct video_device cx8800_radio_template =
{
.name = "cx8800-radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &radio_fops,
.minor = -1,
.vidioc_querycap = radio_querycap,
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.c b/drivers/media/video/cx88/cx88-vp3054-i2c.c
index 77c37889232..6ce5af48847 100644
--- a/drivers/media/video/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.c
@@ -41,7 +41,7 @@ static void vp3054_bit_setscl(void *data, int state)
{
struct cx8802_dev *dev = data;
struct cx88_core *core = dev->core;
- struct vp3054_i2c_state *vp3054_i2c = dev->card_priv;
+ struct vp3054_i2c_state *vp3054_i2c = dev->vp3054;
if (state) {
vp3054_i2c->state |= 0x0001; /* SCL high */
@@ -58,7 +58,7 @@ static void vp3054_bit_setsda(void *data, int state)
{
struct cx8802_dev *dev = data;
struct cx88_core *core = dev->core;
- struct vp3054_i2c_state *vp3054_i2c = dev->card_priv;
+ struct vp3054_i2c_state *vp3054_i2c = dev->vp3054;
if (state) {
vp3054_i2c->state |= 0x0002; /* SDA high */
@@ -113,10 +113,10 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
if (core->boardnr != CX88_BOARD_DNTV_LIVE_DVB_T_PRO)
return 0;
- dev->card_priv = kzalloc(sizeof(*vp3054_i2c), GFP_KERNEL);
- if (dev->card_priv == NULL)
+ vp3054_i2c = kzalloc(sizeof(*vp3054_i2c), GFP_KERNEL);
+ if (vp3054_i2c == NULL)
return -ENOMEM;
- vp3054_i2c = dev->card_priv;
+ dev->vp3054 = vp3054_i2c;
memcpy(&vp3054_i2c->algo, &vp3054_i2c_algo_template,
sizeof(vp3054_i2c->algo));
@@ -139,8 +139,8 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
if (0 != rc) {
printk("%s: vp3054_i2c register FAILED\n", core->name);
- kfree(dev->card_priv);
- dev->card_priv = NULL;
+ kfree(dev->vp3054);
+ dev->vp3054 = NULL;
}
return rc;
@@ -148,7 +148,7 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
void vp3054_i2c_remove(struct cx8802_dev *dev)
{
- struct vp3054_i2c_state *vp3054_i2c = dev->card_priv;
+ struct vp3054_i2c_state *vp3054_i2c = dev->vp3054;
if (vp3054_i2c == NULL ||
dev->core->boardnr != CX88_BOARD_DNTV_LIVE_DVB_T_PRO)
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 42e0a9b8c55..eb296bdecb1 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -412,7 +412,9 @@ struct cx8802_suspend_state {
struct cx8802_driver {
struct cx88_core *core;
- struct list_head devlist;
+
+ /* List of drivers attached to device */
+ struct list_head drvlist;
/* Type of driver and access required */
enum cx88_board_type type_id;
@@ -453,27 +455,33 @@ struct cx8802_dev {
/* for blackbird only */
struct list_head devlist;
+#if defined(CONFIG_VIDEO_CX88_BLACKBIRD) || \
+ defined(CONFIG_VIDEO_CX88_BLACKBIRD_MODULE)
struct video_device *mpeg_dev;
u32 mailbox;
int width;
int height;
+ /* mpeg params */
+ struct cx2341x_mpeg_params params;
+#endif
+
#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
/* for dvb only */
struct videobuf_dvb dvb;
+#endif
- void *card_priv;
+#if defined(CONFIG_VIDEO_CX88_VP3054) || \
+ defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
+ /* For VP3054 secondary I2C bus support */
+ struct vp3054_i2c_state *vp3054;
#endif
/* for switching modulation types */
unsigned char ts_gen_cntrl;
- /* mpeg params */
- struct cx2341x_mpeg_params params;
-
/* List of attached drivers */
- struct cx8802_driver drvlist;
- struct work_struct request_module_wk;
-
+ struct list_head drvlist;
+ struct work_struct request_module_wk;
};
/* ----------------------------------------------------------- */
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index d3282ec62c5..d56484f2046 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -648,7 +648,7 @@ void em28xx_uninit_isoc(struct em28xx *dev)
*/
int em28xx_init_isoc(struct em28xx *dev)
{
- /* change interface to 3 which allowes the biggest packet sizes */
+ /* change interface to 3 which allows the biggest packet sizes */
int i, errCode;
const int sb_size = EM28XX_NUM_PACKETS * dev->max_pkt_size;
@@ -673,6 +673,7 @@ int em28xx_init_isoc(struct em28xx *dev)
("unable to allocate %i bytes for transfer buffer %i\n",
sb_size, i);
em28xx_uninit_isoc(dev);
+ usb_free_urb(urb);
return -ENOMEM;
}
memset(dev->transfer_buffer[i], 0, sb_size);
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index e467682aabd..a4c2a907124 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -1617,7 +1617,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
/* Fills VBI device info */
dev->vbi_dev->type = VFL_TYPE_VBI;
- dev->vbi_dev->hardware = 0;
dev->vbi_dev->fops = &em28xx_v4l_fops;
dev->vbi_dev->minor = -1;
dev->vbi_dev->dev = &dev->udev->dev;
@@ -1629,7 +1628,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
dev->vdev->type = VID_TYPE_CAPTURE;
if (dev->has_tuner)
dev->vdev->type |= VID_TYPE_TUNER;
- dev->vdev->hardware = 0;
dev->vdev->fops = &em28xx_v4l_fops;
dev->vdev->minor = -1;
dev->vdev->dev = &dev->udev->dev;
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index d5fef4c01c8..d19d73b81ed 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -2585,7 +2585,6 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
strcpy(cam->v4ldev->name, "ET61X[12]51 PC Camera");
cam->v4ldev->owner = THIS_MODULE;
cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
- cam->v4ldev->hardware = 0;
cam->v4ldev->fops = &et61x251_fops;
cam->v4ldev->minor = video_nr[dev_nr];
cam->v4ldev->release = video_device_release;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index d98dd0d1e37..29779d8bf7f 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -528,6 +528,7 @@ static int ir_probe(struct i2c_adapter *adap)
break;
case I2C_HW_B_CX2388x:
probe = probe_cx88;
+ break;
case I2C_HW_B_CX23885:
probe = probe_cx23885;
break;
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index fd7a932e1d3..6d2dd8764f8 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1003,8 +1003,6 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
- mutex_lock(&itv->serialize_lock);
-
/* PCI Device Setup */
if ((retval = ivtv_setup_pci(itv, dev, pci_id)) != 0) {
if (retval == -EIO)
@@ -1064,7 +1062,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
IVTV_DEBUG_INFO("activating i2c...\n");
if (init_ivtv_i2c(itv)) {
IVTV_ERR("Could not initialize i2c\n");
- goto free_irq;
+ goto free_io;
}
IVTV_DEBUG_INFO("Active card count: %d.\n", ivtv_cards_active);
@@ -1176,7 +1174,11 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
IVTV_ERR("Failed to register irq %d\n", retval);
goto free_streams;
}
- mutex_unlock(&itv->serialize_lock);
+ retval = ivtv_streams_register(itv);
+ if (retval) {
+ IVTV_ERR("Error %d registering devices\n", retval);
+ goto free_irq;
+ }
IVTV_INFO("Initialized card #%d: %s\n", itv->num, itv->card_name);
return 0;
@@ -1195,7 +1197,6 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
free_workqueue:
destroy_workqueue(itv->irq_work_queues);
- mutex_unlock(&itv->serialize_lock);
err:
if (retval == 0)
retval = -ENODEV;
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index da50fa4a72a..a200a8a95a2 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -822,6 +822,11 @@ int ivtv_v4l2_close(struct inode *inode, struct file *filp)
crystal_freq.flags = 0;
ivtv_saa7115(itv, VIDIOC_INT_S_CRYSTAL_FREQ, &crystal_freq);
}
+ if (atomic_read(&itv->capturing) > 0) {
+ /* Undo video mute */
+ ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
+ itv->params.video_mute | (itv->params.video_mute_yuv << 8));
+ }
/* Done! Unmute and continue. */
ivtv_unmute(itv);
ivtv_release_stream(s);
@@ -892,6 +897,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
if (atomic_read(&itv->capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
+ ivtv_release_stream(s);
kfree(item);
return -EBUSY;
}
@@ -947,7 +953,7 @@ int ivtv_v4l2_open(struct inode *inode, struct file *filp)
if (itv == NULL) {
/* Couldn't find a device registered
on that minor, shouldn't happen! */
- IVTV_WARN("No ivtv device found on minor %d\n", minor);
+ printk(KERN_WARNING "No ivtv device found on minor %d\n", minor);
return -ENXIO;
}
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 206eee7542d..fd6826f472e 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -555,6 +555,7 @@ static int ivtv_try_or_set_fmt(struct ivtv *itv, int streamtype,
/* set window size */
if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ struct cx2341x_mpeg_params *p = &itv->params;
int w = fmt->fmt.pix.width;
int h = fmt->fmt.pix.height;
@@ -566,17 +567,19 @@ static int ivtv_try_or_set_fmt(struct ivtv *itv, int streamtype,
fmt->fmt.pix.width = w;
fmt->fmt.pix.height = h;
- if (!set_fmt || (itv->params.width == w && itv->params.height == h))
+ if (!set_fmt || (p->width == w && p->height == h))
return 0;
if (atomic_read(&itv->capturing) > 0)
return -EBUSY;
- itv->params.width = w;
- itv->params.height = h;
+ p->width = w;
+ p->height = h;
if (w != 720 || h != (itv->is_50hz ? 576 : 480))
- itv->params.video_temporal_filter = 0;
+ p->video_temporal_filter = 0;
else
- itv->params.video_temporal_filter = 8;
+ p->video_temporal_filter = 8;
+ if (p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
+ fmt->fmt.pix.width /= 2;
itv->video_dec_func(itv, VIDIOC_S_FMT, fmt);
return ivtv_get_fmt(itv, streamtype, fmt);
}
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index fd135985e70..aa03e61ef31 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -166,10 +166,9 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
ivtv_queue_init(&s->q_io);
}
-static int ivtv_reg_dev(struct ivtv *itv, int type)
+static int ivtv_prep_dev(struct ivtv *itv, int type)
{
struct ivtv_stream *s = &itv->streams[type];
- int vfl_type = ivtv_stream_info[type].vfl_type;
int minor_offset = ivtv_stream_info[type].minor_offset;
int minor;
@@ -187,15 +186,12 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
if (type >= IVTV_DEC_STREAM_TYPE_MPG && !(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return 0;
- if (minor_offset >= 0)
- /* card number + user defined offset + device offset */
- minor = itv->num + ivtv_first_minor + minor_offset;
- else
- minor = -1;
+ /* card number + user defined offset + device offset */
+ minor = itv->num + ivtv_first_minor + minor_offset;
/* User explicitly selected 0 buffers for these streams, so don't
create them. */
- if (minor >= 0 && ivtv_stream_info[type].dma != PCI_DMA_NONE &&
+ if (ivtv_stream_info[type].dma != PCI_DMA_NONE &&
itv->options.kilobytes[type] == 0) {
IVTV_INFO("Disabled %s device\n", ivtv_stream_info[type].name);
return 0;
@@ -223,21 +219,53 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
s->v4l2dev->fops = ivtv_stream_info[type].fops;
s->v4l2dev->release = video_device_release;
- if (minor >= 0) {
- /* Register device. First try the desired minor, then any free one. */
- if (video_register_device(s->v4l2dev, vfl_type, minor) &&
- video_register_device(s->v4l2dev, vfl_type, -1)) {
- IVTV_ERR("Couldn't register v4l2 device for %s minor %d\n",
- s->name, minor);
- video_device_release(s->v4l2dev);
- s->v4l2dev = NULL;
- return -ENOMEM;
- }
+ return 0;
+}
+
+/* Initialize v4l2 variables and prepare v4l2 devices */
+int ivtv_streams_setup(struct ivtv *itv)
+{
+ int type;
+
+ /* Setup V4L2 Devices */
+ for (type = 0; type < IVTV_MAX_STREAMS; type++) {
+ /* Prepare device */
+ if (ivtv_prep_dev(itv, type))
+ break;
+
+ if (itv->streams[type].v4l2dev == NULL)
+ continue;
+
+ /* Allocate Stream */
+ if (ivtv_stream_alloc(&itv->streams[type]))
+ break;
}
- else {
- /* Don't register a 'hidden' stream (OSD) */
- IVTV_INFO("Created framebuffer stream for %s\n", s->name);
+ if (type == IVTV_MAX_STREAMS)
return 0;
+
+ /* One or more streams could not be initialized. Clean 'em all up. */
+ ivtv_streams_cleanup(itv);
+ return -ENOMEM;
+}
+
+static int ivtv_reg_dev(struct ivtv *itv, int type)
+{
+ struct ivtv_stream *s = &itv->streams[type];
+ int vfl_type = ivtv_stream_info[type].vfl_type;
+ int minor;
+
+ if (s->v4l2dev == NULL)
+ return 0;
+
+ minor = s->v4l2dev->minor;
+ /* Register device. First try the desired minor, then any free one. */
+ if (video_register_device(s->v4l2dev, vfl_type, minor) &&
+ video_register_device(s->v4l2dev, vfl_type, -1)) {
+ IVTV_ERR("Couldn't register v4l2 device for %s minor %d\n",
+ s->name, minor);
+ video_device_release(s->v4l2dev);
+ s->v4l2dev = NULL;
+ return -ENOMEM;
}
switch (vfl_type) {
@@ -262,27 +290,18 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
return 0;
}
-/* Initialize v4l2 variables and register v4l2 devices */
-int ivtv_streams_setup(struct ivtv *itv)
+/* Register v4l2 devices */
+int ivtv_streams_register(struct ivtv *itv)
{
int type;
+ int err = 0;
- /* Setup V4L2 Devices */
- for (type = 0; type < IVTV_MAX_STREAMS; type++) {
- /* Register Device */
- if (ivtv_reg_dev(itv, type))
- break;
-
- if (itv->streams[type].v4l2dev == NULL)
- continue;
+ /* Register V4L2 devices */
+ for (type = 0; type < IVTV_MAX_STREAMS; type++)
+ err |= ivtv_reg_dev(itv, type);
- /* Allocate Stream */
- if (ivtv_stream_alloc(&itv->streams[type]))
- break;
- }
- if (type == IVTV_MAX_STREAMS) {
+ if (err == 0)
return 0;
- }
/* One or more streams could not be initialized. Clean 'em all up. */
ivtv_streams_cleanup(itv);
@@ -303,11 +322,8 @@ void ivtv_streams_cleanup(struct ivtv *itv)
continue;
ivtv_stream_free(&itv->streams[type]);
- /* Free Device */
- if (vdev->minor == -1) /* 'Hidden' never registered stream (OSD) */
- video_device_release(vdev);
- else /* All others, just unregister. */
- video_unregister_device(vdev);
+ /* Unregister device */
+ video_unregister_device(vdev);
}
}
@@ -425,6 +441,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv *itv = s->itv;
+ struct cx2341x_mpeg_params *p = &itv->params;
int captype = 0, subtype = 0;
int enable_passthrough = 0;
@@ -445,7 +462,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
}
itv->mpg_data_received = itv->vbi_data_inserted = 0;
itv->dualwatch_jiffies = jiffies;
- itv->dualwatch_stereo_mode = itv->params.audio_properties & 0x0300;
+ itv->dualwatch_stereo_mode = p->audio_properties & 0x0300;
itv->search_pack_header = 0;
break;
@@ -477,9 +494,6 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
s->subtype = subtype;
s->buffers_stolen = 0;
- /* mute/unmute video */
- ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1, test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? 1 : 0);
-
/* Clear Streamoff flags in case left from last capture */
clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
@@ -536,7 +550,12 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
itv->pgm_info_offset, itv->pgm_info_num);
/* Setup API for Stream */
- cx2341x_update(itv, ivtv_api_func, NULL, &itv->params);
+ cx2341x_update(itv, ivtv_api_func, NULL, p);
+
+ /* mute if capturing radio */
+ if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags))
+ ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
+ 1 | (p->video_mute_yuv << 8));
}
/* Vsync Setup */
@@ -585,6 +604,7 @@ static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
{
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv *itv = s->itv;
+ struct cx2341x_mpeg_params *p = &itv->params;
int datatype;
if (s->v4l2dev == NULL)
@@ -623,7 +643,7 @@ static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
break;
}
if (ivtv_vapi(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, datatype,
- itv->params.width, itv->params.height, itv->params.audio_properties)) {
+ p->width, p->height, p->audio_properties)) {
IVTV_DEBUG_WARN("Couldn't initialize decoder source\n");
}
return 0;
diff --git a/drivers/media/video/ivtv/ivtv-streams.h b/drivers/media/video/ivtv/ivtv-streams.h
index 8f5f5b1c7c8..3d76a415fbd 100644
--- a/drivers/media/video/ivtv/ivtv-streams.h
+++ b/drivers/media/video/ivtv/ivtv-streams.h
@@ -22,6 +22,7 @@
#define IVTV_STREAMS_H
int ivtv_streams_setup(struct ivtv *itv);
+int ivtv_streams_register(struct ivtv *itv);
void ivtv_streams_cleanup(struct ivtv *itv);
/* Capture related */
diff --git a/drivers/media/video/ivtv/ivtv-udma.c b/drivers/media/video/ivtv/ivtv-udma.c
index c4626d1cdf4..912b424e520 100644
--- a/drivers/media/video/ivtv/ivtv-udma.c
+++ b/drivers/media/video/ivtv/ivtv-udma.c
@@ -63,10 +63,10 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info
memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
kunmap_atomic(src, KM_BOUNCE_READ);
local_irq_restore(flags);
- dma->SGlist[map_offset].page = dma->bouncemap[map_offset];
+ sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset]);
}
else {
- dma->SGlist[map_offset].page = dma->map[map_offset];
+ sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset]);
}
offset = 0;
map_offset++;
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
index e2288f224ab..9091c4837bb 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -710,7 +710,7 @@ static u32 ivtv_yuv_window_setup (struct ivtv *itv, struct yuv_frame_info *windo
/* If there's nothing safe to display, we may as well stop now */
if ((int)window->dst_w <= 2 || (int)window->dst_h <= 2 || (int)window->src_w <= 2 || (int)window->src_h <= 2) {
- return 0;
+ return IVTV_YUV_UPDATE_INVALID;
}
/* Ensure video remains inside OSD area */
@@ -791,7 +791,7 @@ static u32 ivtv_yuv_window_setup (struct ivtv *itv, struct yuv_frame_info *windo
/* Check again. If there's nothing safe to display, stop now */
if ((int)window->dst_w <= 2 || (int)window->dst_h <= 2 || (int)window->src_w <= 2 || (int)window->src_h <= 2) {
- return 0;
+ return IVTV_YUV_UPDATE_INVALID;
}
/* Both x offset & width are linked, so they have to be done together */
@@ -840,110 +840,118 @@ void ivtv_yuv_work_handler (struct ivtv *itv)
if (!(yuv_update = ivtv_yuv_window_setup (itv, &window)))
return;
- /* Update horizontal settings */
- if (yuv_update & IVTV_YUV_UPDATE_HORIZONTAL)
- ivtv_yuv_handle_horizontal(itv, &window);
+ if (yuv_update & IVTV_YUV_UPDATE_INVALID) {
+ write_reg(0x01008080, 0x2898);
+ } else if (yuv_update) {
+ write_reg(0x00108080, 0x2898);
- if (yuv_update & IVTV_YUV_UPDATE_VERTICAL)
- ivtv_yuv_handle_vertical(itv, &window);
+ if (yuv_update & IVTV_YUV_UPDATE_HORIZONTAL)
+ ivtv_yuv_handle_horizontal(itv, &window);
+
+ if (yuv_update & IVTV_YUV_UPDATE_VERTICAL)
+ ivtv_yuv_handle_vertical(itv, &window);
+ }
memcpy(&itv->yuv_info.old_frame_info, &window, sizeof (itv->yuv_info.old_frame_info));
}
static void ivtv_yuv_init (struct ivtv *itv)
{
+ struct yuv_playback_info *yi = &itv->yuv_info;
+
IVTV_DEBUG_YUV("ivtv_yuv_init\n");
/* Take a snapshot of the current register settings */
- itv->yuv_info.reg_2834 = read_reg(0x02834);
- itv->yuv_info.reg_2838 = read_reg(0x02838);
- itv->yuv_info.reg_283c = read_reg(0x0283c);
- itv->yuv_info.reg_2840 = read_reg(0x02840);
- itv->yuv_info.reg_2844 = read_reg(0x02844);
- itv->yuv_info.reg_2848 = read_reg(0x02848);
- itv->yuv_info.reg_2854 = read_reg(0x02854);
- itv->yuv_info.reg_285c = read_reg(0x0285c);
- itv->yuv_info.reg_2864 = read_reg(0x02864);
- itv->yuv_info.reg_2870 = read_reg(0x02870);
- itv->yuv_info.reg_2874 = read_reg(0x02874);
- itv->yuv_info.reg_2898 = read_reg(0x02898);
- itv->yuv_info.reg_2890 = read_reg(0x02890);
-
- itv->yuv_info.reg_289c = read_reg(0x0289c);
- itv->yuv_info.reg_2918 = read_reg(0x02918);
- itv->yuv_info.reg_291c = read_reg(0x0291c);
- itv->yuv_info.reg_2920 = read_reg(0x02920);
- itv->yuv_info.reg_2924 = read_reg(0x02924);
- itv->yuv_info.reg_2928 = read_reg(0x02928);
- itv->yuv_info.reg_292c = read_reg(0x0292c);
- itv->yuv_info.reg_2930 = read_reg(0x02930);
- itv->yuv_info.reg_2934 = read_reg(0x02934);
- itv->yuv_info.reg_2938 = read_reg(0x02938);
- itv->yuv_info.reg_293c = read_reg(0x0293c);
- itv->yuv_info.reg_2940 = read_reg(0x02940);
- itv->yuv_info.reg_2944 = read_reg(0x02944);
- itv->yuv_info.reg_2948 = read_reg(0x02948);
- itv->yuv_info.reg_294c = read_reg(0x0294c);
- itv->yuv_info.reg_2950 = read_reg(0x02950);
- itv->yuv_info.reg_2954 = read_reg(0x02954);
- itv->yuv_info.reg_2958 = read_reg(0x02958);
- itv->yuv_info.reg_295c = read_reg(0x0295c);
- itv->yuv_info.reg_2960 = read_reg(0x02960);
- itv->yuv_info.reg_2964 = read_reg(0x02964);
- itv->yuv_info.reg_2968 = read_reg(0x02968);
- itv->yuv_info.reg_296c = read_reg(0x0296c);
- itv->yuv_info.reg_2970 = read_reg(0x02970);
-
- itv->yuv_info.v_filter_1 = -1;
- itv->yuv_info.v_filter_2 = -1;
- itv->yuv_info.h_filter = -1;
+ yi->reg_2834 = read_reg(0x02834);
+ yi->reg_2838 = read_reg(0x02838);
+ yi->reg_283c = read_reg(0x0283c);
+ yi->reg_2840 = read_reg(0x02840);
+ yi->reg_2844 = read_reg(0x02844);
+ yi->reg_2848 = read_reg(0x02848);
+ yi->reg_2854 = read_reg(0x02854);
+ yi->reg_285c = read_reg(0x0285c);
+ yi->reg_2864 = read_reg(0x02864);
+ yi->reg_2870 = read_reg(0x02870);
+ yi->reg_2874 = read_reg(0x02874);
+ yi->reg_2898 = read_reg(0x02898);
+ yi->reg_2890 = read_reg(0x02890);
+
+ yi->reg_289c = read_reg(0x0289c);
+ yi->reg_2918 = read_reg(0x02918);
+ yi->reg_291c = read_reg(0x0291c);
+ yi->reg_2920 = read_reg(0x02920);
+ yi->reg_2924 = read_reg(0x02924);
+ yi->reg_2928 = read_reg(0x02928);
+ yi->reg_292c = read_reg(0x0292c);
+ yi->reg_2930 = read_reg(0x02930);
+ yi->reg_2934 = read_reg(0x02934);
+ yi->reg_2938 = read_reg(0x02938);
+ yi->reg_293c = read_reg(0x0293c);
+ yi->reg_2940 = read_reg(0x02940);
+ yi->reg_2944 = read_reg(0x02944);
+ yi->reg_2948 = read_reg(0x02948);
+ yi->reg_294c = read_reg(0x0294c);
+ yi->reg_2950 = read_reg(0x02950);
+ yi->reg_2954 = read_reg(0x02954);
+ yi->reg_2958 = read_reg(0x02958);
+ yi->reg_295c = read_reg(0x0295c);
+ yi->reg_2960 = read_reg(0x02960);
+ yi->reg_2964 = read_reg(0x02964);
+ yi->reg_2968 = read_reg(0x02968);
+ yi->reg_296c = read_reg(0x0296c);
+ yi->reg_2970 = read_reg(0x02970);
+
+ yi->v_filter_1 = -1;
+ yi->v_filter_2 = -1;
+ yi->h_filter = -1;
/* Set some valid size info */
- itv->yuv_info.osd_x_offset = read_reg(0x02a04) & 0x00000FFF;
- itv->yuv_info.osd_y_offset = (read_reg(0x02a04) >> 16) & 0x00000FFF;
+ yi->osd_x_offset = read_reg(0x02a04) & 0x00000FFF;
+ yi->osd_y_offset = (read_reg(0x02a04) >> 16) & 0x00000FFF;
/* Bit 2 of reg 2878 indicates current decoder output format
0 : NTSC 1 : PAL */
if (read_reg(0x2878) & 4)
- itv->yuv_info.decode_height = 576;
+ yi->decode_height = 576;
else
- itv->yuv_info.decode_height = 480;
+ yi->decode_height = 480;
- /* If no visible size set, assume full size */
- if (!itv->yuv_info.osd_vis_w)
- itv->yuv_info.osd_vis_w = 720 - itv->yuv_info.osd_x_offset;
-
- if (!itv->yuv_info.osd_vis_h) {
- itv->yuv_info.osd_vis_h = itv->yuv_info.decode_height - itv->yuv_info.osd_y_offset;
+ if (!itv->osd_info) {
+ yi->osd_vis_w = 720 - yi->osd_x_offset;
+ yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
} else {
- /* If output video standard has changed, requested height may
- not be legal */
- if (itv->yuv_info.osd_vis_h + itv->yuv_info.osd_y_offset > itv->yuv_info.decode_height) {
- IVTV_DEBUG_WARN("Clipping yuv output - fb size (%d) exceeds video standard limit (%d)\n",
- itv->yuv_info.osd_vis_h + itv->yuv_info.osd_y_offset,
- itv->yuv_info.decode_height);
- itv->yuv_info.osd_vis_h = itv->yuv_info.decode_height - itv->yuv_info.osd_y_offset;
+ /* If no visible size set, assume full size */
+ if (!yi->osd_vis_w)
+ yi->osd_vis_w = 720 - yi->osd_x_offset;
+
+ if (!yi->osd_vis_h)
+ yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
+ else {
+ /* If output video standard has changed, requested height may
+ not be legal */
+ if (yi->osd_vis_h + yi->osd_y_offset > yi->decode_height) {
+ IVTV_DEBUG_WARN("Clipping yuv output - fb size (%d) exceeds video standard limit (%d)\n",
+ yi->osd_vis_h + yi->osd_y_offset,
+ yi->decode_height);
+ yi->osd_vis_h = yi->decode_height - yi->osd_y_offset;
+ }
}
}
/* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */
- itv->yuv_info.blanking_ptr = kzalloc(720*16,GFP_KERNEL);
- if (itv->yuv_info.blanking_ptr) {
- itv->yuv_info.blanking_dmaptr = pci_map_single(itv->dev, itv->yuv_info.blanking_ptr, 720*16, PCI_DMA_TODEVICE);
- }
+ yi->blanking_ptr = kzalloc(720*16, GFP_KERNEL);
+ if (yi->blanking_ptr)
+ yi->blanking_dmaptr = pci_map_single(itv->dev, yi->blanking_ptr, 720*16, PCI_DMA_TODEVICE);
else {
- itv->yuv_info.blanking_dmaptr = 0;
- IVTV_DEBUG_WARN ("Failed to allocate yuv blanking buffer\n");
+ yi->blanking_dmaptr = 0;
+ IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
}
- IVTV_DEBUG_WARN("Enable video output\n");
- write_reg_sync(0x00108080, 0x2898);
-
/* Enable YUV decoder output */
write_reg_sync(0x01, IVTV_REG_VDM);
set_bit(IVTV_F_I_DECODING_YUV, &itv->i_flags);
- atomic_set(&itv->yuv_info.next_dma_frame,0);
+ atomic_set(&yi->next_dma_frame, 0);
}
int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
diff --git a/drivers/media/video/ivtv/ivtv-yuv.h b/drivers/media/video/ivtv/ivtv-yuv.h
index f7215eeca01..3b966f0a204 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.h
+++ b/drivers/media/video/ivtv/ivtv-yuv.h
@@ -34,6 +34,7 @@
#define IVTV_YUV_UPDATE_HORIZONTAL 0x01
#define IVTV_YUV_UPDATE_VERTICAL 0x02
+#define IVTV_YUV_UPDATE_INVALID 0x04
extern const u32 yuv_offset[4];
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 9684048fe56..52ffd154a3d 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -55,7 +55,6 @@
static int ivtvfb_card_id = -1;
static int ivtvfb_debug = 0;
static int osd_laced;
-static int osd_compat;
static int osd_depth;
static int osd_upper;
static int osd_left;
@@ -65,7 +64,6 @@ static int osd_xres;
module_param(ivtvfb_card_id, int, 0444);
module_param_named(debug,ivtvfb_debug, int, 0644);
module_param(osd_laced, bool, 0444);
-module_param(osd_compat, bool, 0444);
module_param(osd_depth, int, 0444);
module_param(osd_upper, int, 0444);
module_param(osd_left, int, 0444);
@@ -80,12 +78,6 @@ MODULE_PARM_DESC(debug,
"Debug level (bitmask). Default: errors only\n"
"\t\t\t(debug = 3 gives full debugging)");
-MODULE_PARM_DESC(osd_compat,
- "Compatibility mode - Display size is locked (use for old X drivers)\n"
- "\t\t\t0=off\n"
- "\t\t\t1=on\n"
- "\t\t\tdefault off");
-
/* Why upper, left, xres, yres, depth, laced ? To match terminology used
by fbset.
Why start at 1 for left & upper coordinate ? Because X doesn't allow 0 */
@@ -166,9 +158,6 @@ struct osd_info {
unsigned long fb_end_aligned_physaddr;
#endif
- /* Current osd mode */
- int osd_mode;
-
/* Store the buffer offset */
int set_osd_coords_x;
int set_osd_coords_y;
@@ -470,13 +459,11 @@ static int ivtvfb_set_var(struct ivtv *itv, struct fb_var_screeninfo *var)
IVTVFB_DEBUG_WARN("ivtvfb_set_var - Invalid bpp\n");
}
- /* Change osd mode if needed.
- Although rare, things can go wrong. The extra mode
- change seems to help... */
- if (osd_mode != -1 && osd_mode != oi->osd_mode) {
+ /* Set video mode. Although rare, the display can become scrambled even
+ if we don't change mode. Always 'bounce' to osd_mode via mode 0 */
+ if (osd_mode != -1) {
ivtv_vapi(itv, CX2341X_OSD_SET_PIXEL_FORMAT, 1, 0);
ivtv_vapi(itv, CX2341X_OSD_SET_PIXEL_FORMAT, 1, osd_mode);
- oi->osd_mode = osd_mode;
}
oi->bits_per_pixel = var->bits_per_pixel;
@@ -579,14 +566,6 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
osd_height_limit = 480;
}
- /* Check the bits per pixel */
- if (osd_compat) {
- if (var->bits_per_pixel != 32) {
- IVTVFB_DEBUG_WARN("Invalid colour mode: %d\n", var->bits_per_pixel);
- return -EINVAL;
- }
- }
-
if (var->bits_per_pixel == 8 || var->bits_per_pixel == 32) {
var->transp.offset = 24;
var->transp.length = 8;
@@ -638,32 +617,20 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
}
/* Check the resolution */
- if (osd_compat) {
- if (var->xres != oi->ivtvfb_defined.xres ||
- var->yres != oi->ivtvfb_defined.yres ||
- var->xres_virtual != oi->ivtvfb_defined.xres_virtual ||
- var->yres_virtual != oi->ivtvfb_defined.yres_virtual) {
- IVTVFB_DEBUG_WARN("Invalid resolution: %dx%d (virtual %dx%d)\n",
- var->xres, var->yres, var->xres_virtual, var->yres_virtual);
- return -EINVAL;
- }
+ if (var->xres > IVTV_OSD_MAX_WIDTH || var->yres > osd_height_limit) {
+ IVTVFB_DEBUG_WARN("Invalid resolution: %dx%d\n",
+ var->xres, var->yres);
+ return -EINVAL;
}
- else {
- if (var->xres > IVTV_OSD_MAX_WIDTH || var->yres > osd_height_limit) {
- IVTVFB_DEBUG_WARN("Invalid resolution: %dx%d\n",
- var->xres, var->yres);
- return -EINVAL;
- }
- /* Max horizontal size is 1023 @ 32bpp, 2046 & 16bpp, 4092 @ 8bpp */
- if (var->xres_virtual > 4095 / (var->bits_per_pixel / 8) ||
- var->xres_virtual * var->yres_virtual * (var->bits_per_pixel / 8) > oi->video_buffer_size ||
- var->xres_virtual < var->xres ||
- var->yres_virtual < var->yres) {
- IVTVFB_DEBUG_WARN("Invalid virtual resolution: %dx%d\n",
- var->xres_virtual, var->yres_virtual);
- return -EINVAL;
- }
+ /* Max horizontal size is 1023 @ 32bpp, 2046 & 16bpp, 4092 @ 8bpp */
+ if (var->xres_virtual > 4095 / (var->bits_per_pixel / 8) ||
+ var->xres_virtual * var->yres_virtual * (var->bits_per_pixel / 8) > oi->video_buffer_size ||
+ var->xres_virtual < var->xres ||
+ var->yres_virtual < var->yres) {
+ IVTVFB_DEBUG_WARN("Invalid virtual resolution: %dx%d\n",
+ var->xres_virtual, var->yres_virtual);
+ return -EINVAL;
}
/* Some extra checks if in 8 bit mode */
@@ -877,17 +844,15 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
/* Color mode */
- if (osd_compat) osd_depth = 32;
- if (osd_depth != 8 && osd_depth != 16 && osd_depth != 32) osd_depth = 8;
+ if (osd_depth != 8 && osd_depth != 16 && osd_depth != 32)
+ osd_depth = 8;
oi->bits_per_pixel = osd_depth;
oi->bytes_per_pixel = oi->bits_per_pixel / 8;
- /* Invalidate current osd mode to force a mode switch later */
- oi->osd_mode = -1;
-
/* Horizontal size & position */
- if (osd_xres > 720) osd_xres = 720;
+ if (osd_xres > 720)
+ osd_xres = 720;
/* Must be a multiple of 4 for 8bpp & 2 for 16bpp */
if (osd_depth == 8)
@@ -895,10 +860,7 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
else if (osd_depth == 16)
osd_xres &= ~1;
- if (osd_xres)
- start_window.width = osd_xres;
- else
- start_window.width = osd_compat ? 720: 640;
+ start_window.width = osd_xres ? osd_xres : 640;
/* Check horizontal start (osd_left). */
if (osd_left && osd_left + start_window.width > 721) {
@@ -921,10 +883,7 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
if (osd_yres > max_height)
osd_yres = max_height;
- if (osd_yres)
- start_window.height = osd_yres;
- else
- start_window.height = osd_compat ? max_height : (itv->is_50hz ? 480 : 400);
+ start_window.height = osd_yres ? osd_yres : itv->is_50hz ? 480 : 400;
/* Check vertical start (osd_upper). */
if (osd_upper + start_window.height > max_height + 1) {
@@ -1127,10 +1086,6 @@ static int ivtvfb_init_card(struct ivtv *itv)
/* Enable the osd */
ivtvfb_blank(FB_BLANK_UNBLANK, &itv->osd_info->ivtvfb_info);
- /* Note if we're running in compatibility mode */
- if (osd_compat)
- IVTVFB_INFO("Running in compatibility mode. Display resize & mode change disabled\n");
-
/* Allocate DMA */
ivtv_udma_alloc(itv);
return 0;
@@ -1177,9 +1132,12 @@ static void ivtvfb_cleanup(void)
for (i = 0; i < ivtv_cards_active; i++) {
itv = ivtv_cards[i];
if (itv && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) && itv->osd_info) {
+ if (unregister_framebuffer(&itv->osd_info->ivtvfb_info)) {
+ IVTVFB_WARN("Framebuffer %d is in use, cannot unload\n", i);
+ return;
+ }
IVTVFB_DEBUG_INFO("Unregister framebuffer %d\n", i);
ivtvfb_blank(FB_BLANK_POWERDOWN, &itv->osd_info->ivtvfb_info);
- unregister_framebuffer(&itv->osd_info->ivtvfb_info);
ivtvfb_release_buffers(itv);
itv->osd_video_pbase = 0;
}
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 69283926a8d..c3116329043 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1762,7 +1762,6 @@ static struct video_device meye_template = {
.owner = THIS_MODULE,
.name = "meye",
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_MEYE,
.fops = &meye_fops,
.release = video_device_release,
.minor = -1,
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index b8d4ac0d938..d55d5800efb 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -4668,7 +4668,6 @@ static struct video_device vdev_template = {
.owner = THIS_MODULE,
.name = "OV511 USB Camera",
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_OV511,
.fops = &ov511_fops,
.release = video_device_release,
.minor = -1,
diff --git a/drivers/media/video/planb.c b/drivers/media/video/planb.c
index 0ef73d9d584..ce4b2f9791e 100644
--- a/drivers/media/video/planb.c
+++ b/drivers/media/video/planb.c
@@ -2013,7 +2013,6 @@ static struct video_device planb_template=
.owner = THIS_MODULE,
.name = PLANB_DEVICE_NAME,
.type = VID_TYPE_OVERLAY,
- .hardware = VID_HARDWARE_PLANB,
.open = planb_open,
.close = planb_close,
.read = planb_read,
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index b5a67f0dd19..6820c2aabd3 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -895,7 +895,6 @@ static struct video_device pms_template=
.owner = THIS_MODULE,
.name = "Mediavision PMS",
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_PMS,
.fops = &pms_fops,
};
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
index 20b614436d2..205087a3e13 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
@@ -209,6 +209,11 @@ static int pvr2_encoder_cmd(void *ctxt,
LOCK_TAKE(hdw->ctl_lock); do {
+ if (!hdw->flag_encoder_ok) {
+ ret = -EIO;
+ break;
+ }
+
retry_flag = 0;
try_count++;
ret = 0;
@@ -273,6 +278,7 @@ static int pvr2_encoder_cmd(void *ctxt,
ret = -EBUSY;
}
if (ret) {
+ hdw->flag_encoder_ok = 0;
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
"Giving up on command."
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index 985d9ae7f5e..f873994b088 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -225,11 +225,12 @@ struct pvr2_hdw {
unsigned int cmd_debug_write_len; //
unsigned int cmd_debug_read_len; //
- int flag_ok; // device in known good state
- int flag_disconnected; // flag_ok == 0 due to disconnect
- int flag_init_ok; // true if structure is fully initialized
- int flag_streaming_enabled; // true if streaming should be on
- int fw1_state; // current situation with fw1
+ int flag_ok; /* device in known good state */
+ int flag_disconnected; /* flag_ok == 0 due to disconnect */
+ int flag_init_ok; /* true if structure is fully initialized */
+ int flag_streaming_enabled; /* true if streaming should be on */
+ int fw1_state; /* current situation with fw1 */
+ int flag_encoder_ok; /* True if encoder is healthy */
int flag_decoder_is_tuned;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 27b12b4b5c8..402c5948825 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -1248,6 +1248,8 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
time we configure the encoder, then we'll fully configure it. */
hdw->enc_cur_valid = 0;
+ hdw->flag_encoder_ok = 0;
+
/* First prepare firmware loading */
ret |= pvr2_write_register(hdw, 0x0048, 0xffffffff); /*interrupt mask*/
ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000088); /*gpio dir*/
@@ -1346,6 +1348,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"firmware2 upload post-proc failure");
} else {
+ hdw->flag_encoder_ok = !0;
hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_FIRMWARE);
}
return ret;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 4563b3df8a0..7a596ea7cfe 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -1121,15 +1121,12 @@ static const struct file_operations vdev_fops = {
};
-#define VID_HARDWARE_PVRUSB2 38 /* FIXME : need a good value */
-
static struct video_device vdev_template = {
.owner = THIS_MODULE,
.type = VID_TYPE_CAPTURE | VID_TYPE_TUNER,
.type2 = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE
| V4L2_CAP_TUNER | V4L2_CAP_AUDIO
| V4L2_CAP_READWRITE),
- .hardware = VID_HARDWARE_PVRUSB2,
.fops = &vdev_fops,
};
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 950da254214..7300ace8f44 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -166,7 +166,6 @@ static struct video_device pwc_template = {
.owner = THIS_MODULE,
.name = "Philips Webcam", /* Filled in later */
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_PWC,
.release = video_device_release,
.fops = &pwc_fops,
.minor = -1,
diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
index 57f1f5d409e..002e70a33a4 100644
--- a/drivers/media/video/saa7134/saa6752hs.c
+++ b/drivers/media/video/saa7134/saa6752hs.c
@@ -71,7 +71,6 @@ static const struct v4l2_format v4l2_format_table[] =
struct saa6752hs_state {
struct i2c_client client;
- struct v4l2_mpeg_compression old_params;
struct saa6752hs_mpeg_params params;
enum saa6752hs_videoformat video_format;
v4l2_std_id standard;
@@ -161,35 +160,6 @@ static struct saa6752hs_mpeg_params param_defaults =
.au_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_256K,
};
-static struct v4l2_mpeg_compression old_param_defaults =
-{
- .st_type = V4L2_MPEG_TS_2,
- .st_bitrate = {
- .mode = V4L2_BITRATE_CBR,
- .target = 7000,
- },
-
- .ts_pid_pmt = 16,
- .ts_pid_video = 260,
- .ts_pid_audio = 256,
- .ts_pid_pcr = 259,
-
- .vi_type = V4L2_MPEG_VI_2,
- .vi_aspect_ratio = V4L2_MPEG_ASPECT_4_3,
- .vi_bitrate = {
- .mode = V4L2_BITRATE_VBR,
- .target = 4000,
- .max = 6000,
- },
-
- .au_type = V4L2_MPEG_AU_2_II,
- .au_bitrate = {
- .mode = V4L2_BITRATE_CBR,
- .target = 256,
- },
-
-};
-
/* ---------------------------------------------------------------------- */
static int saa6752hs_chip_command(struct i2c_client* client,
@@ -362,74 +332,6 @@ static void saa6752hs_set_subsampling(struct i2c_client* client,
}
-static void saa6752hs_old_set_params(struct i2c_client* client,
- struct v4l2_mpeg_compression* params)
-{
- struct saa6752hs_state *h = i2c_get_clientdata(client);
-
- /* check PIDs */
- if (params->ts_pid_pmt <= MPEG_PID_MAX) {
- h->old_params.ts_pid_pmt = params->ts_pid_pmt;
- h->params.ts_pid_pmt = params->ts_pid_pmt;
- }
- if (params->ts_pid_pcr <= MPEG_PID_MAX) {
- h->old_params.ts_pid_pcr = params->ts_pid_pcr;
- h->params.ts_pid_pcr = params->ts_pid_pcr;
- }
- if (params->ts_pid_video <= MPEG_PID_MAX) {
- h->old_params.ts_pid_video = params->ts_pid_video;
- h->params.ts_pid_video = params->ts_pid_video;
- }
- if (params->ts_pid_audio <= MPEG_PID_MAX) {
- h->old_params.ts_pid_audio = params->ts_pid_audio;
- h->params.ts_pid_audio = params->ts_pid_audio;
- }
-
- /* check bitrate parameters */
- if ((params->vi_bitrate.mode == V4L2_BITRATE_CBR) ||
- (params->vi_bitrate.mode == V4L2_BITRATE_VBR)) {
- h->old_params.vi_bitrate.mode = params->vi_bitrate.mode;
- h->params.vi_bitrate_mode = (params->vi_bitrate.mode == V4L2_BITRATE_VBR) ?
- V4L2_MPEG_VIDEO_BITRATE_MODE_VBR : V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
- }
- if (params->vi_bitrate.mode != V4L2_BITRATE_NONE)
- h->old_params.st_bitrate.target = params->st_bitrate.target;
- if (params->vi_bitrate.mode != V4L2_BITRATE_NONE)
- h->old_params.vi_bitrate.target = params->vi_bitrate.target;
- if (params->vi_bitrate.mode == V4L2_BITRATE_VBR)
- h->old_params.vi_bitrate.max = params->vi_bitrate.max;
- if (params->au_bitrate.mode != V4L2_BITRATE_NONE)
- h->old_params.au_bitrate.target = params->au_bitrate.target;
-
- /* aspect ratio */
- if (params->vi_aspect_ratio == V4L2_MPEG_ASPECT_4_3 ||
- params->vi_aspect_ratio == V4L2_MPEG_ASPECT_16_9) {
- h->old_params.vi_aspect_ratio = params->vi_aspect_ratio;
- if (params->vi_aspect_ratio == V4L2_MPEG_ASPECT_4_3)
- h->params.vi_aspect = V4L2_MPEG_VIDEO_ASPECT_4x3;
- else
- h->params.vi_aspect = V4L2_MPEG_VIDEO_ASPECT_16x9;
- }
-
- /* range checks */
- if (h->old_params.st_bitrate.target > MPEG_TOTAL_TARGET_BITRATE_MAX)
- h->old_params.st_bitrate.target = MPEG_TOTAL_TARGET_BITRATE_MAX;
- if (h->old_params.vi_bitrate.target > MPEG_VIDEO_TARGET_BITRATE_MAX)
- h->old_params.vi_bitrate.target = MPEG_VIDEO_TARGET_BITRATE_MAX;
- if (h->old_params.vi_bitrate.max > MPEG_VIDEO_MAX_BITRATE_MAX)
- h->old_params.vi_bitrate.max = MPEG_VIDEO_MAX_BITRATE_MAX;
- h->params.vi_bitrate = params->vi_bitrate.target;
- h->params.vi_bitrate_peak = params->vi_bitrate.max;
- if (h->old_params.au_bitrate.target <= 256) {
- h->old_params.au_bitrate.target = 256;
- h->params.au_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_256K;
- }
- else {
- h->old_params.au_bitrate.target = 384;
- h->params.au_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_384K;
- }
-}
-
static int handle_ctrl(struct saa6752hs_mpeg_params *params,
struct v4l2_ext_control *ctrl, unsigned int cmd)
{
@@ -697,7 +599,6 @@ static int saa6752hs_attach(struct i2c_adapter *adap, int addr, int kind)
return -ENOMEM;
h->client = client_template;
h->params = param_defaults;
- h->old_params = old_param_defaults;
h->client.adapter = adap;
h->client.addr = addr;
@@ -734,23 +635,11 @@ saa6752hs_command(struct i2c_client *client, unsigned int cmd, void *arg)
{
struct saa6752hs_state *h = i2c_get_clientdata(client);
struct v4l2_ext_controls *ctrls = arg;
- struct v4l2_mpeg_compression *old_params = arg;
struct saa6752hs_mpeg_params params;
int err = 0;
int i;
switch (cmd) {
- case VIDIOC_S_MPEGCOMP:
- if (NULL == old_params) {
- /* apply settings and start encoder */
- saa6752hs_init(client);
- break;
- }
- saa6752hs_old_set_params(client, old_params);
- /* fall through */
- case VIDIOC_G_MPEGCOMP:
- *old_params = h->old_params;
- break;
case VIDIOC_S_EXT_CTRLS:
if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG)
return -EINVAL;
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 1a4a24471f2..a499eea379e 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -429,7 +429,7 @@ int saa7134_set_dmabits(struct saa7134_dev *dev)
assert_spin_locked(&dev->slock);
- if (dev->inresume)
+ if (dev->insuspend)
return 0;
/* video capture -- dma 0 + video task A */
@@ -563,6 +563,9 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id)
unsigned long report,status;
int loop, handled = 0;
+ if (dev->insuspend)
+ goto out;
+
for (loop = 0; loop < 10; loop++) {
report = saa_readl(SAA7134_IRQ_REPORT);
status = saa_readl(SAA7134_IRQ_STATUS);
@@ -1163,6 +1166,7 @@ static void __devexit saa7134_finidev(struct pci_dev *pci_dev)
kfree(dev);
}
+#ifdef CONFIG_PM
static int saa7134_suspend(struct pci_dev *pci_dev , pm_message_t state)
{
@@ -1176,6 +1180,19 @@ static int saa7134_suspend(struct pci_dev *pci_dev , pm_message_t state)
saa_writel(SAA7134_IRQ2, 0);
saa_writel(SAA7134_MAIN_CTRL, 0);
+ synchronize_irq(pci_dev->irq);
+ dev->insuspend = 1;
+
+ /* Disable timeout timers - if we have active buffers, we will
+ fill them on resume */
+
+ del_timer(&dev->video_q.timeout);
+ del_timer(&dev->vbi_q.timeout);
+ del_timer(&dev->ts_q.timeout);
+
+ if (dev->remote)
+ saa7134_ir_stop(dev);
+
pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
pci_save_state(pci_dev);
@@ -1194,24 +1211,27 @@ static int saa7134_resume(struct pci_dev *pci_dev)
/* Do things that are done in saa7134_initdev ,
except of initializing memory structures.*/
- dev->inresume = 1;
saa7134_board_init1(dev);
+ /* saa7134_hwinit1 */
if (saa7134_boards[dev->board].video_out)
saa7134_videoport_init(dev);
-
if (card_has_mpeg(dev))
saa7134_ts_init_hw(dev);
-
+ if (dev->remote)
+ saa7134_ir_start(dev, dev->remote);
saa7134_hw_enable1(dev);
- saa7134_set_decoder(dev);
- saa7134_i2c_call_clients(dev, VIDIOC_S_STD, &dev->tvnorm->id);
+
+
saa7134_board_init2(dev);
- saa7134_hw_enable2(dev);
+ /*saa7134_hwinit2*/
+ saa7134_set_tvnorm_hw(dev);
saa7134_tvaudio_setmute(dev);
saa7134_tvaudio_setvolume(dev, dev->ctl_volume);
+ saa7134_tvaudio_do_scan(dev);
saa7134_enable_i2s(dev);
+ saa7134_hw_enable2(dev);
/*resume unfinished buffer(s)*/
spin_lock_irqsave(&dev->slock, flags);
@@ -1219,13 +1239,19 @@ static int saa7134_resume(struct pci_dev *pci_dev)
saa7134_buffer_requeue(dev, &dev->vbi_q);
saa7134_buffer_requeue(dev, &dev->ts_q);
+ /* FIXME: Disable DMA audio sound - temporarily, until proper support
+ is implemented */
+
+ dev->dmasound.dma_running = 0;
+
/* start DMA now*/
- dev->inresume = 0;
+ dev->insuspend = 0;
saa7134_set_dmabits(dev);
spin_unlock_irqrestore(&dev->slock, flags);
return 0;
}
+#endif
/* ----------------------------------------------------------- */
@@ -1262,8 +1288,10 @@ static struct pci_driver saa7134_pci_driver = {
.id_table = saa7134_pci_tbl,
.probe = saa7134_initdev,
.remove = __devexit_p(saa7134_finidev),
+#ifdef CONFIG_PM
.suspend = saa7134_suspend,
.resume = saa7134_resume
+#endif
};
static int saa7134_init(void)
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 34ca874dd7f..75d0c5bf46d 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -284,17 +284,6 @@ static int ts_do_ioctl(struct inode *inode, struct file *file,
case VIDIOC_S_CTRL:
return saa7134_common_ioctl(dev, cmd, arg);
- case VIDIOC_S_MPEGCOMP:
- printk(KERN_WARNING "VIDIOC_S_MPEGCOMP is obsolete. "
- "Replace with VIDIOC_S_EXT_CTRLS!");
- saa7134_i2c_call_clients(dev, VIDIOC_S_MPEGCOMP, arg);
- ts_init_encoder(dev);
- return 0;
- case VIDIOC_G_MPEGCOMP:
- printk(KERN_WARNING "VIDIOC_G_MPEGCOMP is obsolete. "
- "Replace with VIDIOC_G_EXT_CTRLS!");
- saa7134_i2c_call_clients(dev, VIDIOC_G_MPEGCOMP, arg);
- return 0;
case VIDIOC_S_EXT_CTRLS:
/* count == 0 is abused in saa6752hs.c, so that special
case is handled here explicitly. */
@@ -342,7 +331,6 @@ static struct video_device saa7134_empress_template =
.name = "saa7134-empress",
.type = 0 /* FIXME */,
.type2 = 0 /* FIXME */,
- .hardware = 0,
.fops = &ts_fops,
.minor = -1,
};
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 80d2644f765..3abaa1b8ac9 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -44,6 +44,14 @@ module_param(ir_rc5_remote_gap, int, 0644);
static int ir_rc5_key_timeout = 115;
module_param(ir_rc5_key_timeout, int, 0644);
+static int repeat_delay = 500;
+module_param(repeat_delay, int, 0644);
+MODULE_PARM_DESC(repeat_delay, "delay before key repeat starts");
+static int repeat_period = 33;
+module_param(repeat_period, int, 0644);
+MODULE_PARM_DESC(repeat_period, "repeat period between "
+ "keypresses when key is down");
+
#define dprintk(fmt, arg...) if (ir_debug) \
printk(KERN_DEBUG "%s/ir: " fmt, dev->name , ## arg)
#define i2cdprintk(fmt, arg...) if (ir_debug) \
@@ -59,6 +67,13 @@ static int build_key(struct saa7134_dev *dev)
struct card_ir *ir = dev->remote;
u32 gpio, data;
+ /* here come the additional handshake steps for some cards */
+ switch (dev->board) {
+ case SAA7134_BOARD_GOTVIEW_7135:
+ saa_setb(SAA7134_GPIO_GPSTATUS1, 0x80);
+ saa_clearb(SAA7134_GPIO_GPSTATUS1, 0x80);
+ break;
+ }
/* rising SAA7134_GPIO_GPRESCAN reads the status */
saa_clearb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN);
saa_setb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN);
@@ -159,7 +174,7 @@ static void saa7134_input_timer(unsigned long data)
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
}
-static void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
+void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
{
if (ir->polling) {
setup_timer(&ir->timer, saa7134_input_timer,
@@ -182,7 +197,7 @@ static void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
}
}
-static void saa7134_ir_stop(struct saa7134_dev *dev)
+void saa7134_ir_stop(struct saa7134_dev *dev)
{
if (dev->remote->polling)
del_timer_sync(&dev->remote->timer);
@@ -285,10 +300,10 @@ int saa7134_input_init1(struct saa7134_dev *dev)
break;
case SAA7134_BOARD_GOTVIEW_7135:
ir_codes = ir_codes_gotview7135;
- mask_keycode = 0x0003EC;
- mask_keyup = 0x008000;
+ mask_keycode = 0x0003CC;
mask_keydown = 0x000010;
- polling = 50; // ms
+ polling = 5; /* ms */
+ saa_setb(SAA7134_GPIO_GPMODE1, 0x80);
break;
case SAA7134_BOARD_VIDEOMATE_TV_PVR:
case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS:
@@ -386,6 +401,10 @@ int saa7134_input_init1(struct saa7134_dev *dev)
if (err)
goto err_out_stop;
+ /* the remote isn't as bouncy as a keyboard */
+ ir->dev->rep[REP_DELAY] = repeat_delay;
+ ir->dev->rep[REP_PERIOD] = repeat_period;
+
return 0;
err_out_stop:
diff --git a/drivers/media/video/saa7134/saa7134-tvaudio.c b/drivers/media/video/saa7134/saa7134-tvaudio.c
index 1b9e39a5ea4..f8e304c7623 100644
--- a/drivers/media/video/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/video/saa7134/saa7134-tvaudio.c
@@ -27,6 +27,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/freezer.h>
#include <asm/div64.h>
#include "saa7134-reg.h"
@@ -231,7 +232,7 @@ static void mute_input_7134(struct saa7134_dev *dev)
}
if (dev->hw_mute == mute &&
- dev->hw_input == in && !dev->inresume) {
+ dev->hw_input == in && !dev->insuspend) {
dprintk("mute/input: nothing to do [mute=%d,input=%s]\n",
mute,in->name);
return;
@@ -502,13 +503,17 @@ static int tvaudio_thread(void *data)
unsigned int i, audio, nscan;
int max1,max2,carrier,rx,mode,lastmode,default_carrier;
- allow_signal(SIGTERM);
+
+ set_freezable();
+
for (;;) {
tvaudio_sleep(dev,-1);
- if (kthread_should_stop() || signal_pending(current))
+ if (kthread_should_stop())
goto done;
restart:
+ try_to_freeze();
+
dev->thread.scan1 = dev->thread.scan2;
dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1);
dev->tvaudio = NULL;
@@ -612,9 +617,12 @@ static int tvaudio_thread(void *data)
lastmode = 42;
for (;;) {
+
+ try_to_freeze();
+
if (tvaudio_sleep(dev,5000))
goto restart;
- if (kthread_should_stop() || signal_pending(current))
+ if (kthread_should_stop())
break;
if (UNSET == dev->thread.mode) {
rx = tvaudio_getstereo(dev,&tvaudio[i]);
@@ -630,6 +638,7 @@ static int tvaudio_thread(void *data)
}
done:
+ dev->thread.stopped = 1;
return 0;
}
@@ -777,7 +786,8 @@ static int tvaudio_thread_ddep(void *data)
struct saa7134_dev *dev = data;
u32 value, norms, clock;
- allow_signal(SIGTERM);
+
+ set_freezable();
clock = saa7134_boards[dev->board].audio_clock;
if (UNSET != audio_clock_override)
@@ -790,10 +800,13 @@ static int tvaudio_thread_ddep(void *data)
for (;;) {
tvaudio_sleep(dev,-1);
- if (kthread_should_stop() || signal_pending(current))
+ if (kthread_should_stop())
goto done;
restart:
+
+ try_to_freeze();
+
dev->thread.scan1 = dev->thread.scan2;
dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1);
@@ -870,6 +883,7 @@ static int tvaudio_thread_ddep(void *data)
}
done:
+ dev->thread.stopped = 1;
return 0;
}
@@ -997,7 +1011,7 @@ int saa7134_tvaudio_init2(struct saa7134_dev *dev)
int saa7134_tvaudio_fini(struct saa7134_dev *dev)
{
/* shutdown tvaudio thread */
- if (dev->thread.thread)
+ if (dev->thread.thread && !dev->thread.stopped)
kthread_stop(dev->thread.thread);
saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x07, 0x00); /* LINE1 */
@@ -1013,7 +1027,9 @@ int saa7134_tvaudio_do_scan(struct saa7134_dev *dev)
} else if (dev->thread.thread) {
dev->thread.mode = UNSET;
dev->thread.scan2++;
- wake_up_process(dev->thread.thread);
+
+ if (!dev->insuspend && !dev->thread.stopped)
+ wake_up_process(dev->thread.thread);
} else {
dev->automute = 0;
saa7134_tvaudio_setmute(dev);
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 471b92793c1..3b9ffb4b648 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -560,15 +560,8 @@ void set_tvnorm(struct saa7134_dev *dev, struct saa7134_tvnorm *norm)
dev->crop_current = dev->crop_defrect;
- saa7134_set_decoder(dev);
+ saa7134_set_tvnorm_hw(dev);
- if (card_in(dev, dev->ctl_input).tv) {
- if ((card(dev).tuner_type == TUNER_PHILIPS_TDA8290)
- && ((card(dev).tuner_config == 1)
- || (card(dev).tuner_config == 2)))
- saa7134_set_gpio(dev, 22, 5);
- saa7134_i2c_call_clients(dev, VIDIOC_S_STD, &norm->id);
- }
}
static void video_mux(struct saa7134_dev *dev, int input)
@@ -579,7 +572,8 @@ static void video_mux(struct saa7134_dev *dev, int input)
saa7134_tvaudio_setinput(dev, &card_in(dev, input));
}
-void saa7134_set_decoder(struct saa7134_dev *dev)
+
+static void saa7134_set_decoder(struct saa7134_dev *dev)
{
int luma_control, sync_control, mux;
@@ -630,6 +624,19 @@ void saa7134_set_decoder(struct saa7134_dev *dev)
saa_writeb(SAA7134_RAW_DATA_OFFSET, 0x80);
}
+void saa7134_set_tvnorm_hw(struct saa7134_dev *dev)
+{
+ saa7134_set_decoder(dev);
+
+ if (card_in(dev, dev->ctl_input).tv) {
+ if ((card(dev).tuner_type == TUNER_PHILIPS_TDA8290)
+ && ((card(dev).tuner_config == 1)
+ || (card(dev).tuner_config == 2)))
+ saa7134_set_gpio(dev, 22, 5);
+ saa7134_i2c_call_clients(dev, VIDIOC_S_STD, &dev->tvnorm->id);
+ }
+}
+
static void set_h_prescale(struct saa7134_dev *dev, int task, int prescale)
{
static const struct {
@@ -2352,7 +2359,6 @@ struct video_device saa7134_video_template =
.name = "saa7134-video",
.type = VID_TYPE_CAPTURE|VID_TYPE_TUNER|
VID_TYPE_CLIPPING|VID_TYPE_SCALES,
- .hardware = 0,
.fops = &video_fops,
.minor = -1,
};
@@ -2361,7 +2367,6 @@ struct video_device saa7134_vbi_template =
{
.name = "saa7134-vbi",
.type = VID_TYPE_TUNER|VID_TYPE_TELETEXT,
- .hardware = 0,
.fops = &video_fops,
.minor = -1,
};
@@ -2370,7 +2375,6 @@ struct video_device saa7134_radio_template =
{
.name = "saa7134-radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &radio_fops,
.minor = -1,
};
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 28ec6804bd5..66a390c321a 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -333,6 +333,7 @@ struct saa7134_thread {
unsigned int scan1;
unsigned int scan2;
unsigned int mode;
+ unsigned int stopped;
};
/* buffer for one video/vbi/ts frame */
@@ -524,7 +525,7 @@ struct saa7134_dev {
unsigned int hw_mute;
int last_carrier;
int nosignal;
- unsigned int inresume;
+ unsigned int insuspend;
/* SAA7134_MPEG_* */
struct saa7134_ts ts;
@@ -632,7 +633,7 @@ extern struct video_device saa7134_radio_template;
void set_tvnorm(struct saa7134_dev *dev, struct saa7134_tvnorm *norm);
int saa7134_videoport_init(struct saa7134_dev *dev);
-void saa7134_set_decoder(struct saa7134_dev *dev);
+void saa7134_set_tvnorm_hw(struct saa7134_dev *dev);
int saa7134_common_ioctl(struct saa7134_dev *dev,
unsigned int cmd, void *arg);
@@ -706,6 +707,8 @@ int saa7134_input_init1(struct saa7134_dev *dev);
void saa7134_input_fini(struct saa7134_dev *dev);
void saa7134_input_irq(struct saa7134_dev *dev);
void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir);
+void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir);
+void saa7134_ir_stop(struct saa7134_dev *dev);
/*
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index 93fb04ed99a..d5d7d6cf734 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -1231,7 +1231,6 @@ static struct video_device se401_template = {
.owner = THIS_MODULE,
.name = "se401 USB camera",
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_SE401,
.fops = &se401_fops,
};
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 6991e06f765..511847912c4 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -3319,7 +3319,6 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
strcpy(cam->v4ldev->name, "SN9C1xx PC Camera");
cam->v4ldev->owner = THIS_MODULE;
cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
- cam->v4ldev->hardware = 0;
cam->v4ldev->fops = &sn9c102_fops;
cam->v4ldev->minor = video_nr[dev_nr];
cam->v4ldev->release = video_device_release;
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index eb220461ac7..3fb85af5d1f 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -1917,7 +1917,6 @@ static const struct file_operations saa_fops = {
static struct video_device saa_template = {
.name = "SAA7146A",
.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY,
- .hardware = VID_HARDWARE_SAA7146,
.fops = &saa_fops,
.minor = -1,
};
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 9e009a7ab86..afc32aa56fd 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -1398,7 +1398,6 @@ static struct video_device stv680_template = {
.owner = THIS_MODULE,
.name = "STV0680 USB camera",
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_SE401,
.fops = &stv680_fops,
.release = video_device_release,
.minor = -1,
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 94843086cda..6a777604f07 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -113,7 +113,7 @@ static void fe_standby(struct tuner *t)
static int fe_has_signal(struct tuner *t)
{
struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
- u16 strength;
+ u16 strength = 0;
if (fe_tuner_ops->get_rf_strength)
fe_tuner_ops->get_rf_strength(&t->fe, &strength);
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index 37ce36b9e58..fb434b5602a 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -952,7 +952,6 @@ static const struct file_operations usbvideo_fops = {
static const struct video_device usbvideo_template = {
.owner = THIS_MODULE,
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_CPIA,
.fops = &usbvideo_fops,
};
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index db3c9e3deb2..da1ba021110 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -1074,7 +1074,6 @@ static struct video_device vicam_template = {
.owner = THIS_MODULE,
.name = "ViCam-based USB Camera",
.type = VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_VICAM,
.fops = &vicam_fops,
.minor = -1,
};
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index e2f3c01cfa1..36e689fa16c 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1400,7 +1400,6 @@ static const struct file_operations usbvision_fops = {
static struct video_device usbvision_video_template = {
.owner = THIS_MODULE,
.type = VID_TYPE_TUNER | VID_TYPE_CAPTURE,
- .hardware = VID_HARDWARE_USBVISION,
.fops = &usbvision_fops,
.name = "usbvision-video",
.release = video_device_release,
@@ -1455,7 +1454,6 @@ static struct video_device usbvision_radio_template=
{
.owner = THIS_MODULE,
.type = VID_TYPE_TUNER,
- .hardware = VID_HARDWARE_USBVISION,
.fops = &usbvision_radio_fops,
.name = "usbvision-radio",
.release = video_device_release,
@@ -1492,7 +1490,6 @@ static struct video_device usbvision_vbi_template=
{
.owner = THIS_MODULE,
.type = VID_TYPE_TUNER,
- .hardware = VID_HARDWARE_USBVISION,
.fops = &usbvision_vbi_fops,
.release = video_device_release,
.name = "usbvision-vbi",
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 321249240d0..1141b4bf41c 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -317,8 +317,6 @@ static const char *v4l2_ioctls[] = {
[_IOC_NR(VIDIOC_ENUM_FMT)] = "VIDIOC_ENUM_FMT",
[_IOC_NR(VIDIOC_G_FMT)] = "VIDIOC_G_FMT",
[_IOC_NR(VIDIOC_S_FMT)] = "VIDIOC_S_FMT",
- [_IOC_NR(VIDIOC_G_MPEGCOMP)] = "VIDIOC_G_MPEGCOMP",
- [_IOC_NR(VIDIOC_S_MPEGCOMP)] = "VIDIOC_S_MPEGCOMP",
[_IOC_NR(VIDIOC_REQBUFS)] = "VIDIOC_REQBUFS",
[_IOC_NR(VIDIOC_QUERYBUF)] = "VIDIOC_QUERYBUF",
[_IOC_NR(VIDIOC_G_FBUF)] = "VIDIOC_G_FBUF",
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index 5599a36490f..89a44f16f0b 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -967,6 +967,7 @@ int videobuf_cgmbuf(struct videobuf_queue *q,
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_cgmbuf);
#endif
/* --------------------------------------------------------------------- */
@@ -985,7 +986,6 @@ EXPORT_SYMBOL_GPL(videobuf_reqbufs);
EXPORT_SYMBOL_GPL(videobuf_querybuf);
EXPORT_SYMBOL_GPL(videobuf_qbuf);
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
-EXPORT_SYMBOL_GPL(videobuf_cgmbuf);
EXPORT_SYMBOL_GPL(videobuf_streamon);
EXPORT_SYMBOL_GPL(videobuf_streamoff);
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index 3eb6123227b..0a18286279d 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -60,12 +60,13 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
+ sg_init_table(sglist, nr_pages);
for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
pg = vmalloc_to_page(virt);
if (NULL == pg)
goto err;
BUG_ON(PageHighMem(pg));
- sglist[i].page = pg;
+ sg_set_page(&sglist[i], pg);
sglist[i].length = PAGE_SIZE;
}
return sglist;
@@ -86,13 +87,14 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
+ sg_init_table(sglist, nr_pages);
if (NULL == pages[0])
goto nopage;
if (PageHighMem(pages[0]))
/* DMA to highmem pages might not work */
goto highmem;
- sglist[0].page = pages[0];
+ sg_set_page(&sglist[0], pages[0]);
sglist[0].offset = offset;
sglist[0].length = PAGE_SIZE - offset;
for (i = 1; i < nr_pages; i++) {
@@ -100,7 +102,7 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
goto nopage;
if (PageHighMem(pages[i]))
goto highmem;
- sglist[i].page = pages[i];
+ sg_set_page(&sglist[i], pages[i]);
sglist[i].length = PAGE_SIZE;
}
return sglist;
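
The conversions in this hunk follow the pattern applied across the whole series: stop writing struct scatterlist members (sg->page) directly and go through the sg helpers instead, after initializing the table with sg_init_table(). A minimal sketch of the idiom as this patch uses it, assuming the two-argument sg_set_page() form present at this point in the tree (the function name below is illustrative, not from the driver):

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static struct scatterlist *pages_to_sg(struct page **pages, int nr_pages)
    {
            struct scatterlist *sglist;
            int i;

            sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
            if (!sglist)
                    return NULL;
            /* initialize (and terminate) every entry before touching it */
            sg_init_table(sglist, nr_pages);
            for (i = 0; i < nr_pages; i++) {
                    /* assign the page through the helper, not via sg->page */
                    sg_set_page(&sglist[i], pages[i]);
                    sglist[i].length = PAGE_SIZE;
            }
            return sglist;
    }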
diff --git a/drivers/media/video/videocodec.c b/drivers/media/video/videocodec.c
index f2bbd7a4d56..87951ec8254 100644
--- a/drivers/media/video/videocodec.c
+++ b/drivers/media/video/videocodec.c
@@ -86,8 +86,8 @@ videocodec_attach (struct videocodec_master *master)
}
dprintk(2,
- "videocodec_attach: '%s', type: %x, flags %lx, magic %lx\n",
- master->name, master->type, master->flags, master->magic);
+ "videocodec_attach: '%s', flags %lx, magic %lx\n",
+ master->name, master->flags, master->magic);
if (!h) {
dprintk(1,
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index 8d8e517b344..9611c399028 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -1313,48 +1313,6 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
ret=vfd->vidioc_cropcap(file, fh, p);
break;
}
- case VIDIOC_G_MPEGCOMP:
- {
- struct v4l2_mpeg_compression *p=arg;
-
- /*FIXME: Several fields not shown */
- if (!vfd->vidioc_g_mpegcomp)
- break;
- ret=vfd->vidioc_g_mpegcomp(file, fh, p);
- if (!ret)
- dbgarg (cmd, "ts_pid_pmt=%d, ts_pid_audio=%d,"
- " ts_pid_video=%d, ts_pid_pcr=%d, "
- "ps_size=%d, au_sample_rate=%d, "
- "au_pesid=%c, vi_frame_rate=%d, "
- "vi_frames_per_gop=%d, "
- "vi_bframes_count=%d, vi_pesid=%c\n",
- p->ts_pid_pmt,p->ts_pid_audio,
- p->ts_pid_video,p->ts_pid_pcr,
- p->ps_size, p->au_sample_rate,
- p->au_pesid, p->vi_frame_rate,
- p->vi_frames_per_gop,
- p->vi_bframes_count, p->vi_pesid);
- break;
- }
- case VIDIOC_S_MPEGCOMP:
- {
- struct v4l2_mpeg_compression *p=arg;
- /*FIXME: Several fields not shown */
- if (!vfd->vidioc_s_mpegcomp)
- break;
- dbgarg (cmd, "ts_pid_pmt=%d, ts_pid_audio=%d, "
- "ts_pid_video=%d, ts_pid_pcr=%d, ps_size=%d, "
- "au_sample_rate=%d, au_pesid=%c, "
- "vi_frame_rate=%d, vi_frames_per_gop=%d, "
- "vi_bframes_count=%d, vi_pesid=%c\n",
- p->ts_pid_pmt,p->ts_pid_audio, p->ts_pid_video,
- p->ts_pid_pcr, p->ps_size, p->au_sample_rate,
- p->au_pesid, p->vi_frame_rate,
- p->vi_frames_per_gop, p->vi_bframes_count,
- p->vi_pesid);
- ret=vfd->vidioc_s_mpegcomp(file, fh, p);
- break;
- }
case VIDIOC_G_JPEGCOMP:
{
struct v4l2_jpegcompression *p=arg;
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index b532aa280a1..ee73dc75131 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1119,7 +1119,6 @@ static const struct file_operations vivi_fops = {
static struct video_device vivi = {
.name = "vivi",
.type = VID_TYPE_CAPTURE,
- .hardware = 0,
.fops = &vivi_fops,
.minor = -1,
// .release = video_device_release,
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index 47366408637..08aaae07c7e 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -196,7 +196,6 @@ static struct video_device w9966_template = {
.owner = THIS_MODULE,
.name = W9966_DRIVERNAME,
.type = VID_TYPE_CAPTURE | VID_TYPE_SCALES,
- .hardware = VID_HARDWARE_W9966,
.fops = &w9966_fops,
};
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index 9e7f3e685d7..2ae1430f5f7 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -3549,7 +3549,6 @@ w9968cf_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
strcpy(cam->v4ldev->name, symbolic(camlist, mod_id));
cam->v4ldev->owner = THIS_MODULE;
cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
- cam->v4ldev->hardware = VID_HARDWARE_W9968CF;
cam->v4ldev->fops = &w9968cf_fops;
cam->v4ldev->minor = video_nr[dev_nr];
cam->v4ldev->release = video_device_release;
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 08a93c31c0a..2c5665c8244 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -1985,7 +1985,6 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
strcpy(cam->v4ldev->name, "ZC0301[P] PC Camera");
cam->v4ldev->owner = THIS_MODULE;
cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
- cam->v4ldev->hardware = 0;
cam->v4ldev->fops = &zc0301_fops;
cam->v4ldev->minor = video_nr[dev_nr];
cam->v4ldev->release = video_device_release;
diff --git a/drivers/media/video/zoran_card.c b/drivers/media/video/zoran_card.c
index 48da36a15fc..6e0ac4c5c37 100644
--- a/drivers/media/video/zoran_card.c
+++ b/drivers/media/video/zoran_card.c
@@ -1235,8 +1235,14 @@ zoran_setup_videocodec (struct zoran *zr,
return m;
}
- m->magic = 0L; /* magic not used */
- m->type = VID_HARDWARE_ZR36067;
+	/* magic and type are unused for the master struct; they only make
+	   sense for codec structs.
+	   In the past, .type was initialized to the old V4L1 .hardware
+	   value, VID_HARDWARE_ZR36067.
+	 */
+ m->magic = 0L;
+ m->type = 0;
+
m->flags = CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER;
strncpy(m->name, ZR_DEVNAME(zr), sizeof(m->name));
m->data = zr;
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c
index 419e5af7853..dd3d7d2c8b0 100644
--- a/drivers/media/video/zoran_driver.c
+++ b/drivers/media/video/zoran_driver.c
@@ -60,7 +60,6 @@
#include <linux/spinlock.h>
#define MAP_NR(x) virt_to_page(x)
-#define ZORAN_HARDWARE VID_HARDWARE_ZR36067
#define ZORAN_VID_TYPE ( \
VID_TYPE_CAPTURE | \
VID_TYPE_OVERLAY | \
@@ -4659,7 +4658,6 @@ struct video_device zoran_template __devinitdata = {
#ifdef CONFIG_VIDEO_V4L2
.type2 = ZORAN_V4L2_VID_FLAGS,
#endif
- .hardware = ZORAN_HARDWARE,
.fops = &zoran_fops,
.release = &zoran_vdev_release,
.minor = -1
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index a5d0354bbbd..9203a0b221b 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -13,6 +13,7 @@
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
+#include <linux/scatterlist.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -153,19 +154,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
- mq->sg = kzalloc(sizeof(struct scatterlist),
+ mq->sg = kmalloc(sizeof(struct scatterlist),
GFP_KERNEL);
if (!mq->sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
+ sg_init_table(mq->sg, 1);
- mq->bounce_sg = kzalloc(sizeof(struct scatterlist) *
+ mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
bouncesz / 512, GFP_KERNEL);
if (!mq->bounce_sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
+ sg_init_table(mq->bounce_sg, bouncesz / 512);
}
}
#endif
@@ -302,12 +305,12 @@ static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
BUG_ON(dst_len == 0);
if (dst_size == 0) {
- dst_buf = page_address(dst->page) + dst->offset;
+ dst_buf = sg_virt(dst);
dst_size = dst->length;
}
if (src_size == 0) {
- src_buf = page_address(src->page) + src->offset;
+ src_buf = sg_virt(src);
src_size = src->length;
}
@@ -353,9 +356,7 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
return 1;
}
- mq->sg[0].page = virt_to_page(mq->bounce_buf);
- mq->sg[0].offset = offset_in_page(mq->bounce_buf);
- mq->sg[0].length = 0;
+ sg_init_one(mq->sg, mq->bounce_buf, 0);
while (sg_len) {
mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
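
The sg_init_one() used just above collapses the old three-line page/offset/length setup into a single call that also initializes the entry; here it starts the bounce-buffer entry with a length of 0, which the loop below then grows by the bounced segment lengths. A sketch of the equivalence, assuming a lowmem buffer buf of len bytes:

    /* old open-coded form, as removed elsewhere in this series */
    sg->page   = virt_to_page(buf);
    sg->offset = offset_in_page(buf);
    sg->length = len;

    /* new form: same result, plus full initialization of the entry */
    sg_init_one(sg, buf, len);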
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 7a452c2ad1f..b1edcefdd4f 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -149,7 +149,7 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
sg = &data->sg[i];
- sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
+ sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
amount = min(size, sg->length);
size -= amount;
@@ -226,7 +226,7 @@ static void at91_mci_pre_dma_read(struct at91mci_host *host)
sg = &data->sg[host->transfer_index++];
pr_debug("sg = %p\n", sg);
- sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);
+ sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);
pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
@@ -283,7 +283,7 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
int index;
/* Swap the contents of the buffer */
- buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
for (index = 0; index < (sg->length / 4); index++)
@@ -292,7 +292,7 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
}
- flush_dcache_page(sg->page);
+ flush_dcache_page(sg_page(sg));
}
/* Is there another transfer to trigger? */
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 92c4d0dfee4..bcbb6d247bf 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -340,7 +340,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
/* This is the pointer to the data buffer */
sg = &data->sg[host->pio.index];
- sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;
+ sg_ptr = sg_virt(sg) + host->pio.offset;
/* This is the space left inside the buffer */
sg_len = data->sg[host->pio.index].length - host->pio.offset;
@@ -400,7 +400,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
if (host->pio.index < host->dma.len) {
sg = &data->sg[host->pio.index];
- sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;
+ sg_ptr = sg_virt(sg) + host->pio.offset;
/* This is the space left inside the buffer */
sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
@@ -613,14 +613,11 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
if (host->flags & HOST_F_XMIT){
ret = au1xxx_dbdma_put_source_flags(channel,
- (void *) (page_address(sg->page) +
- sg->offset),
- len, flags);
+ (void *) sg_virt(sg), len, flags);
}
else {
ret = au1xxx_dbdma_put_dest_flags(channel,
- (void *) (page_address(sg->page) +
- sg->offset),
+ (void *) sg_virt(sg),
len, flags);
}
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 6ebc41e7592..fc72e1fadb6 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -262,7 +262,7 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
}
/* Convert back to virtual address */
- host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset);
+ host->data_ptr = (u16*)sg_virt(data->sg);
host->data_cnt = 0;
clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 7ae18eaed6c..12c2d807c14 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -813,7 +813,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
&& dir == DMA_FROM_DEVICE)
dir = DMA_BIDIRECTIONAL;
- dma_addr = dma_map_page(dma_dev, sg->page, 0,
+ dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
PAGE_SIZE, dir);
if (direction == DMA_TO_DEVICE)
t->tx_dma = dma_addr + sg->offset;
@@ -822,7 +822,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
}
/* allow pio too; we don't allow highmem */
- kmap_addr = kmap(sg->page);
+ kmap_addr = kmap(sg_page(sg));
if (direction == DMA_TO_DEVICE)
t->tx_buf = kmap_addr + sg->offset;
else
@@ -855,8 +855,8 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
/* discard mappings */
if (direction == DMA_FROM_DEVICE)
- flush_kernel_dcache_page(sg->page);
- kunmap(sg->page);
+ flush_kernel_dcache_page(sg_page(sg));
+ kunmap(sg_page(sg));
if (dma_dev)
dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 60a67dfcda6..971e18b91f4 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -24,10 +24,10 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/clk.h>
+#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/scatterlist.h>
#include <asm/mach-types.h>
#include <asm/arch/board.h>
@@ -383,7 +383,7 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host)
sg = host->data->sg + host->sg_idx;
host->buffer_bytes_left = sg->length;
- host->buffer = page_address(sg->page) + sg->offset;
+ host->buffer = sg_virt(sg);
if (host->buffer_bytes_left > host->total_bytes_left)
host->buffer_bytes_left = host->total_bytes_left;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b397121b947..0db837e44b7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -231,7 +231,7 @@ static void sdhci_deactivate_led(struct sdhci_host *host)
static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
{
- return page_address(host->cur_sg->page) + host->cur_sg->offset;
+ return sg_virt(host->cur_sg);
}
static inline int sdhci_next_sg(struct sdhci_host* host)
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 9b904795eb7..c11a3d25605 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -192,7 +192,7 @@ static void tifm_sd_transfer_data(struct tifm_sd *host)
}
off = sg[host->sg_pos].offset + host->block_pos;
- pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT);
+ pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
@@ -241,18 +241,18 @@ static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
}
off = sg[host->sg_pos].offset + host->block_pos;
- pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT);
+ pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
p_cnt = min(p_cnt, t_size);
if (r_data->flags & MMC_DATA_WRITE)
- tifm_sd_copy_page(host->bounce_buf.page,
+ tifm_sd_copy_page(sg_page(&host->bounce_buf),
r_data->blksz - t_size,
pg, p_off, p_cnt);
else if (r_data->flags & MMC_DATA_READ)
- tifm_sd_copy_page(pg, p_off, host->bounce_buf.page,
+ tifm_sd_copy_page(pg, p_off, sg_page(&host->bounce_buf),
r_data->blksz - t_size, p_cnt);
t_size -= p_cnt;
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 80db11c05f2..fa4c8c53cc7 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -269,7 +269,7 @@ static inline int wbsd_next_sg(struct wbsd_host *host)
static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
{
- return page_address(host->cur_sg->page) + host->cur_sg->offset;
+ return sg_virt(host->cur_sg);
}
static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
@@ -283,7 +283,7 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
len = data->sg_len;
for (i = 0; i < len; i++) {
- sgbuf = page_address(sg[i].page) + sg[i].offset;
+ sgbuf = sg_virt(&sg[i]);
memcpy(dmabuf, sgbuf, sg[i].length);
dmabuf += sg[i].length;
}
@@ -300,7 +300,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
len = data->sg_len;
for (i = 0; i < len; i++) {
- sgbuf = page_address(sg[i].page) + sg[i].offset;
+ sgbuf = sg_virt(&sg[i]);
memcpy(sgbuf, dmabuf, sg[i].length);
dmabuf += sg[i].length;
}
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 4b3c109d5ea..887633b207d 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -60,7 +60,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
PCI_DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
- __free_pages(chunk->mem[i].page,
+ __free_pages(sg_page(&chunk->mem[i]),
get_order(chunk->mem[i].length));
}
@@ -70,7 +70,7 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
- lowmem_page_address(chunk->mem[i].page),
+ lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
}
@@ -95,10 +95,13 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
- mem->page = alloc_pages(gfp_mask, order);
- if (!mem->page)
+ struct page *page;
+
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
return -ENOMEM;
+ sg_set_page(mem, page);
mem->length = PAGE_SIZE << order;
mem->offset = 0;
return 0;
@@ -145,6 +148,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
if (!chunk)
goto fail;
+ sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
list_add_tail(&chunk->list, &icm->chunk_list);
@@ -334,7 +338,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_han
* been assigned to.
*/
if (chunk->mem[i].length > offset) {
- page = chunk->mem[i].page;
+ page = sg_page(&chunk->mem[i]);
goto out;
}
offset -= chunk->mem[i].length;
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index c0b6d19d145..bcb0885011c 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,7 +55,7 @@
#include <linux/mm.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#include "ppp_mppe.h"
@@ -68,9 +68,7 @@ MODULE_VERSION("1.0.2");
static unsigned int
setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
{
- sg[0].page = virt_to_page(address);
- sg[0].offset = offset_in_page(address);
- sg[0].length = length;
+ sg_init_one(sg, address, length);
return length;
}
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 006054a4099..55505565073 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -20,6 +20,9 @@ obj-$(CONFIG_PCI_MSI) += msi.o
# Build the Hypertransport interrupt support
obj-$(CONFIG_HT_IRQ) += htirq.o
+# Build Intel IOMMU support
+obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
+
#
# Some architectures use the generic PCI setup functions
#
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
new file mode 100644
index 00000000000..5dfdfdac92e
--- /dev/null
+++ b/drivers/pci/dmar.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Copyright (C) Ashok Raj <ashok.raj@intel.com>
+ * Copyright (C) Shaohua Li <shaohua.li@intel.com>
+ * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ *
+ * This file implements early detection/parsing of DMA Remapping Devices
+ * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
+ * tables.
+ */
+
+#include <linux/pci.h>
+#include <linux/dmar.h>
+
+#undef PREFIX
+#define PREFIX "DMAR:"
+
+/* No locks are needed as the DMA remapping hardware unit
+ * list is constructed at boot time and hotplug of
+ * these units is not supported by the architecture.
+ */
+LIST_HEAD(dmar_drhd_units);
+LIST_HEAD(dmar_rmrr_units);
+
+static struct acpi_table_header * __initdata dmar_tbl;
+
+static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
+{
+ /*
+ * add INCLUDE_ALL at the tail, so a scan of the list will find it
+ * at the very end.
+ */
+ if (drhd->include_all)
+ list_add_tail(&drhd->list, &dmar_drhd_units);
+ else
+ list_add(&drhd->list, &dmar_drhd_units);
+}
+
+static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
+{
+ list_add(&rmrr->list, &dmar_rmrr_units);
+}
+
+static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
+ struct pci_dev **dev, u16 segment)
+{
+ struct pci_bus *bus;
+ struct pci_dev *pdev = NULL;
+ struct acpi_dmar_pci_path *path;
+ int count;
+
+ bus = pci_find_bus(segment, scope->bus);
+ path = (struct acpi_dmar_pci_path *)(scope + 1);
+ count = (scope->length - sizeof(struct acpi_dmar_device_scope))
+ / sizeof(struct acpi_dmar_pci_path);
+
+ while (count) {
+ if (pdev)
+ pci_dev_put(pdev);
+ /*
+ * Some BIOSes list non-existent devices in the DMAR table; just
+ * ignore them.
+ */
+ if (!bus) {
+ printk(KERN_WARNING
+ PREFIX "Device scope bus [%d] not found\n",
+ scope->bus);
+ break;
+ }
+ pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
+ if (!pdev) {
+ printk(KERN_WARNING PREFIX
+ "Device scope device [%04x:%02x:%02x.%02x] not found\n",
+ segment, bus->number, path->dev, path->fn);
+ break;
+ }
+ path++;
+ count--;
+ bus = pdev->subordinate;
+ }
+ if (!pdev) {
+ printk(KERN_WARNING PREFIX
+ "Device scope device [%04x:%02x:%02x.%02x] not found\n",
+ segment, scope->bus, path->dev, path->fn);
+ *dev = NULL;
+ return 0;
+ }
+ if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
+ pdev->subordinate) || (scope->entry_type == \
+ ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
+ pci_dev_put(pdev);
+ printk(KERN_WARNING PREFIX
+ "Device scope type does not match for %s\n",
+ pci_name(pdev));
+ return -EINVAL;
+ }
+ *dev = pdev;
+ return 0;
+}
+
+static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
+ struct pci_dev ***devices, u16 segment)
+{
+ struct acpi_dmar_device_scope *scope;
+ void * tmp = start;
+ int index;
+ int ret;
+
+ *cnt = 0;
+ while (start < end) {
+ scope = start;
+ if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
+ scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
+ (*cnt)++;
+ else
+ printk(KERN_WARNING PREFIX
+ "Unsupported device scope\n");
+ start += scope->length;
+ }
+ if (*cnt == 0)
+ return 0;
+
+ *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
+ if (!*devices)
+ return -ENOMEM;
+
+ start = tmp;
+ index = 0;
+ while (start < end) {
+ scope = start;
+ if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
+ scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
+ ret = dmar_parse_one_dev_scope(scope,
+ &(*devices)[index], segment);
+ if (ret) {
+ kfree(*devices);
+ return ret;
+ }
+ index ++;
+ }
+ start += scope->length;
+ }
+
+ return 0;
+}
+
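+
dmar_parse_one_dev_scope() above derives how many bus/device/function hops lead to the target from the record size: count = (scope->length - sizeof(struct acpi_dmar_device_scope)) / sizeof(struct acpi_dmar_pci_path), then follows pdev->subordinate one hop per path element. As a hedged worked example, assuming the standard ACPI layout of a 6-byte scope header and 2-byte path elements, a scope record of length 10 carries two path elements, i.e. the device sits behind one bridge below scope->bus. dmar_parse_dev_scope() then applies the usual two-pass idiom for variable-length subtables: count the endpoint/bridge entries, allocate the pci_dev array, and walk the area again to fill it, stepping by each record's own length field.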
+/**
+ * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
+ * structure which uniquely represents one DMA remapping hardware unit
+ * present in the platform
+ */
+static int __init
+dmar_parse_one_drhd(struct acpi_dmar_header *header)
+{
+ struct acpi_dmar_hardware_unit *drhd;
+ struct dmar_drhd_unit *dmaru;
+ int ret = 0;
+ static int include_all;
+
+ dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
+ if (!dmaru)
+ return -ENOMEM;
+
+ drhd = (struct acpi_dmar_hardware_unit *)header;
+ dmaru->reg_base_addr = drhd->address;
+ dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
+
+ if (!dmaru->include_all)
+ ret = dmar_parse_dev_scope((void *)(drhd + 1),
+ ((void *)drhd) + header->length,
+ &dmaru->devices_cnt, &dmaru->devices,
+ drhd->segment);
+ else {
+ /* Only allow one INCLUDE_ALL */
+ if (include_all) {
+ printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
+ "device scope is allowed\n");
+ ret = -EINVAL;
+ }
+ include_all = 1;
+ }
+
+ if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all))
+ kfree(dmaru);
+ else
+ dmar_register_drhd_unit(dmaru);
+ return ret;
+}
+
+static int __init
+dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+{
+ struct acpi_dmar_reserved_memory *rmrr;
+ struct dmar_rmrr_unit *rmrru;
+ int ret = 0;
+
+ rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
+ if (!rmrru)
+ return -ENOMEM;
+
+ rmrr = (struct acpi_dmar_reserved_memory *)header;
+ rmrru->base_address = rmrr->base_address;
+ rmrru->end_address = rmrr->end_address;
+ ret = dmar_parse_dev_scope((void *)(rmrr + 1),
+ ((void *)rmrr) + header->length,
+ &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
+
+ if (ret || (rmrru->devices_cnt == 0))
+ kfree(rmrru);
+ else
+ dmar_register_rmrr_unit(rmrru);
+ return ret;
+}
+
+static void __init
+dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
+{
+ struct acpi_dmar_hardware_unit *drhd;
+ struct acpi_dmar_reserved_memory *rmrr;
+
+ switch (header->type) {
+ case ACPI_DMAR_TYPE_HARDWARE_UNIT:
+ drhd = (struct acpi_dmar_hardware_unit *)header;
+ printk (KERN_INFO PREFIX
+ "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
+ drhd->flags, drhd->address);
+ break;
+ case ACPI_DMAR_TYPE_RESERVED_MEMORY:
+ rmrr = (struct acpi_dmar_reserved_memory *)header;
+
+ printk (KERN_INFO PREFIX
+ "RMRR base: 0x%016Lx end: 0x%016Lx\n",
+ rmrr->base_address, rmrr->end_address);
+ break;
+ }
+}
+
+/**
+ * parse_dmar_table - parses the DMA reporting table
+ */
+static int __init
+parse_dmar_table(void)
+{
+ struct acpi_table_dmar *dmar;
+ struct acpi_dmar_header *entry_header;
+ int ret = 0;
+
+ dmar = (struct acpi_table_dmar *)dmar_tbl;
+ if (!dmar)
+ return -ENODEV;
+
+ if (!dmar->width) {
+ printk (KERN_WARNING PREFIX "Zero: Invalid DMAR haw\n");
+ return -EINVAL;
+ }
+
+ printk (KERN_INFO PREFIX "Host address width %d\n",
+ dmar->width + 1);
+
+ entry_header = (struct acpi_dmar_header *)(dmar + 1);
+ while (((unsigned long)entry_header) <
+ (((unsigned long)dmar) + dmar_tbl->length)) {
+ dmar_table_print_dmar_entry(entry_header);
+
+ switch (entry_header->type) {
+ case ACPI_DMAR_TYPE_HARDWARE_UNIT:
+ ret = dmar_parse_one_drhd(entry_header);
+ break;
+ case ACPI_DMAR_TYPE_RESERVED_MEMORY:
+ ret = dmar_parse_one_rmrr(entry_header);
+ break;
+ default:
+ printk(KERN_WARNING PREFIX
+ "Unknown DMAR structure type\n");
+ ret = 0; /* for forward compatibility */
+ break;
+ }
+ if (ret)
+ break;
+
+ entry_header = ((void *)entry_header + entry_header->length);
+ }
+ return ret;
+}
+
+
+int __init dmar_table_init(void)
+{
+
+ parse_dmar_table();
+ if (list_empty(&dmar_drhd_units)) {
+ printk(KERN_INFO PREFIX "No DMAR devices found\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/**
+ * early_dmar_detect - checks to see if the platform supports DMAR devices
+ */
+int __init early_dmar_detect(void)
+{
+ acpi_status status = AE_OK;
+
+ /* if we could find DMAR table, then there are DMAR devices */
+ status = acpi_get_table(ACPI_SIG_DMAR, 0,
+ (struct acpi_table_header **)&dmar_tbl);
+
+ if (ACPI_SUCCESS(status) && !dmar_tbl) {
+ printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+ status = AE_NOT_FOUND;
+ }
+
+ return (ACPI_SUCCESS(status) ? 1 : 0);
+}
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
new file mode 100644
index 00000000000..b3d70310af4
--- /dev/null
+++ b/drivers/pci/intel-iommu.c
@@ -0,0 +1,2271 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Copyright (C) Ashok Raj <ashok.raj@intel.com>
+ * Copyright (C) Shaohua Li <shaohua.li@intel.com>
+ * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/sysdev.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/dmar.h>
+#include <linux/dma-mapping.h>
+#include <linux/mempool.h>
+#include "iova.h"
+#include "intel-iommu.h"
+#include <asm/proto.h> /* force_iommu in this header on x86-64 */
+#include <asm/cacheflush.h>
+#include <asm/iommu.h>
+#include "pci.h"
+
+#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
+#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
+
+#define IOAPIC_RANGE_START (0xfee00000)
+#define IOAPIC_RANGE_END (0xfeefffff)
+#define IOVA_START_ADDR (0x1000)
+
+#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+
+#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
+
+#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
+
+static void domain_remove_dev_info(struct dmar_domain *domain);
+
+static int dmar_disabled;
+static int __initdata dmar_map_gfx = 1;
+static int dmar_forcedac;
+
+#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
+static DEFINE_SPINLOCK(device_domain_lock);
+static LIST_HEAD(device_domain_list);
+
+static int __init intel_iommu_setup(char *str)
+{
+ if (!str)
+ return -EINVAL;
+ while (*str) {
+ if (!strncmp(str, "off", 3)) {
+ dmar_disabled = 1;
+ printk(KERN_INFO"Intel-IOMMU: disabled\n");
+ } else if (!strncmp(str, "igfx_off", 8)) {
+ dmar_map_gfx = 0;
+ printk(KERN_INFO
+ "Intel-IOMMU: disable GFX device mapping\n");
+ } else if (!strncmp(str, "forcedac", 8)) {
+ printk (KERN_INFO
+ "Intel-IOMMU: Forcing DAC for PCI devices\n");
+ dmar_forcedac = 1;
+ }
+
+ str += strcspn(str, ",");
+ while (*str == ',')
+ str++;
+ }
+ return 0;
+}
+__setup("intel_iommu=", intel_iommu_setup);
+
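+
For reference, the handler above splits its argument on commas, so the options combine on the kernel command line: booting with intel_iommu=igfx_off,forcedac keeps translation enabled but skips mapping of the graphics device and forces DAC (64-bit) addressing for PCI DMA, while intel_iommu=off disables the IOMMU code entirely.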
+static struct kmem_cache *iommu_domain_cache;
+static struct kmem_cache *iommu_devinfo_cache;
+static struct kmem_cache *iommu_iova_cache;
+
+static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
+{
+ unsigned int flags;
+ void *vaddr;
+
+ /* trying to avoid low memory issues */
+ flags = current->flags & PF_MEMALLOC;
+ current->flags |= PF_MEMALLOC;
+ vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
+ current->flags &= (~PF_MEMALLOC | flags);
+ return vaddr;
+}
+
+
+static inline void *alloc_pgtable_page(void)
+{
+ unsigned int flags;
+ void *vaddr;
+
+ /* trying to avoid low memory issues */
+ flags = current->flags & PF_MEMALLOC;
+ current->flags |= PF_MEMALLOC;
+ vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
+ current->flags &= (~PF_MEMALLOC | flags);
+ return vaddr;
+}
+
+static inline void free_pgtable_page(void *vaddr)
+{
+ free_page((unsigned long)vaddr);
+}
+
+static inline void *alloc_domain_mem(void)
+{
+ return iommu_kmem_cache_alloc(iommu_domain_cache);
+}
+
+static inline void free_domain_mem(void *vaddr)
+{
+ kmem_cache_free(iommu_domain_cache, vaddr);
+}
+
+static inline void * alloc_devinfo_mem(void)
+{
+ return iommu_kmem_cache_alloc(iommu_devinfo_cache);
+}
+
+static inline void free_devinfo_mem(void *vaddr)
+{
+ kmem_cache_free(iommu_devinfo_cache, vaddr);
+}
+
+struct iova *alloc_iova_mem(void)
+{
+ return iommu_kmem_cache_alloc(iommu_iova_cache);
+}
+
+void free_iova_mem(struct iova *iova)
+{
+ kmem_cache_free(iommu_iova_cache, iova);
+}
+
+static inline void __iommu_flush_cache(
+ struct intel_iommu *iommu, void *addr, int size)
+{
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(addr, size);
+}
+
+/* Gets context entry for a given bus and devfn */
+static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
+ u8 bus, u8 devfn)
+{
+ struct root_entry *root;
+ struct context_entry *context;
+ unsigned long phy_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ root = &iommu->root_entry[bus];
+ context = get_context_addr_from_root(root);
+ if (!context) {
+ context = (struct context_entry *)alloc_pgtable_page();
+ if (!context) {
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return NULL;
+ }
+ __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K);
+ phy_addr = virt_to_phys((void *)context);
+ set_root_value(root, phy_addr);
+ set_root_present(root);
+ __iommu_flush_cache(iommu, root, sizeof(*root));
+ }
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return &context[devfn];
+}
+
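+
The lookup above mirrors the hardware's two-level scheme: the root table is indexed by bus number and each root entry points to a 4K context table indexed by devfn, so a device's context entry is effectively root_entry[bus] -> context_table[devfn]. Context tables are allocated lazily the first time a device on that bus is mapped, and both the new table and the updated root entry are written back with __iommu_flush_cache() when the unit is not cache-coherent.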
+static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ struct root_entry *root;
+ struct context_entry *context;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ root = &iommu->root_entry[bus];
+ context = get_context_addr_from_root(root);
+ if (!context) {
+ ret = 0;
+ goto out;
+ }
+ ret = context_present(context[devfn]);
+out:
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return ret;
+}
+
+static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ struct root_entry *root;
+ struct context_entry *context;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ root = &iommu->root_entry[bus];
+ context = get_context_addr_from_root(root);
+ if (context) {
+ context_clear_entry(context[devfn]);
+ __iommu_flush_cache(iommu, &context[devfn], \
+ sizeof(*context));
+ }
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void free_context_table(struct intel_iommu *iommu)
+{
+ struct root_entry *root;
+ int i;
+ unsigned long flags;
+ struct context_entry *context;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ if (!iommu->root_entry) {
+ goto out;
+ }
+ for (i = 0; i < ROOT_ENTRY_NR; i++) {
+ root = &iommu->root_entry[i];
+ context = get_context_addr_from_root(root);
+ if (context)
+ free_pgtable_page(context);
+ }
+ free_pgtable_page(iommu->root_entry);
+ iommu->root_entry = NULL;
+out:
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* page table handling */
+#define LEVEL_STRIDE (9)
+#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+ return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+ return 30 + agaw * LEVEL_STRIDE;
+
+}
+
+static inline int width_to_agaw(int width)
+{
+ return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+ return (12 + (level - 1) * LEVEL_STRIDE);
+}
+
+static inline int address_level_offset(u64 addr, int level)
+{
+ return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
+}
+
+static inline u64 level_mask(int level)
+{
+ return ((u64)-1 << level_to_offset_bits(level));
+}
+
+static inline u64 level_size(int level)
+{
+ return ((u64)1 << level_to_offset_bits(level));
+}
+
+static inline u64 align_to_level(u64 addr, int level)
+{
+ return ((addr + level_size(level) - 1) & level_mask(level));
+}
+
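+
To make the arithmetic above concrete: with the default 48-bit domain width, width_to_agaw(48) = (48 - 30) / 9 = 2 and agaw_to_level(2) = 4, so a DMA address is resolved through four 9-bit page-table levels; level 1 decodes address bits 12-20 and level 4 decodes bits 39-47. A small userspace sketch, illustrative only, that just mirrors the formulas above:

    #include <stdio.h>

    int main(void)
    {
            int width = 48;                 /* DEFAULT_DOMAIN_ADDRESS_WIDTH */
            int agaw  = (width - 30) / 9;   /* width_to_agaw(48) == 2 */
            int level = agaw + 2;           /* agaw_to_level(2)  == 4 */
            int l;

            for (l = 1; l <= level; l++)
                    printf("level %d decodes address bits %d..%d\n",
                           l, 12 + (l - 1) * 9, 12 + l * 9 - 1);
            return 0;
    }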
+static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
+{
+ int addr_width = agaw_to_width(domain->agaw);
+ struct dma_pte *parent, *pte = NULL;
+ int level = agaw_to_level(domain->agaw);
+ int offset;
+ unsigned long flags;
+
+ BUG_ON(!domain->pgd);
+
+ addr &= (((u64)1) << addr_width) - 1;
+ parent = domain->pgd;
+
+ spin_lock_irqsave(&domain->mapping_lock, flags);
+ while (level > 0) {
+ void *tmp_page;
+
+ offset = address_level_offset(addr, level);
+ pte = &parent[offset];
+ if (level == 1)
+ break;
+
+ if (!dma_pte_present(*pte)) {
+ tmp_page = alloc_pgtable_page();
+
+ if (!tmp_page) {
+ spin_unlock_irqrestore(&domain->mapping_lock,
+ flags);
+ return NULL;
+ }
+ __iommu_flush_cache(domain->iommu, tmp_page,
+ PAGE_SIZE_4K);
+ dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
+ /*
+ * high level table always sets r/w, last level page
+ * table control read/write
+ */
+ dma_set_pte_readable(*pte);
+ dma_set_pte_writable(*pte);
+ __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+ }
+ parent = phys_to_virt(dma_pte_addr(*pte));
+ level--;
+ }
+
+ spin_unlock_irqrestore(&domain->mapping_lock, flags);
+ return pte;
+}
+
+/* return address's pte at specific level */
+static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
+ int level)
+{
+ struct dma_pte *parent, *pte = NULL;
+ int total = agaw_to_level(domain->agaw);
+ int offset;
+
+ parent = domain->pgd;
+ while (level <= total) {
+ offset = address_level_offset(addr, total);
+ pte = &parent[offset];
+ if (level == total)
+ return pte;
+
+ if (!dma_pte_present(*pte))
+ break;
+ parent = phys_to_virt(dma_pte_addr(*pte));
+ total--;
+ }
+ return NULL;
+}
+
+/* clear one page's page table */
+static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
+{
+ struct dma_pte *pte = NULL;
+
+ /* get last level pte */
+ pte = dma_addr_level_pte(domain, addr, 1);
+
+ if (pte) {
+ dma_clear_pte(*pte);
+ __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+ }
+}
+
+/* clear last level pte, a tlb flush should be followed */
+static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
+{
+ int addr_width = agaw_to_width(domain->agaw);
+
+ start &= (((u64)1) << addr_width) - 1;
+ end &= (((u64)1) << addr_width) - 1;
+ /* in case it's partial page */
+ start = PAGE_ALIGN_4K(start);
+ end &= PAGE_MASK_4K;
+
+ /* we don't need lock here, nobody else touches the iova range */
+ while (start < end) {
+ dma_pte_clear_one(domain, start);
+ start += PAGE_SIZE_4K;
+ }
+}
+
+/* free page table pages. last level pte should already be cleared */
+static void dma_pte_free_pagetable(struct dmar_domain *domain,
+ u64 start, u64 end)
+{
+ int addr_width = agaw_to_width(domain->agaw);
+ struct dma_pte *pte;
+ int total = agaw_to_level(domain->agaw);
+ int level;
+ u64 tmp;
+
+ start &= (((u64)1) << addr_width) - 1;
+ end &= (((u64)1) << addr_width) - 1;
+
+ /* we don't need lock here, nobody else touches the iova range */
+ level = 2;
+ while (level <= total) {
+ tmp = align_to_level(start, level);
+ if (tmp >= end || (tmp + level_size(level) > end))
+ return;
+
+ while (tmp < end) {
+ pte = dma_addr_level_pte(domain, tmp, level);
+ if (pte) {
+ free_pgtable_page(
+ phys_to_virt(dma_pte_addr(*pte)));
+ dma_clear_pte(*pte);
+ __iommu_flush_cache(domain->iommu,
+ pte, sizeof(*pte));
+ }
+ tmp += level_size(level);
+ }
+ level++;
+ }
+ /* free pgd */
+ if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
+ free_pgtable_page(domain->pgd);
+ domain->pgd = NULL;
+ }
+}
+
+/* iommu handling */
+static int iommu_alloc_root_entry(struct intel_iommu *iommu)
+{
+ struct root_entry *root;
+ unsigned long flags;
+
+ root = (struct root_entry *)alloc_pgtable_page();
+ if (!root)
+ return -ENOMEM;
+
+ __iommu_flush_cache(iommu, root, PAGE_SIZE_4K);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ iommu->root_entry = root;
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return 0;
+}
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
+{\
+ unsigned long start_time = jiffies;\
+ while (1) {\
+ sts = op (iommu->reg + offset);\
+ if (cond)\
+ break;\
+ if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))\
+ panic("DMAR hardware is malfunctioning\n");\
+ cpu_relax();\
+ }\
+}
+
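+
IOMMU_WAIT_OP above is a busy-wait with a one-minute deadline: it keeps re-reading the register through op, evaluates cond on the value left in sts, and panics if the hardware never signals completion. Expanded by hand for the set-root-table-pointer wait used just below, it behaves roughly like this (a sketch, not a literal macro expansion):

    unsigned long start_time = jiffies;
    u32 sts;

    for (;;) {
            sts = readl(iommu->reg + DMAR_GSTS_REG);
            if (sts & DMA_GSTS_RTPS)        /* hardware latched the new root table */
                    break;
            if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))
                    panic("DMAR hardware is malfunctioning\n");
            cpu_relax();
    }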
+static void iommu_set_root_entry(struct intel_iommu *iommu)
+{
+ void *addr;
+ u32 cmd, sts;
+ unsigned long flag;
+
+ addr = iommu->root_entry;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
+
+ cmd = iommu->gcmd | DMA_GCMD_SRTP;
+ writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_RTPS), sts);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static void iommu_flush_write_buffer(struct intel_iommu *iommu)
+{
+ u32 val;
+ unsigned long flag;
+
+ if (!cap_rwbf(iommu->cap))
+ return;
+ val = iommu->gcmd | DMA_GCMD_WBF;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(val, iommu->reg + DMAR_GCMD_REG);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (!(val & DMA_GSTS_WBFS)), val);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+/* return value determines if we need a write buffer flush */
+static int __iommu_flush_context(struct intel_iommu *iommu,
+ u16 did, u16 source_id, u8 function_mask, u64 type,
+ int non_present_entry_flush)
+{
+ u64 val = 0;
+ unsigned long flag;
+
+ /*
+ * In the non-present entry flush case, if the hardware doesn't cache
+ * non-present entries we do nothing, and if it does cache them we
+ * flush the entries of domain 0 (the domain id used to cache any
+ * non-present entries).
+ */
+ if (non_present_entry_flush) {
+ if (!cap_caching_mode(iommu->cap))
+ return 1;
+ else
+ did = 0;
+ }
+
+ switch (type) {
+ case DMA_CCMD_GLOBAL_INVL:
+ val = DMA_CCMD_GLOBAL_INVL;
+ break;
+ case DMA_CCMD_DOMAIN_INVL:
+ val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
+ break;
+ case DMA_CCMD_DEVICE_INVL:
+ val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
+ | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
+ break;
+ default:
+ BUG();
+ }
+ val |= DMA_CCMD_ICC;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
+ dmar_readq, (!(val & DMA_CCMD_ICC)), val);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+ /* the context-entry invalidation will implicitly flush the write buffer */
+ return 0;
+}
+
+static int inline iommu_flush_context_global(struct intel_iommu *iommu,
+ int non_present_entry_flush)
+{
+ return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+ non_present_entry_flush);
+}
+
+static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
+ int non_present_entry_flush)
+{
+ return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
+ non_present_entry_flush);
+}
+
+static int inline iommu_flush_context_device(struct intel_iommu *iommu,
+ u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
+{
+ return __iommu_flush_context(iommu, did, source_id, function_mask,
+ DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
+}
+
+/* return value determines if we need a write buffer flush */
+static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
+ u64 addr, unsigned int size_order, u64 type,
+ int non_present_entry_flush)
+{
+ int tlb_offset = ecap_iotlb_offset(iommu->ecap);
+ u64 val = 0, val_iva = 0;
+ unsigned long flag;
+
+ /*
+ * In the non-present entry flush case, if the hardware doesn't cache
+ * non-present entries we do nothing, and if it does cache them we
+ * flush the entries of domain 0 (the domain id used to cache any
+ * non-present entries).
+ */
+ if (non_present_entry_flush) {
+ if (!cap_caching_mode(iommu->cap))
+ return 1;
+ else
+ did = 0;
+ }
+
+ switch (type) {
+ case DMA_TLB_GLOBAL_FLUSH:
+ /* global flush doesn't need set IVA_REG */
+ val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
+ break;
+ case DMA_TLB_DSI_FLUSH:
+ val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
+ break;
+ case DMA_TLB_PSI_FLUSH:
+ val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
+ /* Note: always flush non-leaf currently */
+ val_iva = size_order | addr;
+ break;
+ default:
+ BUG();
+ }
+ /* Note: set drain read/write */
+#if 0
+ /*
+ * This is probably only here to be extra safe; it looks like we
+ * can ignore it without any impact.
+ */
+ if (cap_read_drain(iommu->cap))
+ val |= DMA_TLB_READ_DRAIN;
+#endif
+ if (cap_write_drain(iommu->cap))
+ val |= DMA_TLB_WRITE_DRAIN;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ /* Note: Only uses first TLB reg currently */
+ if (val_iva)
+ dmar_writeq(iommu->reg + tlb_offset, val_iva);
+ dmar_writeq(iommu->reg + tlb_offset + 8, val);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, tlb_offset + 8,
+ dmar_readq, (!(val & DMA_TLB_IVT)), val);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+ /* check IOTLB invalidation granularity */
+ if (DMA_TLB_IAIG(val) == 0)
+ printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
+ if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
+ pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
+ DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
+ /* the IOTLB invalidation will implicitly flush the write buffer */
+ return 0;
+}
+
+static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
+ int non_present_entry_flush)
+{
+ return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+ non_present_entry_flush);
+}
+
+static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
+ int non_present_entry_flush)
+{
+ return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
+ non_present_entry_flush);
+}
+
+static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ u64 addr, unsigned int pages, int non_present_entry_flush)
+{
+ unsigned int mask;
+
+ BUG_ON(addr & (~PAGE_MASK_4K));
+ BUG_ON(pages == 0);
+
+ /* Fallback to domain selective flush if no PSI support */
+ if (!cap_pgsel_inv(iommu->cap))
+ return iommu_flush_iotlb_dsi(iommu, did,
+ non_present_entry_flush);
+
+ /*
+ * PSI requires the page size to be 2 ^ x, and the base address to be
+ * naturally aligned to that size.
+ */
+ mask = ilog2(__roundup_pow_of_two(pages));
+ /* Fallback to domain selective flush if size is too big */
+ if (mask > cap_max_amask_val(iommu->cap))
+ return iommu_flush_iotlb_dsi(iommu, did,
+ non_present_entry_flush);
+
+ return __iommu_flush_iotlb(iommu, did, addr, mask,
+ DMA_TLB_PSI_FLUSH, non_present_entry_flush);
+}
+
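+
The page-selective invalidation above encodes the size as a power-of-two page count: a 5-page request rounds up to 8 pages, so mask = 3 and the hardware invalidates an aligned 8-page (32K) window covering the range. If that mask exceeds cap_max_amask_val(), or the unit lacks PSI support entirely, the code falls back to a domain-selective flush. Illustrative arithmetic, mirroring the call above:

    unsigned int pages = 5;
    unsigned int mask  = ilog2(__roundup_pow_of_two(pages));   /* ilog2(8) == 3 */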
+static int iommu_enable_translation(struct intel_iommu *iommu)
+{
+ u32 sts;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->register_lock, flags);
+ writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_TES), sts);
+
+ iommu->gcmd |= DMA_GCMD_TE;
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+ return 0;
+}
+
+static int iommu_disable_translation(struct intel_iommu *iommu)
+{
+ u32 sts;
+ unsigned long flag;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ iommu->gcmd &= ~DMA_GCMD_TE;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (!(sts & DMA_GSTS_TES)), sts);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+ return 0;
+}
+
+/* iommu interrupt handling. Most stuff are MSI-like. */
+
+static char *fault_reason_strings[] =
+{
+ "Software",
+ "Present bit in root entry is clear",
+ "Present bit in context entry is clear",
+ "Invalid context entry",
+ "Access beyond MGAW",
+ "PTE Write access is not set",
+ "PTE Read access is not set",
+ "Next page table ptr is invalid",
+ "Root table address invalid",
+ "Context table ptr is invalid",
+ "non-zero reserved fields in RTP",
+ "non-zero reserved fields in CTP",
+ "non-zero reserved fields in PTE",
+ "Unknown"
+};
+#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
+
+char *dmar_get_fault_reason(u8 fault_reason)
+{
+ if (fault_reason > MAX_FAULT_REASON_IDX)
+ return fault_reason_strings[MAX_FAULT_REASON_IDX];
+ else
+ return fault_reason_strings[fault_reason];
+}
+
+void dmar_msi_unmask(unsigned int irq)
+{
+ struct intel_iommu *iommu = get_irq_data(irq);
+ unsigned long flag;
+
+ /* unmask it */
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(0, iommu->reg + DMAR_FECTL_REG);
+ /* Read a reg to force flush the post write */
+ readl(iommu->reg + DMAR_FECTL_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_mask(unsigned int irq)
+{
+ unsigned long flag;
+ struct intel_iommu *iommu = get_irq_data(irq);
+
+ /* mask it */
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+ /* Read a reg to force flush the post write */
+ readl(iommu->reg + DMAR_FECTL_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_write(int irq, struct msi_msg *msg)
+{
+ struct intel_iommu *iommu = get_irq_data(irq);
+ unsigned long flag;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+ writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+ writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_read(int irq, struct msi_msg *msg)
+{
+ struct intel_iommu *iommu = get_irq_data(irq);
+ unsigned long flag;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+ msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+ msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
+ u8 fault_reason, u16 source_id, u64 addr)
+{
+ char *reason;
+
+ reason = dmar_get_fault_reason(fault_reason);
+
+ printk(KERN_ERR
+ "DMAR:[%s] Request device [%02x:%02x.%d] "
+ "fault addr %llx \n"
+ "DMAR:[fault reason %02d] %s\n",
+ (type ? "DMA Read" : "DMA Write"),
+ (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+ PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+ return 0;
+}
+
+#define PRIMARY_FAULT_REG_LEN (16)
+static irqreturn_t iommu_page_fault(int irq, void *dev_id)
+{
+ struct intel_iommu *iommu = dev_id;
+ int reg, fault_index;
+ u32 fault_status;
+ unsigned long flag;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+
+ /* TBD: ignore advanced fault log currently */
+ if (!(fault_status & DMA_FSTS_PPF))
+ goto clear_overflow;
+
+ fault_index = dma_fsts_fault_record_index(fault_status);
+ reg = cap_fault_reg_offset(iommu->cap);
+ while (1) {
+ u8 fault_reason;
+ u16 source_id;
+ u64 guest_addr;
+ int type;
+ u32 data;
+
+ /* highest 32 bits */
+ data = readl(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 12);
+ if (!(data & DMA_FRCD_F))
+ break;
+
+ fault_reason = dma_frcd_fault_reason(data);
+ type = dma_frcd_type(data);
+
+ data = readl(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 8);
+ source_id = dma_frcd_source_id(data);
+
+ guest_addr = dmar_readq(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN);
+ guest_addr = dma_frcd_page_addr(guest_addr);
+ /* clear the fault */
+ writel(DMA_FRCD_F, iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+ iommu_page_fault_do_one(iommu, type, fault_reason,
+ source_id, guest_addr);
+
+ fault_index++;
+ if (fault_index >= cap_num_fault_regs(iommu->cap))
+ fault_index = 0;
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ }
+clear_overflow:
+ /* clear primary fault overflow */
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+ if (fault_status & DMA_FSTS_PFO)
+ writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+ return IRQ_HANDLED;
+}
+
+int dmar_set_interrupt(struct intel_iommu *iommu)
+{
+ int irq, ret;
+
+ irq = create_irq();
+ if (!irq) {
+ printk(KERN_ERR "IOMMU: no free vectors\n");
+ return -EINVAL;
+ }
+
+ set_irq_data(irq, iommu);
+ iommu->irq = irq;
+
+ ret = arch_setup_dmar_msi(irq);
+ if (ret) {
+ set_irq_data(irq, NULL);
+ iommu->irq = 0;
+ destroy_irq(irq);
+ return ret;
+ }
+
+ /* Clear any pending faults before the handler is installed */
+ iommu_page_fault(irq, iommu);
+
+ ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
+ if (ret)
+ printk(KERN_ERR "IOMMU: can't request irq\n");
+ return ret;
+}
+
+static int iommu_init_domains(struct intel_iommu *iommu)
+{
+ unsigned long ndomains;
+ unsigned long nlongs;
+
+ ndomains = cap_ndoms(iommu->cap);
+ pr_debug("Number of Domains supportd <%ld>\n", ndomains);
+ nlongs = BITS_TO_LONGS(ndomains);
+
+ /* TBD: there might be 64K domains,
+ * consider other allocation for future chip
+ */
+ iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
+ if (!iommu->domain_ids) {
+ printk(KERN_ERR "Allocating domain id array failed\n");
+ return -ENOMEM;
+ }
+ iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
+ GFP_KERNEL);
+ if (!iommu->domains) {
+ printk(KERN_ERR "Allocating domain array failed\n");
+ kfree(iommu->domain_ids);
+ return -ENOMEM;
+ }
+
+ /*
+ * if Caching mode is set, then invalid translations are tagged
+ * with domainid 0. Hence we need to pre-allocate it.
+ */
+ if (cap_caching_mode(iommu->cap))
+ set_bit(0, iommu->domain_ids);
+ return 0;
+}
+
+static struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
+{
+ struct intel_iommu *iommu;
+ int ret;
+ int map_size;
+ u32 ver;
+
+ iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return NULL;
+ iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
+ if (!iommu->reg) {
+ printk(KERN_ERR "IOMMU: can't map the region\n");
+ goto error;
+ }
+ iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+ iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+ /* the registers might be more than one page */
+ map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+ cap_max_fault_reg_offset(iommu->cap));
+ map_size = PAGE_ALIGN_4K(map_size);
+ if (map_size > PAGE_SIZE_4K) {
+ iounmap(iommu->reg);
+ iommu->reg = ioremap(drhd->reg_base_addr, map_size);
+ if (!iommu->reg) {
+ printk(KERN_ERR "IOMMU: can't map the region\n");
+ goto error;
+ }
+ }
+
+ ver = readl(iommu->reg + DMAR_VER_REG);
+ pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+ drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+ iommu->cap, iommu->ecap);
+ ret = iommu_init_domains(iommu);
+ if (ret)
+ goto error_unmap;
+ spin_lock_init(&iommu->lock);
+ spin_lock_init(&iommu->register_lock);
+
+ drhd->iommu = iommu;
+ return iommu;
+error_unmap:
+ iounmap(iommu->reg);
+ iommu->reg = NULL;
+error:
+ kfree(iommu);
+ return NULL;
+}
+
+static void domain_exit(struct dmar_domain *domain);
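+/*
+ * Free all domains owned by this unit, disable translation and release its
+ * interrupt, context tables and register mapping.
+ */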
+static void free_iommu(struct intel_iommu *iommu)
+{
+ struct dmar_domain *domain;
+ int i;
+
+ if (!iommu)
+ return;
+
+ i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
+ for (; i < cap_ndoms(iommu->cap); ) {
+ domain = iommu->domains[i];
+ clear_bit(i, iommu->domain_ids);
+ domain_exit(domain);
+ i = find_next_bit(iommu->domain_ids,
+ cap_ndoms(iommu->cap), i+1);
+ }
+
+ if (iommu->gcmd & DMA_GCMD_TE)
+ iommu_disable_translation(iommu);
+
+ if (iommu->irq) {
+ set_irq_data(iommu->irq, NULL);
+ /* This will mask the irq */
+ free_irq(iommu->irq, iommu);
+ destroy_irq(iommu->irq);
+ }
+
+ kfree(iommu->domains);
+ kfree(iommu->domain_ids);
+
+ /* free context mapping */
+ free_context_table(iommu);
+
+ if (iommu->reg)
+ iounmap(iommu->reg);
+ kfree(iommu);
+}
+
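+/* Allocate a domain structure and claim a free domain id on this iommu. */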
+static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
+{
+ unsigned long num;
+ unsigned long ndomains;
+ struct dmar_domain *domain;
+ unsigned long flags;
+
+ domain = alloc_domain_mem();
+ if (!domain)
+ return NULL;
+
+ ndomains = cap_ndoms(iommu->cap);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ if (num >= ndomains) {
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ free_domain_mem(domain);
+ printk(KERN_ERR "IOMMU: no free domain ids\n");
+ return NULL;
+ }
+
+ set_bit(num, iommu->domain_ids);
+ domain->id = num;
+ domain->iommu = iommu;
+ iommu->domains[num] = domain;
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return domain;
+}
+
+static void iommu_free_domain(struct dmar_domain *domain)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->iommu->lock, flags);
+ clear_bit(domain->id, domain->iommu->domain_ids);
+ spin_unlock_irqrestore(&domain->iommu->lock, flags);
+}
+
+static struct iova_domain reserved_iova_list;
+
+static void dmar_init_reserved_ranges(void)
+{
+ struct pci_dev *pdev = NULL;
+ struct iova *iova;
+ int i;
+ u64 addr, size;
+
+ init_iova_domain(&reserved_iova_list);
+
+ /* IOAPIC ranges shouldn't be accessed by DMA */
+ iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
+ IOVA_PFN(IOAPIC_RANGE_END));
+ if (!iova)
+ printk(KERN_ERR "Reserve IOAPIC range failed\n");
+
+ /* Reserve all PCI MMIO to avoid peer-to-peer access */
+ for_each_pci_dev(pdev) {
+ struct resource *r;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ r = &pdev->resource[i];
+ if (!r->flags || !(r->flags & IORESOURCE_MEM))
+ continue;
+ addr = r->start;
+ addr &= PAGE_MASK_4K;
+ size = r->end - addr;
+ size = PAGE_ALIGN_4K(size);
+ iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
+ IOVA_PFN(size + addr) - 1);
+ if (!iova)
+ printk(KERN_ERR "Reserve iova failed\n");
+ }
+ }
+
+}
+
+static void domain_reserve_special_ranges(struct dmar_domain *domain)
+{
+ copy_reserved_iova(&reserved_iova_list, &domain->iovad);
+}
+
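+/*
+ * Each page-table level resolves 9 address bits on top of the 12-bit page
+ * offset, so round the guest address width up to the next 12 + 9*n value
+ * (capped at 64) that the page-table layout can actually express.
+ */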
+static inline int guestwidth_to_adjustwidth(int gaw)
+{
+ int agaw;
+ int r = (gaw - 12) % 9;
+
+ if (r == 0)
+ agaw = gaw;
+ else
+ agaw = gaw + 9 - r;
+ if (agaw > 64)
+ agaw = 64;
+ return agaw;
+}
+
+static int domain_init(struct dmar_domain *domain, int guest_width)
+{
+ struct intel_iommu *iommu;
+ int adjust_width, agaw;
+ unsigned long sagaw;
+
+ init_iova_domain(&domain->iovad);
+ spin_lock_init(&domain->mapping_lock);
+
+ domain_reserve_special_ranges(domain);
+
+ /* calculate AGAW */
+ iommu = domain->iommu;
+ if (guest_width > cap_mgaw(iommu->cap))
+ guest_width = cap_mgaw(iommu->cap);
+ domain->gaw = guest_width;
+ adjust_width = guestwidth_to_adjustwidth(guest_width);
+ agaw = width_to_agaw(adjust_width);
+ sagaw = cap_sagaw(iommu->cap);
+ if (!test_bit(agaw, &sagaw)) {
+ /* hardware doesn't support it, choose a bigger one */
+ pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
+ agaw = find_next_bit(&sagaw, 5, agaw);
+ if (agaw >= 5)
+ return -ENODEV;
+ }
+ domain->agaw = agaw;
+ INIT_LIST_HEAD(&domain->devices);
+
+ /* always allocate the top pgd */
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+ if (!domain->pgd)
+ return -ENOMEM;
+ __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
+ return 0;
+}
+
+static void domain_exit(struct dmar_domain *domain)
+{
+ u64 end;
+
+ /* Domain 0 is reserved, so don't process it */
+ if (!domain)
+ return;
+
+ domain_remove_dev_info(domain);
+ /* destroy iovas */
+ put_iova_domain(&domain->iovad);
+ end = DOMAIN_MAX_ADDR(domain->gaw);
+ end = end & (~PAGE_MASK_4K);
+
+ /* clear ptes */
+ dma_pte_clear_range(domain, 0, end);
+
+ /* free page tables */
+ dma_pte_free_pagetable(domain, 0, end);
+
+ iommu_free_domain(domain);
+ free_domain_mem(domain);
+}
+
+static int domain_context_mapping_one(struct dmar_domain *domain,
+ u8 bus, u8 devfn)
+{
+ struct context_entry *context;
+ struct intel_iommu *iommu = domain->iommu;
+ unsigned long flags;
+
+ pr_debug("Set context mapping for %02x:%02x.%d\n",
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ BUG_ON(!domain->pgd);
+ context = device_to_context_entry(iommu, bus, devfn);
+ if (!context)
+ return -ENOMEM;
+ spin_lock_irqsave(&iommu->lock, flags);
+ if (context_present(*context)) {
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return 0;
+ }
+
+ context_set_domain_id(*context, domain->id);
+ context_set_address_width(*context, domain->agaw);
+ context_set_address_root(*context, virt_to_phys(domain->pgd));
+ context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
+ context_set_fault_enable(*context);
+ context_set_present(*context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
+
+ /* it's a non-present to present mapping */
+ if (iommu_flush_context_device(iommu, domain->id,
+ (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
+ iommu_flush_write_buffer(iommu);
+ else
+ iommu_flush_iotlb_dsi(iommu, 0, 0);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return 0;
+}
+
+static int
+domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
+{
+ int ret;
+ struct pci_dev *tmp, *parent;
+
+ ret = domain_context_mapping_one(domain, pdev->bus->number,
+ pdev->devfn);
+ if (ret)
+ return ret;
+
+ /* dependent device mapping */
+ tmp = pci_find_upstream_pcie_bridge(pdev);
+ if (!tmp)
+ return 0;
+ /* Secondary interface's bus number and devfn 0 */
+ parent = pdev->bus->self;
+ while (parent != tmp) {
+ ret = domain_context_mapping_one(domain, parent->bus->number,
+ parent->devfn);
+ if (ret)
+ return ret;
+ parent = parent->bus->self;
+ }
+ if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+ return domain_context_mapping_one(domain,
+ tmp->subordinate->number, 0);
+ else /* this is a legacy PCI bridge */
+ return domain_context_mapping_one(domain,
+ tmp->bus->number, tmp->devfn);
+}
+
+static int domain_context_mapped(struct dmar_domain *domain,
+ struct pci_dev *pdev)
+{
+ int ret;
+ struct pci_dev *tmp, *parent;
+
+ ret = device_context_mapped(domain->iommu,
+ pdev->bus->number, pdev->devfn);
+ if (!ret)
+ return ret;
+ /* dependent device mapping */
+ tmp = pci_find_upstream_pcie_bridge(pdev);
+ if (!tmp)
+ return ret;
+ /* Secondary interface's bus number and devfn 0 */
+ parent = pdev->bus->self;
+ while (parent != tmp) {
+ ret = device_context_mapped(domain->iommu, parent->bus->number,
+ parent->devfn);
+ if (!ret)
+ return ret;
+ parent = parent->bus->self;
+ }
+ if (tmp->is_pcie)
+ return device_context_mapped(domain->iommu,
+ tmp->subordinate->number, 0);
+ else
+ return device_context_mapped(domain->iommu,
+ tmp->bus->number, tmp->devfn);
+}
+
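+/*
+ * Map the physical range hpa .. hpa+size at IO virtual address iova, one
+ * 4K page at a time, flushing each new PTE out of the CPU cache.
+ */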
+static int
+domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
+ u64 hpa, size_t size, int prot)
+{
+ u64 start_pfn, end_pfn;
+ struct dma_pte *pte;
+ int index;
+
+ if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
+ return -EINVAL;
+ iova &= PAGE_MASK_4K;
+ start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
+ end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
+ index = 0;
+ while (start_pfn < end_pfn) {
+ pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
+ if (!pte)
+ return -ENOMEM;
+ /* We don't need a lock here; nobody else
+ * touches this iova range
+ */
+ BUG_ON(dma_pte_addr(*pte));
+ dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
+ dma_set_pte_prot(*pte, prot);
+ __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+ start_pfn++;
+ index++;
+ }
+ return 0;
+}
+
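+/* Clear the device's context entry and flush the context and IOTLB caches. */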
+static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+{
+ clear_context_table(domain->iommu, bus, devfn);
+ iommu_flush_context_global(domain->iommu, 0);
+ iommu_flush_iotlb_global(domain->iommu, 0);
+}
+
+static void domain_remove_dev_info(struct dmar_domain *domain)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ while (!list_empty(&domain->devices)) {
+ info = list_entry(domain->devices.next,
+ struct device_domain_info, link);
+ list_del(&info->link);
+ list_del(&info->global);
+ if (info->dev)
+ info->dev->dev.archdata.iommu = NULL;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ detach_domain_for_dev(info->domain, info->bus, info->devfn);
+ free_devinfo_mem(info);
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+/*
+ * find_domain
+ * Note: struct pci_dev->dev.archdata.iommu stores the info
+ */
+struct dmar_domain *
+find_domain(struct pci_dev *pdev)
+{
+ struct device_domain_info *info;
+
+ /* No lock here, assumes no domain exit in normal case */
+ info = pdev->dev.archdata.iommu;
+ if (info)
+ return info->domain;
+ return NULL;
+}
+
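+/* Return 1 if dev, or any bridge upstream of it, is in the devices[] list. */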
+static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
+ struct pci_dev *dev)
+{
+ int index;
+
+ while (dev) {
+ for (index = 0; index < cnt; index ++)
+ if (dev == devices[index])
+ return 1;
+
+ /* Check our parent */
+ dev = dev->bus->self;
+ }
+
+ return 0;
+}
+
+static struct dmar_drhd_unit *
+dmar_find_matched_drhd_unit(struct pci_dev *dev)
+{
+ struct dmar_drhd_unit *drhd = NULL;
+
+ list_for_each_entry(drhd, &dmar_drhd_units, list) {
+ if (drhd->include_all || dmar_pci_device_match(drhd->devices,
+ drhd->devices_cnt, dev))
+ return drhd;
+ }
+
+ return NULL;
+}
+
+/* domain is initialized */
+static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
+{
+ struct dmar_domain *domain, *found = NULL;
+ struct intel_iommu *iommu;
+ struct dmar_drhd_unit *drhd;
+ struct device_domain_info *info, *tmp;
+ struct pci_dev *dev_tmp;
+ unsigned long flags;
+ int bus = 0, devfn = 0;
+
+ domain = find_domain(pdev);
+ if (domain)
+ return domain;
+
+ dev_tmp = pci_find_upstream_pcie_bridge(pdev);
+ if (dev_tmp) {
+ if (dev_tmp->is_pcie) {
+ bus = dev_tmp->subordinate->number;
+ devfn = 0;
+ } else {
+ bus = dev_tmp->bus->number;
+ devfn = dev_tmp->devfn;
+ }
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &device_domain_list, global) {
+ if (info->bus == bus && info->devfn == devfn) {
+ found = info->domain;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ /* pcie-pci bridge already has a domain, use it */
+ if (found) {
+ domain = found;
+ goto found_domain;
+ }
+ }
+
+ /* Allocate new domain for the device */
+ drhd = dmar_find_matched_drhd_unit(pdev);
+ if (!drhd) {
+ printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
+ pci_name(pdev));
+ return NULL;
+ }
+ iommu = drhd->iommu;
+
+ domain = iommu_alloc_domain(iommu);
+ if (!domain)
+ goto error;
+
+ if (domain_init(domain, gaw)) {
+ domain_exit(domain);
+ goto error;
+ }
+
+ /* register pcie-to-pci device */
+ if (dev_tmp) {
+ info = alloc_devinfo_mem();
+ if (!info) {
+ domain_exit(domain);
+ goto error;
+ }
+ info->bus = bus;
+ info->devfn = devfn;
+ info->dev = NULL;
+ info->domain = domain;
+ /* This domain is shared by devices under p2p bridge */
+ domain->flags |= DOMAIN_FLAG_MULTIPLE_DEVICES;
+
+ /* pcie-to-pci bridge already has a domain, use it */
+ found = NULL;
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(tmp, &device_domain_list, global) {
+ if (tmp->bus == bus && tmp->devfn == devfn) {
+ found = tmp->domain;
+ break;
+ }
+ }
+ if (found) {
+ free_devinfo_mem(info);
+ domain_exit(domain);
+ domain = found;
+ } else {
+ list_add(&info->link, &domain->devices);
+ list_add(&info->global, &device_domain_list);
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ }
+
+found_domain:
+ info = alloc_devinfo_mem();
+ if (!info)
+ goto error;
+ info->bus = pdev->bus->number;
+ info->devfn = pdev->devfn;
+ info->dev = pdev;
+ info->domain = domain;
+ spin_lock_irqsave(&device_domain_lock, flags);
+ /* somebody else beat us to it */
+ found = find_domain(pdev);
+ if (found != NULL) {
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (found != domain) {
+ domain_exit(domain);
+ domain = found;
+ }
+ free_devinfo_mem(info);
+ return domain;
+ }
+ list_add(&info->link, &domain->devices);
+ list_add(&info->global, &device_domain_list);
+ pdev->dev.archdata.iommu = info;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ return domain;
+error:
+ /* recheck it here, maybe others set it */
+ return find_domain(pdev);
+}
+
+static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
+{
+ struct dmar_domain *domain;
+ unsigned long size;
+ u64 base;
+ int ret;
+
+ printk(KERN_INFO
+ "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+ pci_name(pdev), start, end);
+ /* page table init */
+ domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ if (!domain)
+ return -ENOMEM;
+
+ /* The address might not be aligned */
+ base = start & PAGE_MASK_4K;
+ size = end - base;
+ size = PAGE_ALIGN_4K(size);
+ if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
+ IOVA_PFN(base + size) - 1)) {
+ printk(KERN_ERR "IOMMU: reserve iova failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ pr_debug("Mapping reserved region %lx@%llx for %s\n",
+ size, base, pci_name(pdev));
+ /*
+ * RMRR range might overlap with the physical memory range,
+ * clear it first
+ */
+ dma_pte_clear_range(domain, base, base + size);
+
+ ret = domain_page_mapping(domain, base, base, size,
+ DMA_PTE_READ|DMA_PTE_WRITE);
+ if (ret)
+ goto error;
+
+ /* context entry init */
+ ret = domain_context_mapping(domain, pdev);
+ if (!ret)
+ return 0;
+error:
+ domain_exit(domain);
+ return ret;
+
+}
+
+static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
+ struct pci_dev *pdev)
+{
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ return 0;
+ return iommu_prepare_identity_map(pdev, rmrr->base_address,
+ rmrr->end_address + 1);
+}
+
+#ifdef CONFIG_DMAR_GFX_WA
+extern int arch_get_ram_range(int slot, u64 *addr, u64 *size);
+static void __init iommu_prepare_gfx_mapping(void)
+{
+ struct pci_dev *pdev = NULL;
+ u64 base, size;
+ int slot;
+ int ret;
+
+ for_each_pci_dev(pdev) {
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
+ !IS_GFX_DEVICE(pdev))
+ continue;
+ printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
+ pci_name(pdev));
+ slot = arch_get_ram_range(0, &base, &size);
+ while (slot >= 0) {
+ ret = iommu_prepare_identity_map(pdev,
+ base, base + size);
+ if (ret)
+ goto error;
+ slot = arch_get_ram_range(slot, &base, &size);
+ }
+ continue;
+error:
+ printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
+ }
+}
+#endif
+
+#ifdef CONFIG_DMAR_FLOPPY_WA
+static inline void iommu_prepare_isa(void)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+ if (!pdev)
+ return;
+
+ printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
+ ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
+
+ if (ret)
+ printk("IOMMU: Failed to create 0-64M identity map, "
+ "floppy might not work\n");
+
+}
+#else
+static inline void iommu_prepare_isa(void)
+{
+ return;
+}
+#endif /* !CONFIG_DMAR_FLOPPY_WA */
+
+int __init init_dmars(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct dmar_rmrr_unit *rmrr;
+ struct pci_dev *pdev;
+ struct intel_iommu *iommu;
+ int ret, unit = 0;
+
+ /*
+ * for each drhd
+ * allocate root
+ * initialize and program root entry to not present
+ * endfor
+ */
+ for_each_drhd_unit(drhd) {
+ if (drhd->ignored)
+ continue;
+ iommu = alloc_iommu(drhd);
+ if (!iommu) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * TBD:
+ * we could share the same root & context tables
+ * among all IOMMUs. Need to split it later.
+ */
+ ret = iommu_alloc_root_entry(iommu);
+ if (ret) {
+ printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+ goto error;
+ }
+ }
+
+ /*
+ * For each rmrr
+ * for each dev attached to rmrr
+ * do
+ * locate drhd for dev, alloc domain for dev
+ * allocate free domain
+ * allocate page table entries for rmrr
+ * if context not allocated for bus
+ * allocate and init context
+ * set present in root table for this bus
+ * init context with domain, translation etc
+ * endfor
+ * endfor
+ */
+ for_each_rmrr_units(rmrr) {
+ int i;
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ pdev = rmrr->devices[i];
+ /* some BIOSes list non-existent devices in the DMAR table */
+ if (!pdev)
+ continue;
+ ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+ if (ret)
+ printk(KERN_ERR
+ "IOMMU: mapping reserved region failed\n");
+ }
+ }
+
+ iommu_prepare_gfx_mapping();
+
+ iommu_prepare_isa();
+
+ /*
+ * for each drhd
+ * enable fault log
+ * global invalidate context cache
+ * global invalidate iotlb
+ * enable translation
+ */
+ for_each_drhd_unit(drhd) {
+ if (drhd->ignored)
+ continue;
+ iommu = drhd->iommu;
+ sprintf(iommu->name, "dmar%d", unit++);
+
+ iommu_flush_write_buffer(iommu);
+
+ ret = dmar_set_interrupt(iommu);
+ if (ret)
+ goto error;
+
+ iommu_set_root_entry(iommu);
+
+ iommu_flush_context_global(iommu, 0);
+ iommu_flush_iotlb_global(iommu, 0);
+
+ ret = iommu_enable_translation(iommu);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+error:
+ for_each_drhd_unit(drhd) {
+ if (drhd->ignored)
+ continue;
+ iommu = drhd->iommu;
+ free_iommu(iommu);
+ }
+ return ret;
+}
+
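+/* Bytes needed to map [host_addr, host_addr + size), rounded up to whole
+ * 4K pages once the offset within the first page is accounted for. */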
+static inline u64 aligned_size(u64 host_addr, size_t size)
+{
+ u64 addr;
+ addr = (host_addr & (~PAGE_MASK_4K)) + size;
+ return PAGE_ALIGN_4K(addr);
+}
+
+struct iova *
+iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
+{
+ struct iova *piova;
+
+ /* Make sure it's in range */
+ end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
+ if (!size || (IOVA_START_ADDR + size > end))
+ return NULL;
+
+ piova = alloc_iova(&domain->iovad,
+ size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
+ return piova;
+}
+
+static struct iova *
+__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
+ size_t size)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct iova *iova = NULL;
+
+ if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
+ iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
+ } else {
+ /*
+ * First try to allocate an io virtual address in
+ * DMA_32BIT_MASK and, if that fails, then try allocating
+ * from the higher range
+ */
+ iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
+ if (!iova)
+ iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
+ }
+
+ if (!iova) {
+ printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
+ return NULL;
+ }
+
+ return iova;
+}
+
+static struct dmar_domain *
+get_valid_domain_for_dev(struct pci_dev *pdev)
+{
+ struct dmar_domain *domain;
+ int ret;
+
+ domain = get_domain_for_dev(pdev,
+ DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ if (!domain) {
+ printk(KERN_ERR
+ "Allocating domain for %s failed\n", pci_name(pdev));
+ return NULL;
+ }
+
+ /* make sure context mapping is ok */
+ if (unlikely(!domain_context_mapped(domain, pdev))) {
+ ret = domain_context_mapping(domain, pdev);
+ if (ret) {
+ printk(KERN_ERR
+ "Domain context map for %s failed\n",
+ pci_name(pdev));
+ return NULL;
+ }
+ }
+
+ return domain;
+}
+
+static dma_addr_t intel_map_single(struct device *hwdev, void *addr,
+ size_t size, int dir)
+{
+ struct pci_dev *pdev = to_pci_dev(hwdev);
+ int ret;
+ struct dmar_domain *domain;
+ unsigned long start_addr;
+ struct iova *iova;
+ int prot = 0;
+
+ BUG_ON(dir == DMA_NONE);
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ return virt_to_bus(addr);
+
+ domain = get_valid_domain_for_dev(pdev);
+ if (!domain)
+ return 0;
+
+ addr = (void *)virt_to_phys(addr);
+ size = aligned_size((u64)addr, size);
+
+ iova = __intel_alloc_iova(hwdev, domain, size);
+ if (!iova)
+ goto error;
+
+ start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+
+ /*
+ * Check if DMAR supports zero-length reads on write only
+ * mappings.
+ */
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
+ !cap_zlr(domain->iommu->cap))
+ prot |= DMA_PTE_READ;
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ prot |= DMA_PTE_WRITE;
+ /*
+ * addr .. (addr + size) might span a partial page, so map the whole
+ * page. Note: if two parts of one page are separately mapped, we
+ * might have two guest_addr mappings to the same host addr, but this
+ * is not a big problem
+ */
+ ret = domain_page_mapping(domain, start_addr,
+ ((u64)addr) & PAGE_MASK_4K, size, prot);
+ if (ret)
+ goto error;
+
+ pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
+ pci_name(pdev), size, (u64)addr,
+ size, (u64)start_addr, dir);
+
+ /* it's a non-present to present mapping */
+ ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
+ start_addr, size >> PAGE_SHIFT_4K, 1);
+ if (ret)
+ iommu_flush_write_buffer(domain->iommu);
+
+ return (start_addr + ((u64)addr & (~PAGE_MASK_4K)));
+
+error:
+ if (iova)
+ __free_iova(&domain->iovad, iova);
+ printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
+ pci_name(pdev), size, (u64)addr, dir);
+ return 0;
+}
+
+static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
+ size_t size, int dir)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dmar_domain *domain;
+ unsigned long start_addr;
+ struct iova *iova;
+
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ return;
+ domain = find_domain(pdev);
+ BUG_ON(!domain);
+
+ iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
+ if (!iova)
+ return;
+
+ start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+ size = aligned_size((u64)dev_addr, size);
+
+ pr_debug("Device %s unmapping: %lx@%llx\n",
+ pci_name(pdev), size, (u64)start_addr);
+
+ /* clear the whole page */
+ dma_pte_clear_range(domain, start_addr, start_addr + size);
+ /* free page tables */
+ dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+
+ if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
+ size >> PAGE_SHIFT_4K, 0))
+ iommu_flush_write_buffer(domain->iommu);
+
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
+}
+
+static void * intel_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
+{
+ void *vaddr;
+ int order;
+
+ size = PAGE_ALIGN_4K(size);
+ order = get_order(size);
+ flags &= ~(GFP_DMA | GFP_DMA32);
+
+ vaddr = (void *)__get_free_pages(flags, order);
+ if (!vaddr)
+ return NULL;
+ memset(vaddr, 0, size);
+
+ *dma_handle = intel_map_single(hwdev, vaddr, size, DMA_BIDIRECTIONAL);
+ if (*dma_handle)
+ return vaddr;
+ free_pages((unsigned long)vaddr, order);
+ return NULL;
+}
+
+static void intel_free_coherent(struct device *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ int order;
+
+ size = PAGE_ALIGN_4K(size);
+ order = get_order(size);
+
+ intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vaddr, order);
+}
+
+#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+ int nelems, int dir)
+{
+ int i;
+ struct pci_dev *pdev = to_pci_dev(hwdev);
+ struct dmar_domain *domain;
+ unsigned long start_addr;
+ struct iova *iova;
+ size_t size = 0;
+ void *addr;
+ struct scatterlist *sg;
+
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ return;
+
+ domain = find_domain(pdev);
+
+ iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
+ if (!iova)
+ return;
+ for_each_sg(sglist, sg, nelems, i) {
+ addr = SG_ENT_VIRT_ADDRESS(sg);
+ size += aligned_size((u64)addr, sg->length);
+ }
+
+ start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+
+ /* clear the whole page */
+ dma_pte_clear_range(domain, start_addr, start_addr + size);
+ /* free page tables */
+ dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+
+ if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
+ size >> PAGE_SHIFT_4K, 0))
+ iommu_flush_write_buffer(domain->iommu);
+
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
+}
+
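+/* Fallback for devices that bypass the IOMMU: the bus address is simply
+ * the physical address of each scatterlist segment. */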
+static int intel_nontranslate_map_sg(struct device *hddev,
+ struct scatterlist *sglist, int nelems, int dir)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i) {
+ BUG_ON(!sg->page);
+ sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+ sg->dma_length = sg->length;
+ }
+ return nelems;
+}
+
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
+ int nelems, int dir)
+{
+ void *addr;
+ int i;
+ struct pci_dev *pdev = to_pci_dev(hwdev);
+ struct dmar_domain *domain;
+ size_t size = 0;
+ int prot = 0;
+ size_t offset = 0;
+ struct iova *iova = NULL;
+ int ret;
+ struct scatterlist *sg;
+ unsigned long start_addr;
+
+ BUG_ON(dir == DMA_NONE);
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
+
+ domain = get_valid_domain_for_dev(pdev);
+ if (!domain)
+ return 0;
+
+ for_each_sg(sglist, sg, nelems, i) {
+ addr = SG_ENT_VIRT_ADDRESS(sg);
+ addr = (void *)virt_to_phys(addr);
+ size += aligned_size((u64)addr, sg->length);
+ }
+
+ iova = __intel_alloc_iova(hwdev, domain, size);
+ if (!iova) {
+ sglist->dma_length = 0;
+ return 0;
+ }
+
+ /*
+ * Check if DMAR supports zero-length reads on write only
+ * mappings.
+ */
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
+ !cap_zlr(domain->iommu->cap))
+ prot |= DMA_PTE_READ;
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ prot |= DMA_PTE_WRITE;
+
+ start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+ offset = 0;
+ for_each_sg(sglist, sg, nelems, i) {
+ addr = SG_ENT_VIRT_ADDRESS(sg);
+ addr = (void *)virt_to_phys(addr);
+ size = aligned_size((u64)addr, sg->length);
+ ret = domain_page_mapping(domain, start_addr + offset,
+ ((u64)addr) & PAGE_MASK_4K,
+ size, prot);
+ if (ret) {
+ /* clear the page */
+ dma_pte_clear_range(domain, start_addr,
+ start_addr + offset);
+ /* free page tables */
+ dma_pte_free_pagetable(domain, start_addr,
+ start_addr + offset);
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
+ return 0;
+ }
+ sg->dma_address = start_addr + offset +
+ ((u64)addr & (~PAGE_MASK_4K));
+ sg->dma_length = sg->length;
+ offset += size;
+ }
+
+ /* it's a non-present to present mapping */
+ if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
+ start_addr, offset >> PAGE_SHIFT_4K, 1))
+ iommu_flush_write_buffer(domain->iommu);
+ return nelems;
+}
+
+static struct dma_mapping_ops intel_dma_ops = {
+ .alloc_coherent = intel_alloc_coherent,
+ .free_coherent = intel_free_coherent,
+ .map_single = intel_map_single,
+ .unmap_single = intel_unmap_single,
+ .map_sg = intel_map_sg,
+ .unmap_sg = intel_unmap_sg,
+};
+
+static inline int iommu_domain_cache_init(void)
+{
+ int ret = 0;
+
+ iommu_domain_cache = kmem_cache_create("iommu_domain",
+ sizeof(struct dmar_domain),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!iommu_domain_cache) {
+ printk(KERN_ERR "Couldn't create iommu_domain cache\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+static inline int iommu_devinfo_cache_init(void)
+{
+ int ret = 0;
+
+ iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
+ sizeof(struct device_domain_info),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!iommu_devinfo_cache) {
+ printk(KERN_ERR "Couldn't create devinfo cache\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+static inline int iommu_iova_cache_init(void)
+{
+ int ret = 0;
+
+ iommu_iova_cache = kmem_cache_create("iommu_iova",
+ sizeof(struct iova),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!iommu_iova_cache) {
+ printk(KERN_ERR "Couldn't create iova cache\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+static int __init iommu_init_mempool(void)
+{
+ int ret;
+ ret = iommu_iova_cache_init();
+ if (ret)
+ return ret;
+
+ ret = iommu_domain_cache_init();
+ if (ret)
+ goto domain_error;
+
+ ret = iommu_devinfo_cache_init();
+ if (!ret)
+ return ret;
+
+ kmem_cache_destroy(iommu_domain_cache);
+domain_error:
+ kmem_cache_destroy(iommu_iova_cache);
+
+ return -ENOMEM;
+}
+
+static void __init iommu_exit_mempool(void)
+{
+ kmem_cache_destroy(iommu_devinfo_cache);
+ kmem_cache_destroy(iommu_domain_cache);
+ kmem_cache_destroy(iommu_iova_cache);
+
+}
+
+void __init detect_intel_iommu(void)
+{
+ if (swiotlb || no_iommu || iommu_detected || dmar_disabled)
+ return;
+ if (early_dmar_detect()) {
+ iommu_detected = 1;
+ }
+}
+
+static void __init init_no_remapping_devices(void)
+{
+ struct dmar_drhd_unit *drhd;
+
+ for_each_drhd_unit(drhd) {
+ if (!drhd->include_all) {
+ int i;
+ for (i = 0; i < drhd->devices_cnt; i++)
+ if (drhd->devices[i] != NULL)
+ break;
+ /* ignore DMAR unit if no pci devices exist */
+ if (i == drhd->devices_cnt)
+ drhd->ignored = 1;
+ }
+ }
+
+ if (dmar_map_gfx)
+ return;
+
+ for_each_drhd_unit(drhd) {
+ int i;
+ if (drhd->ignored || drhd->include_all)
+ continue;
+
+ for (i = 0; i < drhd->devices_cnt; i++)
+ if (drhd->devices[i] &&
+ !IS_GFX_DEVICE(drhd->devices[i]))
+ break;
+
+ if (i < drhd->devices_cnt)
+ continue;
+
+ /* bypass IOMMU if it is just for gfx devices */
+ drhd->ignored = 1;
+ for (i = 0; i < drhd->devices_cnt; i++) {
+ if (!drhd->devices[i])
+ continue;
+ drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+ }
+ }
+}
+
+int __init intel_iommu_init(void)
+{
+ int ret = 0;
+
+ if (no_iommu || swiotlb || dmar_disabled)
+ return -ENODEV;
+
+ if (dmar_table_init())
+ return -ENODEV;
+
+ iommu_init_mempool();
+ dmar_init_reserved_ranges();
+
+ init_no_remapping_devices();
+
+ ret = init_dmars();
+ if (ret) {
+ printk(KERN_ERR "IOMMU: dmar init failed\n");
+ put_iova_domain(&reserved_iova_list);
+ iommu_exit_mempool();
+ return ret;
+ }
+ printk(KERN_INFO
+ "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
+
+ force_iommu = 1;
+ dma_ops = &intel_dma_ops;
+ return 0;
+}
+
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
new file mode 100644
index 00000000000..ee88dd2400c
--- /dev/null
+++ b/drivers/pci/intel-iommu.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Copyright (C) Ashok Raj <ashok.raj@intel.com>
+ * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ */
+
+#ifndef _INTEL_IOMMU_H_
+#define _INTEL_IOMMU_H_
+
+#include <linux/types.h>
+#include <linux/msi.h>
+#include "iova.h"
+#include <linux/io.h>
+
+/*
+ * Intel IOMMU register specification per version 1.0 public spec.
+ */
+
+#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
+#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
+#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
+#define DMAR_GCMD_REG 0x18 /* Global command register */
+#define DMAR_GSTS_REG 0x1c /* Global status register */
+#define DMAR_RTADDR_REG 0x20 /* Root entry table */
+#define DMAR_CCMD_REG 0x28 /* Context command reg */
+#define DMAR_FSTS_REG 0x34 /* Fault Status register */
+#define DMAR_FECTL_REG 0x38 /* Fault control register */
+#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
+#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
+#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
+#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
+#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
+#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
+#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
+#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
+#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
+
+#define OFFSET_STRIDE (9)
+/*
+#define dmar_readl(dmar, reg) readl(dmar + reg)
+#define dmar_readq(dmar, reg) ({ \
+ u32 lo, hi; \
+ lo = readl(dmar + reg); \
+ hi = readl(dmar + reg + 4); \
+ (((u64) hi) << 32) + lo; })
+*/
+static inline u64 dmar_readq(void __iomem *addr)
+{
+ u32 lo, hi;
+ lo = readl(addr);
+ hi = readl(addr + 4);
+ return (((u64) hi) << 32) + lo;
+}
+
+static inline void dmar_writeq(void __iomem *addr, u64 val)
+{
+ writel((u32)val, addr);
+ writel((u32)(val >> 32), addr + 4);
+}
+
+#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
+#define DMAR_VER_MINOR(v) ((v) & 0x0f)
+
+/*
+ * Decoding Capability Register
+ */
+#define cap_read_drain(c) (((c) >> 55) & 1)
+#define cap_write_drain(c) (((c) >> 54) & 1)
+#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
+#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
+#define cap_pgsel_inv(c) (((c) >> 39) & 1)
+
+#define cap_super_page_val(c) (((c) >> 34) & 0xf)
+#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
+ * OFFSET_STRIDE) + 21)
+
+#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
+#define cap_max_fault_reg_offset(c) \
+ (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
+
+#define cap_zlr(c) (((c) >> 22) & 1)
+#define cap_isoch(c) (((c) >> 23) & 1)
+#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
+#define cap_sagaw(c) (((c) >> 8) & 0x1f)
+#define cap_caching_mode(c) (((c) >> 7) & 1)
+#define cap_phmr(c) (((c) >> 6) & 1)
+#define cap_plmr(c) (((c) >> 5) & 1)
+#define cap_rwbf(c) (((c) >> 4) & 1)
+#define cap_afl(c) (((c) >> 3) & 1)
+#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
+/*
+ * Extended Capability Register
+ */
+
+#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
+#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
+#define ecap_max_iotlb_offset(e) \
+ (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
+#define ecap_coherent(e) ((e) & 0x1)
+
+
+/* IOTLB_REG */
+#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
+#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
+#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
+#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
+#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
+#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
+#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
+#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
+#define DMA_TLB_IVT (((u64)1) << 63)
+#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_TLB_MAX_SIZE (0x3f)
+
+/* GCMD_REG */
+#define DMA_GCMD_TE (((u32)1) << 31)
+#define DMA_GCMD_SRTP (((u32)1) << 30)
+#define DMA_GCMD_SFL (((u32)1) << 29)
+#define DMA_GCMD_EAFL (((u32)1) << 28)
+#define DMA_GCMD_WBF (((u32)1) << 27)
+
+/* GSTS_REG */
+#define DMA_GSTS_TES (((u32)1) << 31)
+#define DMA_GSTS_RTPS (((u32)1) << 30)
+#define DMA_GSTS_FLS (((u32)1) << 29)
+#define DMA_GSTS_AFLS (((u32)1) << 28)
+#define DMA_GSTS_WBFS (((u32)1) << 27)
+
+/* CCMD_REG */
+#define DMA_CCMD_ICC (((u64)1) << 63)
+#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
+#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
+#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
+#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
+#define DMA_CCMD_MASK_NOBIT 0
+#define DMA_CCMD_MASK_1BIT 1
+#define DMA_CCMD_MASK_2BIT 2
+#define DMA_CCMD_MASK_3BIT 3
+#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
+#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
+
+/* FECTL_REG */
+#define DMA_FECTL_IM (((u32)1) << 31)
+
+/* FSTS_REG */
+#define DMA_FSTS_PPF ((u32)2)
+#define DMA_FSTS_PFO ((u32)1)
+#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
+
+/* FRCD_REG, 32 bits access */
+#define DMA_FRCD_F (((u32)1) << 31)
+#define dma_frcd_type(d) ((d >> 30) & 1)
+#define dma_frcd_fault_reason(c) (c & 0xff)
+#define dma_frcd_source_id(c) (c & 0xffff)
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
+
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+ u64 val;
+ u64 rsvd1;
+};
+#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
+static inline bool root_present(struct root_entry *root)
+{
+ return (root->val & 1);
+}
+static inline void set_root_present(struct root_entry *root)
+{
+ root->val |= 1;
+}
+static inline void set_root_value(struct root_entry *root, unsigned long value)
+{
+ root->val |= value & PAGE_MASK_4K;
+}
+
+struct context_entry;
+static inline struct context_entry *
+get_context_addr_from_root(struct root_entry *root)
+{
+ return (struct context_entry *)
+ (root_present(root)?phys_to_virt(
+ root->val & PAGE_MASK_4K):
+ NULL);
+}
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: avail
+ * 8-23: domain id
+ */
+struct context_entry {
+ u64 lo;
+ u64 hi;
+};
+#define context_present(c) ((c).lo & 1)
+#define context_fault_disable(c) (((c).lo >> 1) & 1)
+#define context_translation_type(c) (((c).lo >> 2) & 3)
+#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
+#define context_address_width(c) ((c).hi & 7)
+#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
+
+#define context_set_present(c) do {(c).lo |= 1;} while (0)
+#define context_set_fault_enable(c) \
+ do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
+#define context_set_translation_type(c, val) \
+ do { \
+ (c).lo &= (((u64)-1) << 4) | 3; \
+ (c).lo |= ((val) & 3) << 2; \
+ } while (0)
+#define CONTEXT_TT_MULTI_LEVEL 0
+#define context_set_address_root(c, val) \
+ do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
+#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
+#define context_set_domain_id(c, val) \
+ do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
+#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
+
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-11: available
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+ u64 val;
+};
+#define dma_clear_pte(p) do {(p).val = 0;} while (0)
+
+#define DMA_PTE_READ (1)
+#define DMA_PTE_WRITE (2)
+
+#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
+#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
+#define dma_set_pte_prot(p, prot) \
+ do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
+#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_set_pte_addr(p, addr) do {\
+ (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+#define dma_pte_present(p) (((p).val & 3) != 0)
+
+struct intel_iommu;
+
+struct dmar_domain {
+ int id; /* domain id */
+ struct intel_iommu *iommu; /* back pointer to owning iommu */
+
+ struct list_head devices; /* all devices' list */
+ struct iova_domain iovad; /* iova's that belong to this domain */
+
+ struct dma_pte *pgd; /* virtual address */
+ spinlock_t mapping_lock; /* page table lock */
+ int gaw; /* max guest address width */
+
+ /* adjusted guest address width, 0 is level 2 30-bit */
+ int agaw;
+
+#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
+ int flags;
+};
+
+/* PCI domain-device relationship */
+struct device_domain_info {
+ struct list_head link; /* link to domain siblings */
+ struct list_head global; /* link to global list */
+ u8 bus; /* PCI bus number */
+ u8 devfn; /* PCI devfn number */
+ struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+ struct dmar_domain *domain; /* pointer to domain */
+};
+
+extern int init_dmars(void);
+
+struct intel_iommu {
+ void __iomem *reg; /* Pointer to hardware regs, virtual addr */
+ u64 cap;
+ u64 ecap;
+ unsigned long *domain_ids; /* bitmap of domains */
+ struct dmar_domain **domains; /* ptr to domains */
+ int seg;
+ u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
+ spinlock_t lock; /* protect context, domain ids */
+ spinlock_t register_lock; /* protect register handling */
+ struct root_entry *root_entry; /* virtual address */
+
+ unsigned int irq;
+ unsigned char name[7]; /* Device Name */
+ struct msi_msg saved_msg;
+ struct sys_device sysdev;
+};
+
+#ifndef CONFIG_DMAR_GFX_WA
+static inline void iommu_prepare_gfx_mapping(void)
+{
+ return;
+}
+#endif /* !CONFIG_DMAR_GFX_WA */
+
+#endif
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
new file mode 100644
index 00000000000..a84571c2936
--- /dev/null
+++ b/drivers/pci/iova.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This file is released under the GPLv2.
+ *
+ * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ */
+
+#include "iova.h"
+
+void
+init_iova_domain(struct iova_domain *iovad)
+{
+ spin_lock_init(&iovad->iova_alloc_lock);
+ spin_lock_init(&iovad->iova_rbtree_lock);
+ iovad->rbroot = RB_ROOT;
+ iovad->cached32_node = NULL;
+
+}
+
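+/*
+ * Pick the rb-tree node to start the top-down search from: reuse the cached
+ * 32-bit node for DMA_32BIT_PFN allocations, otherwise start from the
+ * right-most (highest) node in the tree.
+ */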
+static struct rb_node *
+__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
+{
+ if ((*limit_pfn != DMA_32BIT_PFN) ||
+ (iovad->cached32_node == NULL))
+ return rb_last(&iovad->rbroot);
+ else {
+ struct rb_node *prev_node = rb_prev(iovad->cached32_node);
+ struct iova *curr_iova =
+ container_of(iovad->cached32_node, struct iova, node);
+ *limit_pfn = curr_iova->pfn_lo - 1;
+ return prev_node;
+ }
+}
+
+static void
+__cached_rbnode_insert_update(struct iova_domain *iovad,
+ unsigned long limit_pfn, struct iova *new)
+{
+ if (limit_pfn != DMA_32BIT_PFN)
+ return;
+ iovad->cached32_node = &new->node;
+}
+
+static void
+__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
+{
+ struct iova *cached_iova;
+ struct rb_node *curr;
+
+ if (!iovad->cached32_node)
+ return;
+ curr = iovad->cached32_node;
+ cached_iova = container_of(curr, struct iova, node);
+
+ if (free->pfn_lo >= cached_iova->pfn_lo)
+ iovad->cached32_node = rb_next(&free->node);
+}
+
+/* Compute the padding size required to make the
+ * start address naturally aligned on its size
+ */
+static int
+iova_get_pad_size(int size, unsigned int limit_pfn)
+{
+ unsigned int pad_size = 0;
+ unsigned int order = ilog2(size);
+
+ if (order)
+ pad_size = (limit_pfn + 1) % (1 << order);
+
+ return pad_size;
+}
+
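+/*
+ * Scan the rb-tree from high addresses downwards for a free gap of 'size'
+ * pfns below limit_pfn, optionally padding the allocation so that pfn_lo
+ * ends up naturally aligned on the (power-of-two) size.
+ */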
+static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn, struct iova *new, bool size_aligned)
+{
+ struct rb_node *curr = NULL;
+ unsigned long flags;
+ unsigned long saved_pfn;
+ unsigned int pad_size = 0;
+
+ /* Walk the tree backwards */
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ saved_pfn = limit_pfn;
+ curr = __get_cached_rbnode(iovad, &limit_pfn);
+ while (curr) {
+ struct iova *curr_iova = container_of(curr, struct iova, node);
+ if (limit_pfn < curr_iova->pfn_lo)
+ goto move_left;
+ else if (limit_pfn < curr_iova->pfn_hi)
+ goto adjust_limit_pfn;
+ else {
+ if (size_aligned)
+ pad_size = iova_get_pad_size(size, limit_pfn);
+ if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
+ break; /* found a free slot */
+ }
+adjust_limit_pfn:
+ limit_pfn = curr_iova->pfn_lo - 1;
+move_left:
+ curr = rb_prev(curr);
+ }
+
+ if (!curr) {
+ if (size_aligned)
+ pad_size = iova_get_pad_size(size, limit_pfn);
+ if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ /* pfn_lo will point to size aligned address if size_aligned is set */
+ new->pfn_lo = limit_pfn - (size + pad_size) + 1;
+ new->pfn_hi = new->pfn_lo + size - 1;
+
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return 0;
+}
+
+static void
+iova_insert_rbtree(struct rb_root *root, struct iova *iova)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ /* Figure out where to put new node */
+ while (*new) {
+ struct iova *this = container_of(*new, struct iova, node);
+ parent = *new;
+
+ if (iova->pfn_lo < this->pfn_lo)
+ new = &((*new)->rb_left);
+ else if (iova->pfn_lo > this->pfn_lo)
+ new = &((*new)->rb_right);
+ else
+ BUG(); /* this should not happen */
+ }
+ /* Add new node and rebalance tree. */
+ rb_link_node(&iova->node, parent, new);
+ rb_insert_color(&iova->node, root);
+}
+
+/**
+ * alloc_iova - allocates an iova
+ * @iovad: - iova domain in question
+ * @size: - size of page frames to allocate
+ * @limit_pfn: - max limit address
+ * @size_aligned: - set if a size-aligned address range is required
+ * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
+ * searching downward from limit_pfn instead of upward from IOVA_START_PFN.
+ * If the size_aligned flag is set then the allocated address iova->pfn_lo
+ * will be naturally aligned on roundup_power_of_two(size).
+ */
+struct iova *
+alloc_iova(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn,
+ bool size_aligned)
+{
+ unsigned long flags;
+ struct iova *new_iova;
+ int ret;
+
+ new_iova = alloc_iova_mem();
+ if (!new_iova)
+ return NULL;
+
+ /* If size_aligned is set then round the size up
+ * to the next power of two.
+ */
+ if (size_aligned)
+ size = __roundup_pow_of_two(size);
+
+ spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
+ ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
+ size_aligned);
+
+ if (ret) {
+ spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+ free_iova_mem(new_iova);
+ return NULL;
+ }
+
+ /* Insert the new_iova into domain rbtree by holding writer lock */
+ spin_lock(&iovad->iova_rbtree_lock);
+ iova_insert_rbtree(&iovad->rbroot, new_iova);
+ __cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
+ spin_unlock(&iovad->iova_rbtree_lock);
+
+ spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+
+ return new_iova;
+}
+
+/**
+ * find_iova - finds the iova for a given pfn
+ * @iovad: - iova domain in question.
+ * @pfn: - page frame number
+ * This function finds and returns an iova belonging to the
+ * given domain which matches the given pfn.
+ */
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+ unsigned long flags;
+ struct rb_node *node;
+
+ /* Take the lock so that no other thread is manipulating the rbtree */
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ node = iovad->rbroot.rb_node;
+ while (node) {
+ struct iova *iova = container_of(node, struct iova, node);
+
+ /* If pfn falls within iova's range, return iova */
+ if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ /* We are not holding the lock while this iova
+ * is referenced by the caller as the same thread
+ * which called this function also calls __free_iova()
+ * and it is by design that only one thread can possibly
+ * reference a particular iova and hence no conflict.
+ */
+ return iova;
+ }
+
+ if (pfn < iova->pfn_lo)
+ node = node->rb_left;
+ else if (pfn > iova->pfn_lo)
+ node = node->rb_right;
+ }
+
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return NULL;
+}
+
+/**
+ * __free_iova - frees the given iova
+ * @iovad: iova domain in question.
+ * @iova: iova in question.
+ * Frees the given iova belonging to the giving domain
+ */
+void
+__free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ __cached_rbnode_delete_update(iovad, iova);
+ rb_erase(&iova->node, &iovad->rbroot);
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ free_iova_mem(iova);
+}
+
+/**
+ * free_iova - finds and frees the iova for a given pfn
+ * @iovad: - iova domain in question.
+ * @pfn: - pfn that is allocated previously
+ * This function finds the iova for a given pfn and then
+ * frees the iova from that domain.
+ */
+void
+free_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+ struct iova *iova = find_iova(iovad, pfn);
+ if (iova)
+ __free_iova(iovad, iova);
+
+}
+
+/**
+ * put_iova_domain - destroys the iova domain
+ * @iovad: - iova domain in question.
+ * All the iova's in that domain are destroyed.
+ */
+void put_iova_domain(struct iova_domain *iovad)
+{
+ struct rb_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ node = rb_first(&iovad->rbroot);
+ while (node) {
+ struct iova *iova = container_of(node, struct iova, node);
+ rb_erase(node, &iovad->rbroot);
+ free_iova_mem(iova);
+ node = rb_first(&iovad->rbroot);
+ }
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+}
+
+static int
+__is_range_overlap(struct rb_node *node,
+ unsigned long pfn_lo, unsigned long pfn_hi)
+{
+ struct iova *iova = container_of(node, struct iova, node);
+
+ if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
+ return 1;
+ return 0;
+}
+
+static struct iova *
+__insert_new_range(struct iova_domain *iovad,
+ unsigned long pfn_lo, unsigned long pfn_hi)
+{
+ struct iova *iova;
+
+ iova = alloc_iova_mem();
+ if (!iova)
+ return iova;
+
+ iova->pfn_hi = pfn_hi;
+ iova->pfn_lo = pfn_lo;
+ iova_insert_rbtree(&iovad->rbroot, iova);
+ return iova;
+}
+
+static void
+__adjust_overlap_range(struct iova *iova,
+ unsigned long *pfn_lo, unsigned long *pfn_hi)
+{
+ if (*pfn_lo < iova->pfn_lo)
+ iova->pfn_lo = *pfn_lo;
+ if (*pfn_hi > iova->pfn_hi)
+ *pfn_lo = iova->pfn_hi + 1;
+}
+
+/**
+ * reserve_iova - reserves an iova in the given range
+ * @iovad: - iova domain pointer
+ * @pfn_lo: - lower page frame address
+ * @pfn_hi: - higher pfn address
+ * This function reserves the address range from pfn_lo to pfn_hi so
+ * that this address is not dished out as part of alloc_iova.
+ */
+struct iova *
+reserve_iova(struct iova_domain *iovad,
+ unsigned long pfn_lo, unsigned long pfn_hi)
+{
+ struct rb_node *node;
+ unsigned long flags;
+ struct iova *iova;
+ unsigned int overlap = 0;
+
+ spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
+ spin_lock(&iovad->iova_rbtree_lock);
+ for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
+ if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
+ iova = container_of(node, struct iova, node);
+ __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
+ if ((pfn_lo >= iova->pfn_lo) &&
+ (pfn_hi <= iova->pfn_hi))
+ goto finish;
+ overlap = 1;
+
+ } else if (overlap)
+ break;
+ }
+
+ /* We are here either because this is the first reserved node
+ * or because we need to insert the remaining non-overlapping addr range
+ */
+ iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
+finish:
+
+ spin_unlock(&iovad->iova_rbtree_lock);
+ spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+ return iova;
+}
+
+/**
+ * copy_reserved_iova - copies the reserved iovas between domains
+ * @from: - source domain from where to copy
+ * @to: - destination domain where to copy
+ * This function copies reserved iovas from one domain to
+ * another.
+ */
+void
+copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
+{
+ unsigned long flags;
+ struct rb_node *node;
+
+ spin_lock_irqsave(&from->iova_alloc_lock, flags);
+ spin_lock(&from->iova_rbtree_lock);
+ for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
+ struct iova *iova = container_of(node, struct iova, node);
+ struct iova *new_iova;
+ new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
+ if (!new_iova)
+ printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
+ iova->pfn_lo, iova->pfn_lo);
+ }
+ spin_unlock(&from->iova_rbtree_lock);
+ spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+}
diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h
new file mode 100644
index 00000000000..ae3028d5a94
--- /dev/null
+++ b/drivers/pci/iova.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This file is released under the GPLv2.
+ *
+ * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ *
+ */
+
+#ifndef _IOVA_H_
+#define _IOVA_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/rbtree.h>
+#include <linux/dma-mapping.h>
+
+/*
+ * We need a fixed PAGE_SIZE of 4K irrespective of
+ * arch PAGE_SIZE for IOMMU page tables.
+ */
+#define PAGE_SHIFT_4K (12)
+#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
+#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
+#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN (1)
+
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
+#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
+#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
+
+/* iova structure */
+struct iova {
+ struct rb_node node;
+ unsigned long pfn_hi; /* IOMMU dish out addr hi */
+ unsigned long pfn_lo; /* IOMMU dish out addr lo */
+};
+
+/* holds all the iova translations for a domain */
+struct iova_domain {
+ spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */
+ spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
+ struct rb_root rbroot; /* iova domain rbtree root */
+ struct rb_node *cached32_node; /* Save last alloced node */
+};
+
+struct iova *alloc_iova_mem(void);
+void free_iova_mem(struct iova *iova);
+void free_iova(struct iova_domain *iovad, unsigned long pfn);
+void __free_iova(struct iova_domain *iovad, struct iova *iova);
+struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn,
+ bool size_aligned);
+struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
+ unsigned long pfn_hi);
+void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
+void init_iova_domain(struct iova_domain *iovad);
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
+void put_iova_domain(struct iova_domain *iovad);
+
+#endif
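
A minimal usage sketch of the interface declared above, assuming only the
prototypes in iova.h; the domain name, the reserved window and the allocation
size are invented for illustration and are not part of the patch:

	#include <linux/errno.h>
	#include "iova.h"

	static struct iova_domain example_domain;	/* hypothetical per-IOMMU domain */

	static int example_iova_usage(void)
	{
		struct iova *win, *dma;

		init_iova_domain(&example_domain);

		/* keep a firmware-reserved window out of the allocator */
		win = reserve_iova(&example_domain, IOVA_PFN(0xfee00000),
				   IOVA_PFN(0xfeefffff));
		if (!win)
			return -ENOMEM;

		/* 16 size-aligned 4K pages below the 32-bit boundary */
		dma = alloc_iova(&example_domain, 16, DMA_32BIT_PFN, true);
		if (!dma)
			return -ENOMEM;

		/* dma->pfn_lo..dma->pfn_hi describe the allocated range in 4K pages */
		__free_iova(&example_domain, dma);
		put_iova_domain(&example_domain);
		return 0;
	}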
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6fda33de84e..fc87e14b50d 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -90,3 +90,4 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
return NULL;
}
+struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 5db6b6690b5..463a5a9d583 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -837,6 +837,19 @@ static void pci_release_dev(struct device *dev)
kfree(pci_dev);
}
+static void set_pcie_port_type(struct pci_dev *pdev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return;
+ pdev->is_pcie = 1;
+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+
/**
* pci_cfg_space_size - get the configuration space size of the PCI device.
* @dev: PCI device
@@ -951,6 +964,7 @@ pci_scan_device(struct pci_bus *bus, int devfn)
dev->device = (l >> 16) & 0xffff;
dev->cfg_size = pci_cfg_space_size(dev);
dev->error_state = pci_channel_io_normal;
+ set_pcie_port_type(dev);
/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
set this higher, assuming the system even supports it. */
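
With the capability flags cached by set_pcie_port_type(), code elsewhere can
test the port type directly. A hypothetical helper (illustrative only, built
on the existing PCI_EXP_TYPE_* constants from <linux/pci_regs.h>):

	static bool example_is_pcie_to_pci_bridge(struct pci_dev *pdev)
	{
		/* pcie_type holds PCI_EXP_FLAGS_TYPE >> 4, i.e. a PCI_EXP_TYPE_* value */
		return pdev->is_pcie && pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE;
	}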
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index c6e79d01ce3..b001b5922e3 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -14,6 +14,40 @@
#include "pci.h"
DECLARE_RWSEM(pci_bus_sem);
+/*
+ * find the upstream PCIe-to-PCI bridge of a PCI device
+ * if the device is PCIe, return NULL
+ * if the device isn't connected to a PCIe bridge (that is, its parent is a
+ * legacy PCI bridge and the bridge is directly connected to bus 0), return its
+ * parent
+ */
+struct pci_dev *
+pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
+{
+ struct pci_dev *tmp = NULL;
+
+ if (pdev->is_pcie)
+ return NULL;
+ while (1) {
+ if (!pdev->bus->self)
+ break;
+ pdev = pdev->bus->self;
+ /* a p2p bridge */
+ if (!pdev->is_pcie) {
+ tmp = pdev;
+ continue;
+ }
+ /* PCI device should connect to a PCIE bridge */
+ if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) {
+ /* Busted hardware? */
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+ return pdev;
+ }
+
+ return tmp;
+}
static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
{
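
A usage sketch for the new helper (illustrative, not part of the patch; the
function name is hypothetical): a caller that must attribute DMA to the
requester ID actually seen on the bus, rather than to a legacy PCI device
hidden behind a bridge, might do:

	static struct pci_dev *example_dma_alias(struct pci_dev *pdev)
	{
		struct pci_dev *bridge = pci_find_upstream_pcie_bridge(pdev);

		/* NULL means pdev is itself PCIe (or sits directly on the root bus) */
		if (!bridge)
			return pdev;

		/* otherwise DMA is attributed to the bridge upstream of pdev */
		return bridge;
	}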
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 39a90a6f0f8..bbf3ee10da0 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -26,65 +26,124 @@ static struct power_supply *main_battery;
static void find_main_battery(void)
{
struct device *dev;
- struct power_supply *bat, *batm;
+ struct power_supply *bat = NULL;
+ struct power_supply *max_charge_bat = NULL;
+ struct power_supply *max_energy_bat = NULL;
union power_supply_propval full;
int max_charge = 0;
+ int max_energy = 0;
main_battery = NULL;
- batm = NULL;
+
list_for_each_entry(dev, &power_supply_class->devices, node) {
bat = dev_get_drvdata(dev);
- /* If none of battery devices cantains 'use_for_apm' flag,
- choice one with maximum design charge */
- if (!PSY_PROP(bat, CHARGE_FULL_DESIGN, &full)) {
+
+ if (bat->use_for_apm) {
+ /* nice, we explicitly asked to report this battery. */
+ main_battery = bat;
+ return;
+ }
+
+ if (!PSY_PROP(bat, CHARGE_FULL_DESIGN, &full) ||
+ !PSY_PROP(bat, CHARGE_FULL, &full)) {
if (full.intval > max_charge) {
- batm = bat;
+ max_charge_bat = bat;
max_charge = full.intval;
}
+ } else if (!PSY_PROP(bat, ENERGY_FULL_DESIGN, &full) ||
+ !PSY_PROP(bat, ENERGY_FULL, &full)) {
+ if (full.intval > max_energy) {
+ max_energy_bat = bat;
+ max_energy = full.intval;
+ }
}
+ }
- if (bat->use_for_apm)
- main_battery = bat;
+ if ((max_energy_bat && max_charge_bat) &&
+ (max_energy_bat != max_charge_bat)) {
+ /* try to guess the battery with more capacity */
+ if (!PSY_PROP(max_charge_bat, VOLTAGE_MAX_DESIGN, &full)) {
+ if (max_energy > max_charge * full.intval)
+ main_battery = max_energy_bat;
+ else
+ main_battery = max_charge_bat;
+ } else if (!PSY_PROP(max_energy_bat, VOLTAGE_MAX_DESIGN,
+ &full)) {
+ if (max_charge > max_energy / full.intval)
+ main_battery = max_charge_bat;
+ else
+ main_battery = max_energy_bat;
+ } else {
+ /* give up, choose any */
+ main_battery = max_energy_bat;
+ }
+ } else if (max_charge_bat) {
+ main_battery = max_charge_bat;
+ } else if (max_energy_bat) {
+ main_battery = max_energy_bat;
+ } else {
+ /* give up, fall back to the last battery seen, if any */
+ main_battery = bat;
}
- if (!main_battery)
- main_battery = batm;
}
-static int calculate_time(int status)
+static int calculate_time(int status, int using_charge)
{
- union power_supply_propval charge_full, charge_empty;
- union power_supply_propval charge, I;
+ union power_supply_propval full;
+ union power_supply_propval empty;
+ union power_supply_propval cur;
+ union power_supply_propval I;
+ enum power_supply_property full_prop;
+ enum power_supply_property full_design_prop;
+ enum power_supply_property empty_prop;
+ enum power_supply_property empty_design_prop;
+ enum power_supply_property cur_avg_prop;
+ enum power_supply_property cur_now_prop;
- if (MPSY_PROP(CHARGE_FULL, &charge_full)) {
- /* if battery can't report this property, use design value */
- if (MPSY_PROP(CHARGE_FULL_DESIGN, &charge_full))
+ if (MPSY_PROP(CURRENT_AVG, &I)) {
+ /* if battery can't report average value, use momentary */
+ if (MPSY_PROP(CURRENT_NOW, &I))
return -1;
}
- if (MPSY_PROP(CHARGE_EMPTY, &charge_empty)) {
- /* if battery can't report this property, use design value */
- if (MPSY_PROP(CHARGE_EMPTY_DESIGN, &charge_empty))
- charge_empty.intval = 0;
+ if (using_charge) {
+ full_prop = POWER_SUPPLY_PROP_CHARGE_FULL;
+ full_design_prop = POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN;
+ empty_prop = POWER_SUPPLY_PROP_CHARGE_EMPTY;
+ empty_design_prop = POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN;
+ cur_avg_prop = POWER_SUPPLY_PROP_CHARGE_AVG;
+ cur_now_prop = POWER_SUPPLY_PROP_CHARGE_NOW;
+ } else {
+ full_prop = POWER_SUPPLY_PROP_ENERGY_FULL;
+ full_design_prop = POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN;
+ empty_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY;
+ empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
+ cur_avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
+ cur_now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
}
- if (MPSY_PROP(CHARGE_AVG, &charge)) {
- /* if battery can't report average value, use momentary */
- if (MPSY_PROP(CHARGE_NOW, &charge))
+ if (_MPSY_PROP(full_prop, &full)) {
+ /* if battery can't report this property, use design value */
+ if (_MPSY_PROP(full_design_prop, &full))
return -1;
}
- if (MPSY_PROP(CURRENT_AVG, &I)) {
+ if (_MPSY_PROP(empty_prop, &empty)) {
+ /* if battery can't report this property, use design value */
+ if (_MPSY_PROP(empty_design_prop, &empty))
+ empty.intval = 0;
+ }
+
+ if (_MPSY_PROP(cur_avg_prop, &cur)) {
/* if battery can't report average value, use momentary */
- if (MPSY_PROP(CURRENT_NOW, &I))
+ if (_MPSY_PROP(cur_now_prop, &cur))
return -1;
}
if (status == POWER_SUPPLY_STATUS_CHARGING)
- return ((charge.intval - charge_full.intval) * 60L) /
- I.intval;
+ return ((cur.intval - full.intval) * 60L) / I.intval;
else
- return -((charge.intval - charge_empty.intval) * 60L) /
- I.intval;
+ return -((cur.intval - empty.intval) * 60L) / I.intval;
}
static int calculate_capacity(int using_charge)
@@ -200,18 +259,22 @@ static void apm_battery_apm_get_power_status(struct apm_power_info *info)
info->units = APM_UNITS_MINS;
if (status.intval == POWER_SUPPLY_STATUS_CHARGING) {
- if (MPSY_PROP(TIME_TO_FULL_AVG, &time_to_full)) {
- if (MPSY_PROP(TIME_TO_FULL_NOW, &time_to_full))
- info->time = calculate_time(status.intval);
- else
- info->time = time_to_full.intval / 60;
+ if (!MPSY_PROP(TIME_TO_FULL_AVG, &time_to_full) ||
+ !MPSY_PROP(TIME_TO_FULL_NOW, &time_to_full)) {
+ info->time = time_to_full.intval / 60;
+ } else {
+ info->time = calculate_time(status.intval, 0);
+ if (info->time == -1)
+ info->time = calculate_time(status.intval, 1);
}
} else {
- if (MPSY_PROP(TIME_TO_EMPTY_AVG, &time_to_empty)) {
- if (MPSY_PROP(TIME_TO_EMPTY_NOW, &time_to_empty))
- info->time = calculate_time(status.intval);
- else
- info->time = time_to_empty.intval / 60;
+ if (!MPSY_PROP(TIME_TO_EMPTY_AVG, &time_to_empty) ||
+ !MPSY_PROP(TIME_TO_EMPTY_NOW, &time_to_empty)) {
+ info->time = time_to_empty.intval / 60;
+ } else {
+ info->time = calculate_time(status.intval, 0);
+ if (info->time == -1)
+ info->time = calculate_time(status.intval, 1);
}
}
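
As a worked example of the time estimate above: assuming the usual power
supply units (uAh for charge, uA for current, with current reported negative
while discharging), a battery with CHARGE_NOW = 2200000, CHARGE_EMPTY = 0 and
CURRENT_AVG = -1100000 yields -((2200000 - 0) * 60) / -1100000 = 120 minutes
of remaining run time.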
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index fb14014ee16..afb262b4be1 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1840,7 +1840,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
(scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) {
struct scatterlist *sg = scsi_sglist(srb);
- char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ char *buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
kunmap_atomic(buf - sg->offset, KM_IRQ0);
}
@@ -1919,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
char *buf;
unsigned long flags = 0;
local_irq_save(flags);
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
kunmap_atomic(buf - sg->offset, KM_IRQ0);
local_irq_restore(flags);
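
The same mechanical conversion repeats through the SCSI drivers below: every
open-coded dereference of the scatterlist page pointer now goes through the
accessors. A rough sketch of what they amount to for a non-chained entry
(illustrative; the authoritative definitions live in <linux/scatterlist.h> and
also encode chaining state):

	static inline struct page *example_sg_page(struct scatterlist *sg)
	{
		return sg->page;	/* field access, now hidden behind sg_page() */
	}

	static inline void *example_sg_virt(struct scatterlist *sg)
	{
		/* replaces the page_address(sg->page) + sg->offset idiom */
		return page_address(example_sg_page(sg)) + sg->offset;
	}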
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a64153b9603..59716ebeb10 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1469,7 +1469,7 @@ static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
struct scatterlist *sg = scsi_sglist(cmd);
local_irq_save(flags);
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
transfer_len = min(sg->length, len);
memcpy(buf, data, transfer_len);
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 988f0bc5eda..2597209183d 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -298,8 +298,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd * cmd)
if (cmd->use_sg) {
cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page)+
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
@@ -2143,8 +2142,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
++cmd->SCp.buffer;
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page)+
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
}
/*
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 96e8e29aa05..5b0efc90391 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -927,7 +927,7 @@ static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
esp->dma_mmu_get_scsi_sgl(esp, sp);
else
sp->SCp.ptr =
- (char *) virt_to_phys((page_address(sp->SCp.buffer->page) + sp->SCp.buffer->offset));
+ (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
}
}
@@ -1748,7 +1748,7 @@ static inline void advance_sg(struct NCR_ESP *esp, Scsi_Cmnd *sp)
if (esp->dma_advance_sg)
esp->dma_advance_sg (sp);
else
- sp->SCp.ptr = (char *) virt_to_phys((page_address(sp->SCp.buffer->page) + sp->SCp.buffer->offset));
+ sp->SCp.ptr = (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
}
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 3168a179484..137d065db3d 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -875,8 +875,7 @@ static void NCR53c406a_intr(void *dev_id)
outb(TRANSFER_INFO | DMA_OP, CMD_REG);
#if USE_PIO
scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
- NCR53c406a_pio_write(page_address(sg->page) + sg->offset,
- sg->length);
+ NCR53c406a_pio_write(sg_virt(sg), sg->length);
}
REG0;
#endif /* USE_PIO */
@@ -897,8 +896,7 @@ static void NCR53c406a_intr(void *dev_id)
outb(TRANSFER_INFO | DMA_OP, CMD_REG);
#if USE_PIO
scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
- NCR53c406a_pio_read(page_address(sg->page) + sg->offset,
- sg->length);
+ NCR53c406a_pio_read(sg_virt(sg), sg->length);
}
REG0;
#endif /* USE_PIO */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 80e448d0f3d..a77ab8d693d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -356,7 +356,7 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
int transfer_len;
struct scatterlist *sg = scsi_sglist(scsicmd);
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
transfer_len = min(sg->length, len + offset);
transfer_len -= offset;
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index a58c265dc8a..ea8c6994764 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -613,7 +613,7 @@ struct aha152x_scdata {
#define SCNEXT(SCpnt) SCDATA(SCpnt)->next
#define SCSEM(SCpnt) SCDATA(SCpnt)->done
-#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset))
+#define SG_ADDRESS(buffer) ((char *) sg_virt((buffer)))
/* state handling */
static void seldi_run(struct Scsi_Host *shpnt);
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 961a1882cb7..bbcc2c52d79 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -49,7 +49,7 @@
#include "aha1542.h"
#define SCSI_BUF_PA(address) isa_virt_to_bus(address)
-#define SCSI_SG_PA(sgent) (isa_page_to_bus((sgent)->page) + (sgent)->offset)
+#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
static void BAD_DMA(void *address, unsigned int length)
{
@@ -66,8 +66,7 @@ static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
int badseg)
{
printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n",
- badseg, nseg,
- page_address(sgp->page) + sgp->offset,
+ badseg, nseg, sg_virt(sgp),
(unsigned long long)SCSI_SG_PA(sgp),
sgp->length);
@@ -712,8 +711,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
printk(KERN_CRIT "%d: %p %d\n", i,
- (page_address(sg->page) +
- sg->offset), sg->length);
+ sg_virt(sg), sg->length);
};
printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
ptr = (unsigned char *) &cptr[i];
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f81777586b8..f7a252885a5 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1343,7 +1343,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
/* 4 bytes: Areca io control code */
sg = scsi_sglist(cmd);
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
if (scsi_sg_count(cmd) > 1) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
@@ -1593,7 +1593,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
strncpy(&inqdata[32], "R001", 4); /* Product Revision */
sg = scsi_sglist(cmd);
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
memcpy(buffer, inqdata, sizeof(inqdata));
sg = scsi_sglist(cmd);
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 52d0b87e9aa..d1780980fb2 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -515,8 +515,7 @@ static inline void initialize_SCp(Scsi_Cmnd *cmd)
if (cmd->use_sg) {
cmd->SCp.buffer = (struct scatterlist *)cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
- cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
/* ++roman: Try to merge some scatter-buffers if they are at
* contiguous physical addresses.
@@ -2054,8 +2053,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
++cmd->SCp.buffer;
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
/* ++roman: Try to merge some scatter-buffers if
* they are at contiguous physical addresses.
*/
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 96180bb47e4..982c5092be1 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -172,7 +172,7 @@ static void IncStat(struct scsi_pointer *SCp, unsigned int Increment)
SCp->Status = 0;
else {
SCp->buffer++;
- SCp->ptr = page_address(SCp->buffer->page) + SCp->buffer->offset;
+ SCp->ptr = sg_virt(SCp->buffer);
SCp->this_residual = SCp->buffer->length;
}
}
@@ -410,7 +410,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
} else {
cmd->SCp.buffer = cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
}
cmd->SCp.Status = (cmd->SCp.this_residual != 0); /* TRUE as long as bytes
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index 668569e8856..8335b608e57 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -973,7 +973,7 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
if (current_SC->SCp.buffers_residual) {
--current_SC->SCp.buffers_residual;
++current_SC->SCp.buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
} else
break;
@@ -1006,7 +1006,7 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
if (!current_SC->SCp.this_residual && current_SC->SCp.buffers_residual) {
--current_SC->SCp.buffers_residual;
++current_SC->SCp.buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
}
}
@@ -1109,7 +1109,7 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
if (current_SC->use_sg) {
current_SC->SCp.buffer = (struct scatterlist *) current_SC->request_buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
} else {
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 5d282e6a6ae..2cd6b4959eb 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1321,7 +1321,7 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
if (current_SC->SCp.buffers_residual) {
--current_SC->SCp.buffers_residual;
++current_SC->SCp.buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
} else
break;
@@ -1354,7 +1354,7 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
&& current_SC->SCp.buffers_residual) {
--current_SC->SCp.buffers_residual;
++current_SC->SCp.buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
}
}
@@ -1439,8 +1439,7 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
if (scsi_sg_count(current_SC)) {
current_SC->SCp.buffer = scsi_sglist(current_SC);
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page)
- + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
} else {
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 3ac080ee6e2..5ab3ce76248 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2374,18 +2374,18 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
if (cpsum+cpnow > cpcount)
cpnow = cpcount - cpsum;
cpsum += cpnow;
- if (!sl->page) {
+ if (!sg_page(sl)) {
printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
ha->hanum);
return;
}
local_irq_save(flags);
- address = kmap_atomic(sl->page, KM_BIO_SRC_IRQ) + sl->offset;
+ address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
if (to_buffer)
memcpy(buffer, address, cpnow);
else
memcpy(address, buffer, cpnow);
- flush_dcache_page(sl->page);
+ flush_dcache_page(sg_page(sl));
kunmap_atomic(address, KM_BIO_SRC_IRQ);
local_irq_restore(flags);
if (cpsum == cpcount)
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 714e6273a70..db004a45073 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -1828,7 +1828,7 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
BUG_ON(scsi_sg_count(cmd) > 16);
scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
- ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg->page) + sg->offset);
+ ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg_page(sg)) + sg->offset);
ld(shpnt)[ldn].sge[i].byte_length = sg->length;
}
scb->enable |= IM_POINTER_TO_LIST;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 252d1806467..8d0244c2e7d 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -175,18 +175,18 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
while (bcount) {
count = min(pc->sg->length - pc->b_count, bcount);
- if (PageHighMem(pc->sg->page)) {
+ if (PageHighMem(sg_page(pc->sg))) {
unsigned long flags;
local_irq_save(flags);
- buf = kmap_atomic(pc->sg->page, KM_IRQ0) +
+ buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
pc->sg->offset;
drive->hwif->atapi_input_bytes(drive,
buf + pc->b_count, count);
kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
local_irq_restore(flags);
} else {
- buf = page_address(pc->sg->page) + pc->sg->offset;
+ buf = sg_virt(pc->sg);
drive->hwif->atapi_input_bytes(drive,
buf + pc->b_count, count);
}
@@ -212,18 +212,18 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
while (bcount) {
count = min(pc->sg->length - pc->b_count, bcount);
- if (PageHighMem(pc->sg->page)) {
+ if (PageHighMem(sg_page(pc->sg))) {
unsigned long flags;
local_irq_save(flags);
- buf = kmap_atomic(pc->sg->page, KM_IRQ0) +
+ buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
pc->sg->offset;
drive->hwif->atapi_output_bytes(drive,
buf + pc->b_count, count);
kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
local_irq_restore(flags);
} else {
- buf = page_address(pc->sg->page) + pc->sg->offset;
+ buf = sg_virt(pc->sg);
drive->hwif->atapi_output_bytes(drive,
buf + pc->b_count, count);
}
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 74cdc1f0a78..a3d0c6b1495 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -705,9 +705,7 @@ static int imm_completion(struct scsi_cmnd *cmd)
cmd->SCp.buffer++;
cmd->SCp.this_residual =
cmd->SCp.buffer->length;
- cmd->SCp.ptr =
- page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
/*
* Make sure that we transfer even number of bytes
@@ -844,9 +842,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
cmd->SCp.buffer =
(struct scatterlist *) cmd->request_buffer;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr =
- page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
} else {
/* else fill the only available buffer */
cmd->SCp.buffer = NULL;
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index ab7cbf3449c..c8b452f2878 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -372,7 +372,7 @@ static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
if (cmd->use_sg) {
cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
- cmd->SCp.ptr = (char *) page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
@@ -764,7 +764,7 @@ static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir)
++cmd->SCp.buffer;
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
/* Set up hardware registers */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index c316a0bcae6..439b97a6a26 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2872,6 +2872,7 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
}
scatterlist = sglist->scatterlist;
+ sg_init_table(scatterlist, num_elem);
sglist->order = order;
sglist->num_sg = num_elem;
@@ -2884,12 +2885,12 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
/* Free up what we already allocated */
for (j = i - 1; j >= 0; j--)
- __free_pages(scatterlist[j].page, order);
+ __free_pages(sg_page(&scatterlist[j]), order);
kfree(sglist);
return NULL;
}
- scatterlist[i].page = page;
+ sg_set_page(&scatterlist[i], page);
}
return sglist;
@@ -2910,7 +2911,7 @@ static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
int i;
for (i = 0; i < sglist->num_sg; i++)
- __free_pages(sglist->scatterlist[i].page, sglist->order);
+ __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
kfree(sglist);
}
@@ -2940,9 +2941,11 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
scatterlist = sglist->scatterlist;
for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
- kaddr = kmap(scatterlist[i].page);
+ struct page *page = sg_page(&scatterlist[i]);
+
+ kaddr = kmap(page);
memcpy(kaddr, buffer, bsize_elem);
- kunmap(scatterlist[i].page);
+ kunmap(page);
scatterlist[i].length = bsize_elem;
@@ -2953,9 +2956,11 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
}
if (len % bsize_elem) {
- kaddr = kmap(scatterlist[i].page);
+ struct page *page = sg_page(&scatterlist[i]);
+
+ kaddr = kmap(page);
memcpy(kaddr, buffer, len % bsize_elem);
- kunmap(scatterlist[i].page);
+ kunmap(page);
scatterlist[i].length = len % bsize_elem;
}
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index edaac2714c5..5c5a9b2628f 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1515,7 +1515,7 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
/* kmap_atomic() ensures addressability of the user buffer.*/
/* local_irq_save() protects the KM_IRQ0 address slot. */
local_irq_save(flags);
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
buffer[2] == 'P' && buffer[3] == 'P') {
kunmap_atomic(buffer - sg->offset, KM_IRQ0);
@@ -3523,7 +3523,7 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
/* kmap_atomic() ensures addressability of the data buffer.*/
/* local_irq_save() protects the KM_IRQ0 address slot. */
local_irq_save(flags);
- buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+ buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset;
memcpy(buffer, &cdata[xfer_cnt], min_cnt);
kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
local_irq_restore(flags);
@@ -3556,7 +3556,7 @@ ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
/* kmap_atomic() ensures addressability of the data buffer.*/
/* local_irq_save() protects the KM_IRQ0 address slot. */
local_irq_save(flags);
- buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+ buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset;
memcpy(&cdata[xfer_cnt], buffer, min_cnt);
kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
local_irq_restore(flags);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index a21455d0274..6ce4109efdf 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -70,9 +70,7 @@ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
static inline void
iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
{
- ibuf->sg.page = virt_to_page(vbuf);
- ibuf->sg.offset = offset_in_page(vbuf);
- ibuf->sg.length = size;
+ sg_init_one(&ibuf->sg, vbuf, size);
ibuf->sent = 0;
ibuf->use_sendmsg = 1;
}
@@ -80,13 +78,14 @@ iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
static inline void
iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
{
- ibuf->sg.page = sg->page;
+ sg_init_table(&ibuf->sg, 1);
+ sg_set_page(&ibuf->sg, sg_page(sg));
ibuf->sg.offset = sg->offset;
ibuf->sg.length = sg->length;
/*
* Fastpath: sg element fits into single page
*/
- if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
+ if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg_page(sg)))
ibuf->use_sendmsg = 0;
else
ibuf->use_sendmsg = 1;
@@ -716,7 +715,7 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
for (i = tcp_ctask->sg_count; i < scsi_sg_count(sc); i++) {
char *dest;
- dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
+ dest = kmap_atomic(sg_page(&sg[i]), KM_SOFTIRQ0);
rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
sg[i].length, offset);
kunmap_atomic(dest, KM_SOFTIRQ0);
@@ -1103,9 +1102,9 @@ iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
* slab case.
*/
if (buf->use_sendmsg)
- res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
+ res = sock_no_sendpage(sk, sg_page(&buf->sg), offset, size, flags);
else
- res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);
+ res = tcp_conn->sendpage(sk, sg_page(&buf->sg), offset, size, flags);
if (res >= 0) {
conn->txdata_octets += res;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 10d1aff9938..66c65203573 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -658,7 +658,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
struct scatterlist *sg;
sg = scsi_sglist(cmd);
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
memset(buf, 0, cmd->cmnd[4]);
kunmap_atomic(buf - sg->offset, KM_IRQ0);
@@ -1542,10 +1542,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
if( cmd->cmnd[0] == INQUIRY && !islogical ) {
sgl = scsi_sglist(cmd);
- if( sgl->page ) {
- c = *(unsigned char *)
- page_address((&sgl[0])->page) +
- (&sgl[0])->offset;
+ if( sg_page(sgl) ) {
+ c = *(unsigned char *) sg_virt(&sgl[0]);
} else {
printk(KERN_WARNING
"megaraid: invalid sg.\n");
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 78779209ac8..c8923108183 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1584,10 +1584,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
caddr_t vaddr;
sgl = scsi_sglist(scp);
- if (sgl->page) {
- vaddr = (caddr_t)
- (page_address((&sgl[0])->page)
- + (&sgl[0])->offset);
+ if (sg_page(sgl)) {
+ vaddr = (caddr_t) sg_virt(&sgl[0]);
memset(vaddr, 0, scp->cmnd[4]);
}
@@ -2328,10 +2326,8 @@ megaraid_mbox_dpc(unsigned long devp)
&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
sgl = scsi_sglist(scp);
- if (sgl->page) {
- c = *(unsigned char *)
- (page_address((&sgl[0])->page) +
- (&sgl[0])->offset);
+ if (sg_page(sgl)) {
+ c = *(unsigned char *) sg_virt(&sgl[0]);
} else {
con_log(CL_ANN, (KERN_WARNING
"megaraid mailbox: invalid sg:%d\n",
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
index 26a6d55faf3..8e5eadbd5c5 100644
--- a/drivers/scsi/oktagon_esp.c
+++ b/drivers/scsi/oktagon_esp.c
@@ -550,8 +550,7 @@ void dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
- sp->SCp.ptr = page_address(sp->SCp.buffer->page)+
- sp->SCp.buffer->offset;
+ sp->SCp.ptr = sg_virt(sp->SCp.buffer);
}
void dma_mmu_release_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
@@ -564,8 +563,7 @@ void dma_mmu_release_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
void dma_advance_sg(Scsi_Cmnd *sp)
{
- sp->SCp.ptr = page_address(sp->SCp.buffer->page)+
- sp->SCp.buffer->offset;
+ sp->SCp.ptr = sg_virt(sp->SCp.buffer);
}
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 331b789937c..1c5c4b68f20 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -542,7 +542,7 @@ static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int q
if (STp->raw) {
if (STp->buffer->syscall_result) {
for (i=0; i < STp->buffer->sg_segs; i++)
- memset(page_address(STp->buffer->sg[i].page),
+ memset(page_address(sg_page(&STp->buffer->sg[i])),
0, STp->buffer->sg[i].length);
strcpy(STp->buffer->b_data, "READ ERROR ON FRAME");
} else
@@ -4437,7 +4437,7 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
for (i = 0, b_size = 0;
(i < STp->buffer->sg_segs) && ((b_size + STp->buffer->sg[i].length) <= OS_DATA_SIZE);
b_size += STp->buffer->sg[i++].length);
- STp->buffer->aux = (os_aux_t *) (page_address(STp->buffer->sg[i].page) + OS_DATA_SIZE - b_size);
+ STp->buffer->aux = (os_aux_t *) (page_address(sg_page(&STp->buffer->sg[i])) + OS_DATA_SIZE - b_size);
#if DEBUG
printk(OSST_DEB_MSG "%s:D: b_data points to %p in segment 0 at %p\n", name,
STp->buffer->b_data, page_address(STp->buffer->sg[0].page));
@@ -5252,25 +5252,26 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
/* Try to allocate the first segment up to OS_DATA_SIZE and the others
big enough to reach the goal (code assumes no segments in place) */
for (b_size = OS_DATA_SIZE, order = OSST_FIRST_ORDER; b_size >= PAGE_SIZE; order--, b_size /= 2) {
- STbuffer->sg[0].page = alloc_pages(priority, order);
+ struct page *page = alloc_pages(priority, order);
+
STbuffer->sg[0].offset = 0;
- if (STbuffer->sg[0].page != NULL) {
+ if (page != NULL) {
+ sg_set_page(&STbuffer->sg[0], page);
STbuffer->sg[0].length = b_size;
- STbuffer->b_data = page_address(STbuffer->sg[0].page);
+ STbuffer->b_data = page_address(page);
break;
}
}
- if (STbuffer->sg[0].page == NULL) {
+ if (sg_page(&STbuffer->sg[0]) == NULL) {
printk(KERN_NOTICE "osst :I: Can't allocate tape buffer main segment.\n");
return 0;
}
/* Got initial segment of 'bsize,order', continue with same size if possible, except for AUX */
for (segs=STbuffer->sg_segs=1, got=b_size;
segs < max_segs && got < OS_FRAME_SIZE; ) {
- STbuffer->sg[segs].page =
- alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
+ struct page *page = alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
STbuffer->sg[segs].offset = 0;
- if (STbuffer->sg[segs].page == NULL) {
+ if (page == NULL) {
if (OS_FRAME_SIZE - got <= (max_segs - segs) * b_size / 2 && order) {
b_size /= 2; /* Large enough for the rest of the buffers */
order--;
@@ -5284,6 +5285,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
normalize_buffer(STbuffer);
return 0;
}
+ sg_set_page(&STbuffer->sg[segs], page);
STbuffer->sg[segs].length = (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size;
got += STbuffer->sg[segs].length;
STbuffer->buffer_size = got;
@@ -5316,7 +5318,7 @@ static void normalize_buffer(struct osst_buffer *STbuffer)
b_size < STbuffer->sg[i].length;
b_size *= 2, order++);
- __free_pages(STbuffer->sg[i].page, order);
+ __free_pages(sg_page(&STbuffer->sg[i]), order);
STbuffer->buffer_size -= STbuffer->sg[i].length;
}
#if DEBUG
@@ -5344,7 +5346,7 @@ static int append_to_buffer(const char __user *ubp, struct osst_buffer *st_bp, i
for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
cnt = st_bp->sg[i].length - offset < do_count ?
st_bp->sg[i].length - offset : do_count;
- res = copy_from_user(page_address(st_bp->sg[i].page) + offset, ubp, cnt);
+ res = copy_from_user(page_address(sg_page(&st_bp->sg[i])) + offset, ubp, cnt);
if (res)
return (-EFAULT);
do_count -= cnt;
@@ -5377,7 +5379,7 @@ static int from_buffer(struct osst_buffer *st_bp, char __user *ubp, int do_count
for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
cnt = st_bp->sg[i].length - offset < do_count ?
st_bp->sg[i].length - offset : do_count;
- res = copy_to_user(ubp, page_address(st_bp->sg[i].page) + offset, cnt);
+ res = copy_to_user(ubp, page_address(sg_page(&st_bp->sg[i])) + offset, cnt);
if (res)
return (-EFAULT);
do_count -= cnt;
@@ -5410,7 +5412,7 @@ static int osst_zero_buffer_tail(struct osst_buffer *st_bp)
i < st_bp->sg_segs && do_count > 0; i++) {
cnt = st_bp->sg[i].length - offset < do_count ?
st_bp->sg[i].length - offset : do_count ;
- memset(page_address(st_bp->sg[i].page) + offset, 0, cnt);
+ memset(page_address(sg_page(&st_bp->sg[i])) + offset, 0, cnt);
do_count -= cnt;
offset = 0;
}
@@ -5430,7 +5432,7 @@ static int osst_copy_to_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
cnt = st_bp->sg[i].length < do_count ?
st_bp->sg[i].length : do_count ;
- memcpy(page_address(st_bp->sg[i].page), ptr, cnt);
+ memcpy(page_address(sg_page(&st_bp->sg[i])), ptr, cnt);
do_count -= cnt;
ptr += cnt;
}
@@ -5451,7 +5453,7 @@ static int osst_copy_from_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
cnt = st_bp->sg[i].length < do_count ?
st_bp->sg[i].length : do_count ;
- memcpy(ptr, page_address(st_bp->sg[i].page), cnt);
+ memcpy(ptr, page_address(sg_page(&st_bp->sg[i])), cnt);
do_count -= cnt;
ptr += cnt;
}
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index 98397559c53..7db28cd4944 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -393,7 +393,7 @@ enum _burst_mode {
#define MSG_EXT_SDTR 0x01
/* scatter-gather table */
-# define BUFFER_ADDR ((char *)((unsigned int)(SCpnt->SCp.buffer->page) + SCpnt->SCp.buffer->offset))
+# define BUFFER_ADDR ((char *)((sg_virt(SCpnt->SCp.buffer))))
#endif /*__nsp_cs__*/
/* end */
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 190e2a7d706..969b9387a0c 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -443,8 +443,7 @@ SYM53C500_intr(int irq, void *dev_id)
scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
SYM53C500_pio_write(fast_pio, port_base,
- page_address(sg->page) + sg->offset,
- sg->length);
+ sg_virt(sg), sg->length);
}
REG0(port_base);
}
@@ -463,8 +462,7 @@ SYM53C500_intr(int irq, void *dev_id)
scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
SYM53C500_pio_read(fast_pio, port_base,
- page_address(sg->page) + sg->offset,
- sg->length);
+ sg_virt(sg), sg->length);
}
REG0(port_base);
}
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 67b6d76a6c8..67ee51a3d7e 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -608,9 +608,7 @@ static int ppa_completion(struct scsi_cmnd *cmd)
cmd->SCp.buffer++;
cmd->SCp.this_residual =
cmd->SCp.buffer->length;
- cmd->SCp.ptr =
- page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
}
/* Now check to see if the drive is ready to comunicate */
@@ -756,8 +754,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
/* if many buffers are available, start filling the first */
cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
} else {
/* else fill the only available buffer */
cmd->SCp.buffer = NULL;
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 0f43d1d046d..03f19b8d19c 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -111,14 +111,14 @@ static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
req_len = act_len = 0;
scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
if (active) {
- kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
+ kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
len = sgpnt->length;
if ((req_len + len) > buflen) {
active = 0;
len = buflen - req_len;
}
memcpy(kaddr + sgpnt->offset, buf + req_len, len);
- flush_kernel_dcache_page(sgpnt->page);
+ flush_kernel_dcache_page(sg_page(sgpnt));
kunmap_atomic(kaddr, KM_IRQ0);
act_len += len;
}
@@ -147,7 +147,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *cmd, void *buf)
req_len = fin = 0;
scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
- kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
+ kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
len = sgpnt->length;
if ((req_len + len) > buflen) {
len = buflen - req_len;
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 2bfbf26c00e..de7b3bc2cbc 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -317,7 +317,7 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
return ((priv->qabort == 1 ?
DID_ABORT : DID_RESET) << 16);
}
- buf = page_address(sg->page) + sg->offset;
+ buf = sg_virt(sg);
if (ql_pdma(priv, phase, buf, sg->length))
break;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 72ee4c9cfb1..46cae5a212d 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -625,7 +625,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
scsi_for_each_sg(scp, sg, scp->use_sg, k) {
if (active) {
kaddr = (unsigned char *)
- kmap_atomic(sg->page, KM_USER0);
+ kmap_atomic(sg_page(sg), KM_USER0);
if (NULL == kaddr)
return (DID_ERROR << 16);
kaddr_off = (unsigned char *)kaddr + sg->offset;
@@ -672,7 +672,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
sg = scsi_sglist(scp);
req_len = fin = 0;
for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) {
- kaddr = (unsigned char *)kmap_atomic(sg->page, KM_USER0);
+ kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
if (NULL == kaddr)
return -1;
kaddr_off = (unsigned char *)kaddr + sg->offset;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index aac8a02cbe8..61fdaf02f25 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -295,7 +295,7 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
int i, err, nr_vecs = 0;
for_each_sg(sgl, sg, nsegs, i) {
- page = sg->page;
+ page = sg_page(sg);
off = sg->offset;
len = sg->length;
data_len += len;
@@ -764,7 +764,7 @@ struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
if (unlikely(!sgl))
goto enomem;
- memset(sgl, 0, sizeof(*sgl) * sgp->size);
+ sg_init_table(sgl, sgp->size);
/*
* first loop through, set initial index and return value
@@ -781,6 +781,13 @@ struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
/*
+ * if we have nothing left, mark the last segment as
+ * end-of-list
+ */
+ if (!left)
+ sg_mark_end(sgl, this);
+
+ /*
* don't allow subsequent mempool allocs to sleep, it would
* violate the mempool principle.
*/
@@ -2353,7 +2360,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
*offset = *offset - len_complete + sg->offset;
/* Assumption: contiguous pages can be accessed as "page + i" */
- page = nth_page(sg->page, (*offset >> PAGE_SHIFT));
+ page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
*offset &= ~PAGE_MASK;
/* Bytes in this sg-entry from *offset to the end of the page */
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index ce80fa9ad81..b11324479b5 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -999,14 +999,14 @@ connect_loop:
for (i = 0; i < nobuffs; ++i)
printk("scsi%d : buffer %d address = %p length = %d\n",
hostno, i,
- page_address(buffer[i].page) + buffer[i].offset,
+ sg_virt(&buffer[i]),
buffer[i].length);
}
#endif
buffer = (struct scatterlist *) SCint->request_buffer;
len = buffer->length;
- data = page_address(buffer->page) + buffer->offset;
+ data = sg_virt(buffer);
} else {
DPRINTK (DEBUG_SG, "scsi%d : scatter gather not requested.\n", hostno);
buffer = NULL;
@@ -1239,7 +1239,7 @@ connect_loop:
--nobuffs;
++buffer;
len = buffer->length;
- data = page_address(buffer->page) + buffer->offset;
+ data = sg_virt(buffer);
DPRINTK (DEBUG_SG,
"scsi%d : next scatter-gather buffer len = %d address = %08x\n",
hostno, len, data);
@@ -1396,7 +1396,7 @@ connect_loop:
--nobuffs;
++buffer;
len = buffer->length;
- data = page_address(buffer->page) + buffer->offset;
+ data = sg_virt(buffer);
DPRINTK (DEBUG_SG, "scsi%d : next scatter-gather buffer len = %d address = %08x\n", hostno, len, data);
}
break;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 7238b2dfc49..cc197100284 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1169,7 +1169,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
len = vma->vm_end - sa;
len = (len < sg->length) ? len : sg->length;
if (offset < len) {
- page = virt_to_page(page_address(sg->page) + offset);
+ page = virt_to_page(page_address(sg_page(sg)) + offset);
get_page(page); /* increment page count */
break;
}
@@ -1717,13 +1717,13 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
goto out_unlock; */
}
- sgl[0].page = pages[0];
+ sg_set_page(sgl, pages[0]);
sgl[0].offset = uaddr & ~PAGE_MASK;
if (nr_pages > 1) {
sgl[0].length = PAGE_SIZE - sgl[0].offset;
count -= sgl[0].length;
for (i=1; i < nr_pages ; i++) {
- sgl[i].page = pages[i];
+ sg_set_page(&sgl[i], pages[i]);
sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
count -= PAGE_SIZE;
}
@@ -1754,7 +1754,7 @@ st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
int i;
for (i=0; i < nr_pages; i++) {
- struct page *page = sgl[i].page;
+ struct page *page = sg_page(&sgl[i]);
if (dirtied)
SetPageDirty(page);
@@ -1854,7 +1854,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
scatter_elem_sz_prev = ret_sz;
}
}
- sg->page = p;
+ sg_set_page(sg, p);
sg->length = (ret_sz > num) ? num : ret_sz;
SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
@@ -1907,14 +1907,14 @@ sg_write_xfer(Sg_request * srp)
onum = 1;
ksglen = sg->length;
- p = page_address(sg->page);
+ p = page_address(sg_page(sg));
for (j = 0, k = 0; j < onum; ++j) {
res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
if (res)
return res;
for (; p; sg = sg_next(sg), ksglen = sg->length,
- p = page_address(sg->page)) {
+ p = page_address(sg_page(sg))) {
if (usglen <= 0)
break;
if (ksglen > usglen) {
@@ -1991,12 +1991,12 @@ sg_remove_scat(Sg_scatter_hold * schp)
} else {
int k;
- for (k = 0; (k < schp->k_use_sg) && sg->page;
+ for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
++k, sg = sg_next(sg)) {
SCSI_LOG_TIMEOUT(5, printk(
"sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
- k, sg->page, sg->length));
- sg_page_free(sg->page, sg->length);
+ k, sg_page(sg), sg->length));
+ sg_page_free(sg_page(sg), sg->length);
}
}
kfree(schp->buffer);
@@ -2038,7 +2038,7 @@ sg_read_xfer(Sg_request * srp)
} else
onum = 1;
- p = page_address(sg->page);
+ p = page_address(sg_page(sg));
ksglen = sg->length;
for (j = 0, k = 0; j < onum; ++j) {
res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
@@ -2046,7 +2046,7 @@ sg_read_xfer(Sg_request * srp)
return res;
for (; p; sg = sg_next(sg), ksglen = sg->length,
- p = page_address(sg->page)) {
+ p = page_address(sg_page(sg))) {
if (usglen <= 0)
break;
if (ksglen > usglen) {
@@ -2092,15 +2092,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
if ((!outp) || (num_read_xfer <= 0))
return 0;
- for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, sg = sg_next(sg)) {
+ for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) {
num = sg->length;
if (num > num_read_xfer) {
- if (__copy_to_user(outp, page_address(sg->page),
+ if (__copy_to_user(outp, page_address(sg_page(sg)),
num_read_xfer))
return -EFAULT;
break;
} else {
- if (__copy_to_user(outp, page_address(sg->page),
+ if (__copy_to_user(outp, page_address(sg_page(sg)),
num))
return -EFAULT;
num_read_xfer -= num;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 73c44cbdea4..ce69b9efc10 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3797,7 +3797,7 @@ static void buf_to_sg(struct st_buffer *STbp, unsigned int length)
sg = &(STbp->sg[0]);
frp = STbp->frp;
for (i=count=0; count < length; i++) {
- sg[i].page = frp[i].page;
+ sg_set_page(&sg[i], frp[i].page);
if (length - count > frp[i].length)
sg[i].length = frp[i].length;
else
@@ -4446,14 +4446,14 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa
}
/* Populate the scatter/gather list */
- sgl[0].page = pages[0];
+ sg_set_page(&sgl[0], pages[0]);
sgl[0].offset = uaddr & ~PAGE_MASK;
if (nr_pages > 1) {
sgl[0].length = PAGE_SIZE - sgl[0].offset;
count -= sgl[0].length;
for (i=1; i < nr_pages ; i++) {
+ sg_set_page(&sgl[i], pages[i]);
sgl[i].offset = 0;
- sgl[i].page = pages[i];
sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
count -= PAGE_SIZE;
}
@@ -4483,7 +4483,7 @@ static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_p
int i;
for (i=0; i < nr_pages; i++) {
- struct page *page = sgl[i].page;
+ struct page *page = sg_page(&sgl[i]);
if (dirtied)
SetPageDirty(page);
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 4aafe89b557..2dcde373b20 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -272,8 +272,7 @@ static struct scsi_host_template *the_template = NULL;
#define HOSTNO instance->host_no
#define H_NO(cmd) (cmd)->device->host->host_no
-#define SGADDR(buffer) (void *)(((unsigned long)page_address((buffer)->page)) + \
- (buffer)->offset)
+#define SGADDR(buffer) (void *)(((unsigned long)sg_virt(((buffer)))))
#ifdef SUPPORT_TAGS
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 8befab7e983..90cee94d952 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -196,7 +196,7 @@ static unsigned int sym53c416_base_3[2] = {0,0};
#define MAXHOSTS 4
-#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset))
+#define SG_ADDRESS(buffer) ((char *) sg_virt((buffer)))
enum phases
{
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 5c72ca31a47..44193049c4a 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -430,10 +430,7 @@ static __inline__ void dc390_Going_remove (struct dc390_dcb* pDCB, struct dc390_
static struct scatterlist* dc390_sg_build_single(struct scatterlist *sg, void *addr, unsigned int length)
{
- memset(sg, 0, sizeof(struct scatterlist));
- sg->page = virt_to_page(addr);
- sg->length = length;
- sg->offset = (unsigned long)addr & ~PAGE_MASK;
+ sg_init_one(sg, addr, length);
return sg;
}
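
sg_init_one() bundles what the removed lines did by hand. A rough equivalent
under the same assumptions as the earlier sketch (illustrative only; later
kernels also mark the entry as the end of the table):

	static inline void example_sg_init_one(struct scatterlist *sg,
					       void *addr, unsigned int length)
	{
		memset(sg, 0, sizeof(*sg));
		sg_set_page(sg, virt_to_page(addr));	/* two-argument form used here */
		sg->offset = offset_in_page(addr);
		sg->length = length;
	}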
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index ea72bbeb8f9..6d1f0edd798 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -681,7 +681,7 @@ static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
max = scsi_sg_count(SCpnt);
scsi_for_each_sg(SCpnt, sg, max, i) {
- mscp->sglist[i].address = isa_page_to_bus(sg->page) + sg->offset;
+ mscp->sglist[i].address = isa_page_to_bus(sg_page(sg)) + sg->offset;
mscp->sglist[i].num_bytes = sg->length;
transfer_length += sg->length;
}
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 0e8e642fd3b..fdbb92d1f72 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -410,8 +410,7 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd,
if (cmd->use_sg) {
cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
@@ -745,8 +744,7 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
++cmd->SCp.buffer;
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
if (!cmd->SCp.this_residual) /* avoid bogus setups */
return;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 255c611e78b..03cd44f231d 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1123,7 +1123,7 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
any2scsi(scb->maxlen, nseg * sizeof(Sgb));
scsi_for_each_sg(SCpnt, sg, nseg, i) {
- any2scsi(sgb[i].ptr, isa_page_to_bus(sg->page) + sg->offset);
+ any2scsi(sgb[i].ptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
any2scsi(sgb[i].len, sg->length);
}
} else {
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 8dd5a6afd51..90d64a80846 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -437,13 +437,11 @@ int usb_sg_init (
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_IOMMU)
io->urbs[i]->transfer_buffer = NULL;
#else
- io->urbs[i]->transfer_buffer =
- page_address(sg[i].page) + sg[i].offset;
+ io->urbs[i]->transfer_buffer = sg_virt(&sg[i]);
#endif
} else {
/* hc may use _only_ transfer_buffer */
- io->urbs [i]->transfer_buffer =
- page_address (sg [i].page) + sg [i].offset;
+ io->urbs [i]->transfer_buffer = sg_virt(&sg[i]);
len = sg [i].length;
}
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index e7d982a7154..91e999c9f68 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -519,8 +519,7 @@ static void mts_do_sg (struct urb* transfer)
context->fragment++;
mts_int_submit_urb(transfer,
context->data_pipe,
- page_address(sg[context->fragment].page) +
- sg[context->fragment].offset,
+ sg_virt(&sg[context->fragment]),
sg[context->fragment].length,
context->fragment + 1 == scsi_sg_count(context->srb) ?
mts_data_done : mts_do_sg);
@@ -557,7 +556,7 @@ mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc)
return;
} else {
sg = scsi_sglist(srb);
- desc->context.data = page_address(sg[0].page) + sg[0].offset;
+ desc->context.data = sg_virt(&sg[0]);
desc->context.data_length = sg[0].length;
}
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index e901d31e051..ea316214648 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -360,9 +360,9 @@ static void free_sglist (struct scatterlist *sg, int nents)
if (!sg)
return;
for (i = 0; i < nents; i++) {
- if (!sg [i].page)
+ if (!sg_page(&sg[i]))
continue;
- kfree (page_address (sg [i].page) + sg [i].offset);
+ kfree (sg_virt(&sg[i]));
}
kfree (sg);
}
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index cc8f7c52c72..889622baac2 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -195,7 +195,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
* the *offset and *index values for the next loop. */
cnt = 0;
while (cnt < buflen) {
- struct page *page = sg->page +
+ struct page *page = sg_page(sg) +
((sg->offset + *offset) >> PAGE_SHIFT);
unsigned int poff =
(sg->offset + *offset) & (PAGE_SIZE-1);