Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/motherboard.c          |  2
-rw-r--r--  drivers/char/mem.c                  | 12
-rw-r--r--  drivers/scsi/dc395x.c               | 48
-rw-r--r--  drivers/video/fbmem.c               |  4
-rw-r--r--  drivers/video/intelfb/intelfbdrv.c  | 50
5 files changed, 51 insertions, 65 deletions
diff --git a/drivers/acpi/motherboard.c b/drivers/acpi/motherboard.c
index 2934475d67d..61ea70742d4 100644
--- a/drivers/acpi/motherboard.c
+++ b/drivers/acpi/motherboard.c
@@ -43,7 +43,7 @@ ACPI_MODULE_NAME ("acpi_motherboard")
*/
#define IS_RESERVED_ADDR(base, len) \
(((len) > 0) && ((base) > 0) && ((base) + (len) < IO_SPACE_LIMIT) \
- && ((base) + (len) > 0x1000))
+ && ((base) + (len) > PCIBIOS_MIN_IO))
/*
* Clearing the flag (IORESOURCE_BUSY) allows drivers to use
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 42187381506..850a78c9c4b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -261,7 +261,11 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
- unsigned long long val;
+ unsigned long pfn;
+
+ /* Turn a kernel-virtual address into a physical page frame */
+ pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
+
/*
* RED-PEN: on some architectures there is more mapped memory
* than available in mem_map which pfn_valid checks
@@ -269,10 +273,10 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
*
* RED-PEN: vmalloc is not supported right now.
*/
- if (!pfn_valid(vma->vm_pgoff))
+ if (!pfn_valid(pfn))
return -EIO;
- val = (u64)vma->vm_pgoff << PAGE_SHIFT;
- vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
+
+ vma->vm_pgoff = pfn;
return mmap_mem(file, vma);
}
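
[Note: the mmap_kmem() hunk above amounts to converting the kernel-virtual offset into a real page frame before validating it, instead of feeding the raw vm_pgoff to pfn_valid(). A minimal sketch of the new flow, using the same helpers as the patch (assumption: a direct-mapped kernel address; as the RED-PEN comment notes, vmalloc is still unsupported):

	unsigned long kvaddr = (u64)vma->vm_pgoff << PAGE_SHIFT; /* kernel-virtual address */
	unsigned long pfn    = __pa(kvaddr) >> PAGE_SHIFT;       /* its physical page frame */

	if (!pfn_valid(pfn))            /* validate the physical frame, not the raw offset */
		return -EIO;

	vma->vm_pgoff = pfn;            /* mmap_mem() expects a physical pfn from here on */
	return mmap_mem(file, vma);
]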
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 929170dcd3c..600ba120286 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -183,7 +183,7 @@
* cross a page boundy.
*/
#define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
-#define VIRTX_LEN (sizeof(void *) * DC395x_MAX_SG_LISTENTRY)
+
struct SGentry {
u32 address; /* bus! address */
@@ -235,7 +235,6 @@ struct ScsiReqBlk {
u8 sg_count; /* No of HW sg entries for this request */
u8 sg_index; /* Index of HW sg entry for this request */
u32 total_xfer_length; /* Total number of bytes remaining to be transfered */
- void **virt_map;
unsigned char *virt_addr; /* Virtual address of current transfer position */
/*
@@ -1022,14 +1021,14 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
reqlen, cmd->request_buffer, cmd->use_sg,
srb->sg_count);
+ srb->virt_addr = page_address(sl->page);
for (i = 0; i < srb->sg_count; i++) {
- u32 seglen = (u32)sg_dma_len(sl + i);
- sgp[i].address = (u32)sg_dma_address(sl + i);
+ u32 busaddr = (u32)sg_dma_address(&sl[i]);
+ u32 seglen = (u32)sl[i].length;
+ sgp[i].address = busaddr;
sgp[i].length = seglen;
srb->total_xfer_length += seglen;
- srb->virt_map[i] = kmap(sl[i].page);
}
- srb->virt_addr = srb->virt_map[0];
sgp += srb->sg_count - 1;
/*
@@ -1976,7 +1975,6 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
int segment = cmd->use_sg;
u32 xferred = srb->total_xfer_length - left; /* bytes transfered */
struct SGentry *psge = srb->segment_x + srb->sg_index;
- void **virt = srb->virt_map;
dprintkdbg(DBG_0,
"sg_update_list: Transfered %i of %i bytes, %i remain\n",
@@ -2016,16 +2014,16 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
/* We have to walk the scatterlist to find it */
sg = (struct scatterlist *)cmd->request_buffer;
- idx = 0;
while (segment--) {
unsigned long mask =
~((unsigned long)sg->length - 1) & PAGE_MASK;
if ((sg_dma_address(sg) & mask) == (psge->address & mask)) {
- srb->virt_addr = virt[idx] + (psge->address & ~PAGE_MASK);
+ srb->virt_addr = (page_address(sg->page)
+ + psge->address -
+ (psge->address & PAGE_MASK));
return;
}
++sg;
- ++idx;
}
dprintkl(KERN_ERR, "sg_update_list: sg_to_virt failed\n");
@@ -2151,7 +2149,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
}
/*
- * calculate all the residue data that not yet transfered
+ * calculate all the residue data that not yet tranfered
* SCSI transfer counter + left in SCSI FIFO data
*
* .....TRM_S1040_SCSI_COUNTER (24bits)
@@ -3269,7 +3267,6 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
struct scsi_cmnd *cmd = srb->cmd;
enum dma_data_direction dir = cmd->sc_data_direction;
if (cmd->use_sg && dir != PCI_DMA_NONE) {
- int i;
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
@@ -3279,8 +3276,6 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
cmd->use_sg, cmd->request_buffer);
/* unmap the sg segments */
- for (i = 0; i < srb->sg_count; i++)
- kunmap(virt_to_page(srb->virt_map[i]));
pci_unmap_sg(acb->dev,
(struct scatterlist *)cmd->request_buffer,
cmd->use_sg, dir);
@@ -3327,7 +3322,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
if (cmd->use_sg) {
struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
- ptr = (struct ScsiInqData *)(srb->virt_map[0] + sg->offset);
+ ptr = (struct ScsiInqData *)(page_address(sg->page) + sg->offset);
} else {
ptr = (struct ScsiInqData *)(cmd->request_buffer);
}
@@ -4262,9 +4257,8 @@ static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
- kfree(acb->srb_array[i].segment_x);
-
- vfree(acb->srb_array[0].virt_map);
+ if (acb->srb_array[i].segment_x)
+ kfree(acb->srb_array[i].segment_x);
}
@@ -4280,12 +4274,9 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
int srb_idx = 0;
unsigned i = 0;
struct SGentry *ptr;
- void **virt_array;
- for (i = 0; i < DC395x_MAX_SRB_CNT; i++) {
+ for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;
- acb->srb_array[i].virt_map = NULL;
- }
dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
while (pages--) {
@@ -4306,19 +4297,6 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
ptr + (i * DC395x_MAX_SG_LISTENTRY);
else
dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
-
- virt_array = vmalloc((DC395x_MAX_SRB_CNT + 1) * DC395x_MAX_SG_LISTENTRY * sizeof(void*));
-
- if (!virt_array) {
- adapter_sg_tables_free(acb);
- return 1;
- }
-
- for (i = 0; i < DC395x_MAX_SRB_CNT + 1; i++) {
- acb->srb_array[i].virt_map = virt_array;
- virt_array += DC395x_MAX_SG_LISTENTRY;
- }
-
return 0;
}
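
[Note: across the dc395x.c hunks, the per-request virt_map[] array and its kmap()/kunmap() bookkeeping are removed; the CPU-side pointer is taken straight from the scatterlist page instead. A rough sketch of the replacement pattern in build_srb() (assumption: the pages handed to the driver sit in the kernel's direct mapping, so page_address() yields a usable virtual address; field names follow the 2.6-era scatterlist):

	/* hardware SG list: bus address and length for each mapped segment */
	for (i = 0; i < srb->sg_count; i++) {
		sgp[i].address = (u32)sg_dma_address(&sl[i]);
		sgp[i].length  = (u32)sl[i].length;
		srb->total_xfer_length += sgp[i].length;
	}
	/* CPU view of the current position: first page, via the direct mapping */
	srb->virt_addr = page_address(sl->page);

sg_update_list() follows the same idea: page_address(sg->page) plus the offset of the bus address within its page replaces the cached virt_map[idx] lookup.]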
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index d2e19f6dd72..4ff853fbe0b 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -628,7 +628,7 @@ fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
int
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
- int err;
+ int err, flags = info->flags;
if (var->activate & FB_ACTIVATE_INV_MODE) {
struct fb_videomode mode1, mode2;
@@ -682,7 +682,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
!list_empty(&info->modelist))
err = fb_add_videomode(&mode, &info->modelist);
- if (!err && info->flags & FBINFO_MISC_USEREVENT) {
+ if (!err && (flags & FBINFO_MISC_USEREVENT)) {
struct fb_event event;
info->flags &= ~FBINFO_MISC_USEREVENT;
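
[Note: the fb_set_var() change takes a snapshot of info->flags on entry and tests that snapshot, so the decision to raise the user event is not lost if FBINFO_MISC_USEREVENT is modified while the new mode is being applied. A condensed sketch of the pattern (names as in the hunk; what happens in between is only summarised):

	int err, flags = info->flags;   /* snapshot before any mode-setting work */

	/* ... validate and apply the mode; info->flags may change underneath ... */

	if (!err && (flags & FBINFO_MISC_USEREVENT)) {   /* test the entry-time value */
		info->flags &= ~FBINFO_MISC_USEREVENT;
		/* notify interested parties of the mode change */
	}
]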
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 298bc9cd99e..a112a178685 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -583,23 +583,6 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- /* Map the fb and MMIO regions */
- dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
- (dinfo->aperture.physical, dinfo->aperture.size);
- if (!dinfo->aperture.virtual) {
- ERR_MSG("Cannot remap FB region.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
- dinfo->mmio_base =
- (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
- INTEL_REG_SIZE);
- if (!dinfo->mmio_base) {
- ERR_MSG("Cannot remap MMIO region.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
-
/* Get the chipset info. */
dinfo->pci_chipset = pdev->device;
@@ -630,9 +613,15 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
dinfo->accel = 0;
}
+ if (MB(voffset) < stolen_size)
+ offset = (stolen_size >> 12);
+ else
+ offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
+
/* Framebuffer parameters - Use all the stolen memory if >= vram */
- if (ROUND_UP_TO_PAGE(stolen_size) >= MB(vram)) {
+ if (ROUND_UP_TO_PAGE(stolen_size) >= ((offset << 12) + MB(vram))) {
dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size);
+ dinfo->fb.offset = 0;
dinfo->fbmem_gart = 0;
} else {
dinfo->fb.size = MB(vram);
@@ -663,11 +652,6 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- if (MB(voffset) < stolen_size)
- offset = (stolen_size >> 12);
- else
- offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
-
/* set the mem offsets - set them after the already used pages */
if (dinfo->accel) {
dinfo->ring.offset = offset + gtt_info.current_memory;
@@ -682,6 +666,26 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
+ (dinfo->cursor.size >> 12);
}
+ /* Map the fb and MMIO regions */
+ /* ioremap only up to the end of used aperture */
+ dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
+ (dinfo->aperture.physical, (dinfo->fb.offset << 12)
+ + dinfo->fb.size);
+ if (!dinfo->aperture.virtual) {
+ ERR_MSG("Cannot remap FB region.\n");
+ cleanup(dinfo);
+ return -ENODEV;
+ }
+
+ dinfo->mmio_base =
+ (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
+ INTEL_REG_SIZE);
+ if (!dinfo->mmio_base) {
+ ERR_MSG("Cannot remap MMIO region.\n");
+ cleanup(dinfo);
+ return -ENODEV;
+ }
+
/* Allocate memories (which aren't stolen) */
if (dinfo->accel) {
if (!(dinfo->gtt_ring_mem =