Diffstat (limited to 'drivers/scsi/sg.c')
 drivers/scsi/sg.c | 37 +++----------------------------------
 1 file changed, 3 insertions(+), 34 deletions(-)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index a8b05ce5de5..7405d0df95d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1139,32 +1139,6 @@ sg_fasync(int fd, struct file *filp, int mode)
return (retval < 0) ? retval : 0;
}
-/* When startFinish==1 increments page counts for pages other than the
- first of scatter gather elements obtained from alloc_pages().
- When startFinish==0 decrements ... */
-static void
-sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
-{
- struct scatterlist *sg = rsv_schp->buffer;
- struct page *page;
- int k, m;
-
- SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
- startFinish, rsv_schp->k_use_sg));
- /* N.B. correction _not_ applied to base page of each allocation */
- for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
- for (m = PAGE_SIZE; m < sg->length; m += PAGE_SIZE) {
- page = sg->page;
- if (startFinish)
- get_page(page);
- else {
- if (page_count(page) > 0)
- __put_page(page);
- }
- }
- }
-}
-
static struct page *
sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
{
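
The helper removed above existed because sg hands out individual pages of its higher-order reserve-buffer allocations through the nopage handler below, and the fault path takes a reference on each page it returns; without compound pages those references land on tail pages whose counts the allocator does not track, so sg_rb_correct4mmap had to inflate them once per fd at first mmap and deflate them again at teardown. The patch instead allocates with __GFP_COMP (last hunk), turning each higher-order allocation into a single compound page whose reference count is kept on the head page. A minimal sketch of that behaviour; the helper name alloc_compound_chunk and the GFP_KERNEL flag are illustrative assumptions, not code from the driver:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: with __GFP_COMP, 2^order contiguous pages form one compound page. */
static struct page *alloc_compound_chunk(unsigned int order)
{
	struct page *p = alloc_pages(GFP_KERNEL | __GFP_COMP | __GFP_NOWARN,
				     order);

	if (p && order > 0) {
		get_page(p + 1);	/* a reference on a sub-page is counted on the head */
		put_page(p + 1);	/* drops that same reference */
	}
	return p;			/* caller releases with __free_pages(p, order) */
}
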
@@ -1236,10 +1210,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
sa += len;
}
- if (0 == sfp->mmap_called) {
- sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */
- sfp->mmap_called = 1;
- }
+ sfp->mmap_called = 1;
vma->vm_flags |= VM_RESERVED;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
@@ -2388,8 +2359,6 @@ __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
SCSI_LOG_TIMEOUT(6,
printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
(int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
- if (sfp->mmap_called)
- sg_rb_correct4mmap(&sfp->reserve, 0); /* undo correction */
sg_remove_scat(&sfp->reserve);
}
sfp->parentdp = NULL;
@@ -2471,9 +2440,9 @@ sg_page_malloc(int rqSz, int lowDma, int *retSzp)
return resp;
if (lowDma)
- page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
+ page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
else
- page_mask = GFP_ATOMIC | __GFP_NOWARN;
+ page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
order++, a_size <<= 1) ;
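
The unchanged loop at the end of this hunk picks the smallest order for which PAGE_SIZE << order covers rqSz, and with __GFP_COMP now in the mask each reserve-buffer chunk becomes one compound page. A hedged sketch of an equivalent calculation using the kernel's get_order() helper; the function name sg_alloc_order_example and the use of alloc_pages() are assumptions for illustration, not the driver's actual sg_page_malloc():

#include <linux/gfp.h>
#include <asm/page.h>	/* get_order() */

static struct page *sg_alloc_order_example(int rqSz, int lowDma)
{
	gfp_t mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
	int order;

	if (rqSz <= 0)
		return NULL;
	if (lowDma)
		mask |= GFP_DMA;
	order = get_order(rqSz);	/* e.g. rqSz == 3 * PAGE_SIZE gives order 2 */
	return alloc_pages(mask, order);	/* free with __free_pages(page, order) */
}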