author     Paul Mackerras <paulus@samba.org>   2007-09-20 10:09:27 +1000
committer  Paul Mackerras <paulus@samba.org>   2007-09-20 10:09:27 +1000
commit     0ce49a3945474fc942ec37c0c0efece60f592f80
tree       f42b821b2d9e2d8775bc22f56d444c2cc7b7b7dd /drivers/char/mspec.c
parent     9e4859ef5462193643fd2b3c8ffb298e5a4a4319
parent     a88a8eff1e6e32d3288986a9d36c6a449c032d3a
Merge branch 'linux-2.6'
Diffstat (limited to 'drivers/char/mspec.c')
-rw-r--r--   drivers/char/mspec.c   69
1 file changed, 48 insertions(+), 21 deletions(-)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index c08a4152ee8..049a46cc9f8 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -67,7 +67,7 @@
 /*
  * Page types allocated by the device.
  */
-enum {
+enum mspec_page_type {
 	MSPEC_FETCHOP = 1,
 	MSPEC_CACHED,
 	MSPEC_UNCACHED
@@ -83,15 +83,25 @@ static int is_sn2;
  * One of these structures is allocated when an mspec region is mmaped. The
  * structure is pointed to by the vma->vm_private_data field in the vma struct.
  * This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vma's that are split off from the
+ * original vma when split_vma()'s are done.
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * protect in fork case where multiple tasks share the vma_data.
  */
 struct vma_data {
 	atomic_t refcnt;	/* Number of vmas sharing the data. */
-	spinlock_t lock;	/* Serialize access to the vma. */
+	spinlock_t lock;	/* Serialize access to this structure. */
 	int count;		/* Number of pages allocated. */
-	int type;		/* Type of pages allocated. */
+	enum mspec_page_type type;	/* Type of pages allocated. */
+	int flags;		/* See VMD_xxx below. */
+	unsigned long vm_start;	/* Original (unsplit) base. */
+	unsigned long vm_end;	/* Original (unsplit) end. */
 	unsigned long maddr[0];	/* Array of MSPEC addresses. */
 };
 
+#define VMD_VMALLOCED 0x1	/* vmalloc'd rather than kmalloc'd */
+
 /* used on shub2 to clear FOP cache in the HUB */
 static unsigned long scratch_page[MAX_NUMNODES];
 #define SH2_AMO_CACHE_ENTRIES	4
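The comment block added to struct vma_data documents a subtle lifetime rule: after split_vma() several vmas point at one vma_data, and after fork several tasks do, and those open/close paths share no common lock, so the reference count must be safe to touch on its own while every other field stays under vdata->lock. For readers outside the kernel tree, here is a standalone C11 sketch of that refcount pattern; it is a model, not the driver's code, and region_data, region_open, region_close, and RD_BIGALLOC are invented names.

```c
/* Standalone C11 model of the vma_data lifetime rule (not driver code).
 * Build: cc -std=c11 -c refcnt_model.c
 */
#include <stdatomic.h>
#include <stdlib.h>

struct region_data {               /* stand-in for struct vma_data */
	atomic_int refcnt;         /* one reference per mapping sharing us */
	int flags;                 /* remembers which allocator created us */
};

#define RD_BIGALLOC 0x1            /* plays the role of VMD_VMALLOCED */

/* fork/split path: callers hold no common lock, so the bump is atomic */
static void region_open(struct region_data *rd)
{
	atomic_fetch_add(&rd->refcnt, 1);
}

/* unmap path: only the task that drops the last reference frees, and
 * it must free with the allocator that created rd (the driver picks
 * vfree() or kfree() from the VMD_VMALLOCED flag). */
static void region_close(struct region_data *rd)
{
	if (atomic_fetch_sub(&rd->refcnt, 1) != 1)
		return;            /* other mappings still reference rd */
	free(rd);
}
```

In the patch itself, mspec_open() is exactly this increment, and the tail of mspec_close() is the decrement plus the flag-directed vfree()/kfree().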
@@ -129,8 +139,8 @@ mspec_zero_block(unsigned long addr, int len)
  * mspec_open
  *
  * Called when a device mapping is created by a means other than mmap
- * (via fork, etc.).  Increments the reference count on the underlying
- * mspec data so it is not freed prematurely.
+ * (via fork, munmap, etc.).  Increments the reference count on the
+ * underlying mspec data so it is not freed prematurely.
  */
 static void
 mspec_open(struct vm_area_struct *vma)
@@ -151,34 +161,44 @@
 static void
 mspec_close(struct vm_area_struct *vma)
 {
 	struct vma_data *vdata;
-	int i, pages, result, vdata_size;
+	int index, last_index, result;
+	unsigned long my_page;
 
 	vdata = vma->vm_private_data;
-	if (!atomic_dec_and_test(&vdata->refcnt))
-		return;
 
-	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
-	for (i = 0; i < pages; i++) {
-		if (vdata->maddr[i] == 0)
+	BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+
+	spin_lock(&vdata->lock);
+	index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
+	last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+	for (; index < last_index; index++) {
+		if (vdata->maddr[index] == 0)
 			continue;
 		/*
 		 * Clear the page before sticking it back
 		 * into the pool.
 		 */
-		result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
+		my_page = vdata->maddr[index];
+		vdata->maddr[index] = 0;
+		spin_unlock(&vdata->lock);
+		result = mspec_zero_block(my_page, PAGE_SIZE);
 		if (!result)
-			uncached_free_page(vdata->maddr[i]);
+			uncached_free_page(my_page);
 		else
 			printk(KERN_WARNING "mspec_close(): "
 			       "failed to zero page %i\n", result);
+		spin_lock(&vdata->lock);
 	}
+	spin_unlock(&vdata->lock);
 
-	if (vdata_size <= PAGE_SIZE)
-		kfree(vdata);
-	else
+	if (!atomic_dec_and_test(&vdata->refcnt))
+		return;
+
+	if (vdata->flags & VMD_VMALLOCED)
 		vfree(vdata);
+	else
+		kfree(vdata);
 }
 
 /*
@@ -195,7 +215,8 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
 	int index;
 	struct vma_data *vdata = vma->vm_private_data;
 
-	index = (address - vma->vm_start) >> PAGE_SHIFT;
+	BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
+	index = (address - vdata->vm_start) >> PAGE_SHIFT;
 	maddr = (volatile unsigned long) vdata->maddr[index];
 	if (maddr == 0) {
 		maddr = uncached_alloc_page(numa_node_id());
@@ -237,10 +258,11 @@ static struct vm_operations_struct mspec_vm_ops = {
  * underlying pages.
  */
 static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+					enum mspec_page_type type)
 {
 	struct vma_data *vdata;
-	int pages, vdata_size;
+	int pages, vdata_size, flags = 0;
 
 	if (vma->vm_pgoff != 0)
 		return -EINVAL;
@@ -255,12 +277,17 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
 	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
 	if (vdata_size <= PAGE_SIZE)
 		vdata = kmalloc(vdata_size, GFP_KERNEL);
-	else
+	else {
 		vdata = vmalloc(vdata_size);
+		flags = VMD_VMALLOCED;
+	}
 	if (!vdata)
 		return -ENOMEM;
 	memset(vdata, 0, vdata_size);
 
+	vdata->vm_start = vma->vm_start;
+	vdata->vm_end = vma->vm_end;
+	vdata->flags = flags;
 	vdata->type = type;
 	spin_lock_init(&vdata->lock);
 	vdata->refcnt = ATOMIC_INIT(1);
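The heart of the race fix is the reworked loop in mspec_close(): a page is claimed under vdata->lock (copied into my_page and its maddr[] slot zeroed), the lock is dropped for the slow zero-and-free step, and then retaken before moving to the next slot. Because the slot is cleared while the lock is still held, no concurrent closer or fault handler can pick up the same page twice. Below is a minimal userspace sketch of this claim-then-drop-the-lock pattern, with a pthread mutex standing in for the spinlock; page_table, slow_release, and release_range are invented names, not the driver's API.

```c
/* Standalone sketch of the claim-then-drop-the-lock loop (not driver code).
 * Build: cc -std=c11 -pthread release_model.c
 */
#include <pthread.h>

#define NSLOTS 16

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long page_table[NSLOTS];   /* stand-in for vdata->maddr[] */

/* Slow work that must not run under the lock; in the driver this is
 * the mspec_zero_block() + uncached_free_page() step. */
static void slow_release(unsigned long page)
{
	(void)page;
}

static void release_range(int first, int last)
{
	unsigned long my_page;

	pthread_mutex_lock(&table_lock);
	for (int i = first; i < last; i++) {
		if (page_table[i] == 0)
			continue;
		my_page = page_table[i];   /* claim the page... */
		page_table[i] = 0;         /* ...so nobody else can free it */
		pthread_mutex_unlock(&table_lock);
		slow_release(my_page);     /* slow work, lock not held */
		pthread_mutex_lock(&table_lock);
	}
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	release_range(0, NSLOTS);
	return 0;
}
```

The vm_start/vm_end fields saved at mmap time make this safe for split vmas as well: both mspec_close() and mspec_nopfn() rebase their index against the original, unsplit base, so each vma touches only its own slice of maddr[], while the atomic refcnt decides which closer finally frees the structure itself.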