-rw-r--r--  drivers/md/dm-crypt.c | 19
-rw-r--r--  mm/swap_state.c       | 27
2 files changed, 13 insertions, 33 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 77619a56e2b..0dd6c2b5391 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -331,25 +331,19 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 	struct bio *bio;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-	unsigned long flags = current->flags;
 	unsigned int i;
 
 	/*
-	 * Tell VM to act less aggressively and fail earlier.
-	 * This is not necessary but increases throughput.
+	 * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
+	 * to fail earlier. This is not necessary but increases throughput.
 	 * FIXME: Is this really intelligent?
 	 */
-	current->flags &= ~PF_MEMALLOC;
-
 	if (base_bio)
-		bio = bio_clone(base_bio, GFP_NOIO);
+		bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
 	else
-		bio = bio_alloc(GFP_NOIO, nr_iovecs);
-	if (!bio) {
-		if (flags & PF_MEMALLOC)
-			current->flags |= PF_MEMALLOC;
+		bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
+	if (!bio)
 		return NULL;
-	}
 
 	/* if the last bio was not complete, continue where that one ended */
 	bio->bi_idx = *bio_vec_idx;
@@ -386,9 +380,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		size -= bv->bv_len;
 	}
 
-	if (flags & PF_MEMALLOC)
-		current->flags |= PF_MEMALLOC;
-
 	if (!bio->bi_size) {
 		bio_put(bio);
 		return NULL;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index a063a902ed0..4f251775ef9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -143,7 +143,6 @@ void __delete_from_swap_cache(struct page *page)
 int add_to_swap(struct page * page)
 {
 	swp_entry_t entry;
-	int pf_flags;
 	int err;
 
 	if (!PageLocked(page))
@@ -154,29 +153,19 @@ int add_to_swap(struct page * page)
 	if (!entry.val)
 		return 0;
 
-	/* Radix-tree node allocations are performing
-	 * GFP_ATOMIC allocations under PF_MEMALLOC.
-	 * They can completely exhaust the page allocator.
-	 *
-	 * So PF_MEMALLOC is dropped here. This causes the slab
-	 * allocations to fail earlier, so radix-tree nodes will
-	 * then be allocated from the mempool reserves.
+	/*
+	 * Radix-tree node allocations from PF_MEMALLOC contexts could
+	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
+	 * stops emergency reserves from being allocated.
 	 *
-	 * We're still using __GFP_HIGH for radix-tree node
-	 * allocations, so some of the emergency pools are available,
-	 * just not all of them.
+	 * TODO: this could cause a theoretical memory reclaim
+	 * deadlock in the swap out path.
 	 */
-
-	pf_flags = current->flags;
-	current->flags &= ~PF_MEMALLOC;
-
 	/*
 	 * Add it to the swap cache and mark it dirty
 	 */
-	err = __add_to_swap_cache(page, entry, GFP_ATOMIC|__GFP_NOWARN);
-
-	if (pf_flags & PF_MEMALLOC)
-		current->flags |= PF_MEMALLOC;
+	err = __add_to_swap_cache(page, entry,
+			GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
 
 	switch (err) {
 	case 0:				/* Success */
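
For context, a minimal sketch of the two patterns the patch trades between. The helper names below are hypothetical; only bio_alloc(), GFP_NOIO, __GFP_NOMEMALLOC, PF_MEMALLOC and current->flags come from the diff above. The old approach saved, cleared and restored the task-wide PF_MEMALLOC flag around the allocation; the new approach leaves the task flags alone and tags just the one allocation with __GFP_NOMEMALLOC so it never dips into the emergency reserves.

#include <linux/bio.h>
#include <linux/sched.h>

/* Old pattern: temporarily drop PF_MEMALLOC for the whole task. */
static struct bio *alloc_bio_old_style(unsigned int nr_iovecs)
{
	unsigned long flags = current->flags;	/* remember original task flags */
	struct bio *bio;

	current->flags &= ~PF_MEMALLOC;		/* stop dipping into reserves */
	bio = bio_alloc(GFP_NOIO, nr_iovecs);
	if (flags & PF_MEMALLOC)		/* must restore on every exit path */
		current->flags |= PF_MEMALLOC;
	return bio;
}

/* New pattern: mark only this allocation; task flags stay untouched. */
static struct bio *alloc_bio_new_style(unsigned int nr_iovecs)
{
	return bio_alloc(GFP_NOIO | __GFP_NOMEMALLOC, nr_iovecs);
}

The per-allocation flag removes the save/restore bookkeeping entirely, which is why the error path in crypt_alloc_buffer() above shrinks from a braced block that re-sets PF_MEMALLOC to a bare return NULL.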