Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpiosxf.h | 4
-rw-r--r--  include/acpi/processor.h | 5
-rw-r--r--  include/drm/drm_pciids.h | 10
-rw-r--r--  include/drm/radeon_drm.h | 29
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 20
-rw-r--r--  include/drm/ttm/ttm_module.h | 2
-rw-r--r--  include/linux/bitmap.h | 18
-rw-r--r--  include/linux/blkdev.h | 1
-rw-r--r--  include/linux/cb710.h | 29
-rw-r--r--  include/linux/cgroup.h | 28
-rw-r--r--  include/linux/clocksource.h | 14
-rw-r--r--  include/linux/cpumask.h | 20
-rw-r--r--  include/linux/decompress/generic.h | 32
-rw-r--r--  include/linux/flex_array.h | 49
-rw-r--r--  include/linux/fs.h | 6
-rw-r--r--  include/linux/ftrace_event.h | 12
-rw-r--r--  include/linux/gen_stats.h | 5
-rw-r--r--  include/linux/hugetlb.h | 6
-rw-r--r--  include/linux/inetdevice.h | 2
-rw-r--r--  include/linux/input/matrix_keypad.h | 13
-rw-r--r--  include/linux/iocontext.h | 2
-rw-r--r--  include/linux/kvm_host.h | 1
-rw-r--r--  include/linux/lguest.h | 39
-rw-r--r--  include/linux/lguest_launcher.h | 18
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/lmb.h | 2
-rw-r--r--  include/linux/mm.h | 15
-rw-r--r--  include/linux/mm_types.h | 2
-rw-r--r--  include/linux/mtd/mtd.h | 2
-rw-r--r--  include/linux/mtd/partitions.h | 2
-rw-r--r--  include/linux/nfs_fs.h | 5
-rw-r--r--  include/linux/nodemask.h | 28
-rw-r--r--  include/linux/perf_counter.h | 73
-rw-r--r--  include/linux/pps.h | 2
-rw-r--r--  include/linux/scatterlist.h | 2
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/linux/security.h | 24
-rw-r--r--  include/linux/tty.h | 1
-rw-r--r--  include/linux/tty_ldisc.h | 2
-rw-r--r--  include/linux/ucb1400.h | 4
-rw-r--r--  include/linux/uio.h | 17
-rw-r--r--  include/linux/virtio_blk.h | 6
-rw-r--r--  include/linux/virtio_config.h | 3
-rw-r--r--  include/linux/virtio_net.h | 6
-rw-r--r--  include/linux/virtio_ring.h | 12
-rw-r--r--  include/linux/wait.h | 9
-rw-r--r--  include/net/act_api.h | 2
-rw-r--r--  include/net/bluetooth/rfcomm.h | 12
-rw-r--r--  include/net/cfg80211.h | 5
-rw-r--r--  include/net/gen_stats.h | 10
-rw-r--r--  include/net/netfilter/xt_rateest.h | 2
-rw-r--r--  include/net/pkt_sched.h | 4
-rw-r--r--  include/net/sch_generic.h | 2
-rw-r--r--  include/trace/ftrace.h | 183
54 files changed, 590 insertions, 214 deletions
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 3e798593b17..ab0b85cf21f 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -242,6 +242,10 @@ acpi_os_derive_pci_id(acpi_handle rhandle,
acpi_status acpi_os_validate_interface(char *interface);
acpi_status acpi_osi_invalidate(char* interface);
+acpi_status
+acpi_os_validate_address(u8 space_id, acpi_physical_address address,
+ acpi_size length, char *name);
+
u64 acpi_os_get_timer(void);
acpi_status acpi_os_signal(u32 function, void *info);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index baf1e0a9a7e..740ac3ad8fd 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -174,7 +174,7 @@ struct acpi_processor_throttling {
cpumask_var_t shared_cpu_map;
int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
- int state);
+ int state, bool force);
u32 address;
u8 duty_offset;
@@ -321,7 +321,8 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
/* in processor_throttling.c */
int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
int acpi_processor_get_throttling_info(struct acpi_processor *pr);
-extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+extern int acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force);
extern const struct file_operations acpi_processor_throttling_fops;
extern void acpi_processor_throttling_init(void);
/* in processor_idle.c */
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 7174818c2c1..853508499d2 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -257,9 +257,12 @@
{0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
@@ -288,6 +291,7 @@
{0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9495, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
@@ -325,6 +329,7 @@
{0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -365,6 +370,11 @@
{0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 41862e9a4c2..2ba61e18fc8 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -506,6 +506,9 @@ typedef struct {
#define DRM_RADEON_GEM_WAIT_IDLE 0x24
#define DRM_RADEON_CS 0x26
#define DRM_RADEON_INFO 0x27
+#define DRM_RADEON_GEM_SET_TILING 0x28
+#define DRM_RADEON_GEM_GET_TILING 0x29
+#define DRM_RADEON_GEM_BUSY 0x2a
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -544,7 +547,9 @@ typedef struct {
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
-
+#define DRM_IOCTL_RADEON_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
+#define DRM_IOCTL_RADEON_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
+#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
typedef struct drm_radeon_init {
enum {
@@ -704,6 +709,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
#define RADEON_PARAM_DEVICE_ID 16
+#define RADEON_PARAM_NUM_Z_PIPES 17 /* num Z pipes */
typedef struct drm_radeon_getparam {
int param;
@@ -796,6 +802,24 @@ struct drm_radeon_gem_create {
uint32_t flags;
};
+#define RADEON_TILING_MACRO 0x1
+#define RADEON_TILING_MICRO 0x2
+#define RADEON_TILING_SWAP 0x4
+#define RADEON_TILING_SURFACE 0x8 /* this object requires a surface
+ * when mapped - i.e. front buffer */
+
+struct drm_radeon_gem_set_tiling {
+ uint32_t handle;
+ uint32_t tiling_flags;
+ uint32_t pitch;
+};
+
+struct drm_radeon_gem_get_tiling {
+ uint32_t handle;
+ uint32_t tiling_flags;
+ uint32_t pitch;
+};
+
struct drm_radeon_gem_mmap {
uint32_t handle;
uint32_t pad;
@@ -817,7 +841,7 @@ struct drm_radeon_gem_wait_idle {
struct drm_radeon_gem_busy {
uint32_t handle;
- uint32_t busy;
+ uint32_t domain;
};
struct drm_radeon_gem_pread {
@@ -874,6 +898,7 @@ struct drm_radeon_cs {
#define RADEON_INFO_DEVICE_ID 0x00
#define RADEON_INFO_NUM_GB_PIPES 0x01
+#define RADEON_INFO_NUM_Z_PIPES 0x02
struct drm_radeon_info {
uint32_t request;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 62ed733c52a..a68829db381 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -121,6 +121,7 @@ struct ttm_backend {
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
+#define TTM_PAGE_FLAG_DMA32 (1 << 7)
enum ttm_caching_state {
tt_uncached,
@@ -353,6 +354,14 @@ struct ttm_bo_driver {
int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
void (*sync_obj_unref) (void **sync_obj);
void *(*sync_obj_ref) (void *sync_obj);
+
+ /* hook to notify driver about a driver move so it
+ * can do tiling things */
+ void (*move_notify)(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem);
+ /* notify the driver we are taking a fault on this BO
+ * and have reserved it */
+ void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
};
#define TTM_NUM_MEM_TYPES 8
@@ -429,6 +438,8 @@ struct ttm_bo_device {
*/
struct delayed_work wq;
+
+ bool need_dma32;
};
/**
@@ -648,7 +659,14 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_mem_global *mem_glob,
struct ttm_bo_driver *driver,
- uint64_t file_page_offset);
+ uint64_t file_page_offset, bool need_dma32);
+
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ */
+extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/**
* ttm_bo_reserve:
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h
index 889a4c7958a..d1d433834e4 100644
--- a/include/drm/ttm/ttm_module.h
+++ b/include/drm/ttm/ttm_module.h
@@ -33,7 +33,7 @@
#include <linux/kernel.h>
-#define TTM_PFX "[TTM]"
+#define TTM_PFX "[TTM] "
enum ttm_global_types {
TTM_GLOBAL_TTM_MEM = 0,
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 2878811c613..756d78b8c1c 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -94,13 +94,13 @@ extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits);
-extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
-extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
@@ -171,13 +171,12 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
}
}
-static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
+static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
- *dst = *src1 & *src2;
- else
- __bitmap_and(dst, src1, src2, nbits);
+ return (*dst = *src1 & *src2) != 0;
+ return __bitmap_and(dst, src1, src2, nbits);
}
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
@@ -198,13 +197,12 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if (small_const_nbits(nbits))
- *dst = *src1 & ~(*src2);
- else
- __bitmap_andnot(dst, src1, src2, nbits);
+ return (*dst = *src1 & ~(*src2)) != 0;
+ return __bitmap_andnot(dst, src1, src2, nbits);
}
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
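For context, a minimal caller-side sketch, not part of this patch, of what the new int return of bitmap_and()/bitmap_andnot() buys: the result can be tested for emptiness without a separate scan (the helper name is made up).

#include <linux/bitmap.h>

/* Illustrative helper: true iff "a & b" has at least one bit set. */
static bool masks_intersect(unsigned long *dst, const unsigned long *a,
                            const unsigned long *b, int nbits)
{
        return bitmap_and(dst, a, b, nbits) != 0;
}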
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e7cb5dbf6c2..69103e053c9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -913,6 +913,7 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned short)
extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
+extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
diff --git a/include/linux/cb710.h b/include/linux/cb710.h
index 63bc9a4d292..8cc10411bab 100644
--- a/include/linux/cb710.h
+++ b/include/linux/cb710.h
@@ -140,29 +140,6 @@ void cb710_dump_regs(struct cb710_chip *chip, unsigned dump);
#include <linux/highmem.h>
#include <linux/scatterlist.h>
-/**
- * cb710_sg_miter_stop_writing - stop mapping iteration after writing
- * @miter: sg mapping iter to be stopped
- *
- * Description:
- * Stops mapping iterator @miter. @miter should have been started
- * started using sg_miter_start(). A stopped iteration can be
- * resumed by calling sg_miter_next() on it. This is useful when
- * resources (kmap) need to be released during iteration.
- *
- * This is a convenience wrapper that will be optimized out for arches
- * that don't need flush_kernel_dcache_page().
- *
- * Context:
- * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
- */
-static inline void cb710_sg_miter_stop_writing(struct sg_mapping_iter *miter)
-{
- if (miter->page)
- flush_kernel_dcache_page(miter->page);
- sg_miter_stop(miter);
-}
-
/*
* 32-bit PIO mapping sg iterator
*
@@ -171,12 +148,12 @@ static inline void cb710_sg_miter_stop_writing(struct sg_mapping_iter *miter)
* without DMA support).
*
* Best-case reading (transfer from device):
- * sg_miter_start();
+ * sg_miter_start(, SG_MITER_TO_SG);
* cb710_sg_dwiter_write_from_io();
- * cb710_sg_miter_stop_writing();
+ * sg_miter_stop();
*
* Best-case writing (transfer to device):
- * sg_miter_start();
+ * sg_miter_start(, SG_MITER_FROM_SG);
* cb710_sg_dwiter_read_to_io();
* sg_miter_stop();
*/
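A hedged sketch of the "best-case reading" flow described above, now that the transfer direction is passed to sg_miter_start() instead of using the removed cb710_sg_miter_stop_writing() wrapper (the function and the FIFO copy are illustrative only):

#include <linux/scatterlist.h>

/* Illustrative only: drain a device FIFO into an sg list. */
static void read_from_device(struct scatterlist *sgl, unsigned int nents)
{
        struct sg_mapping_iter miter;

        sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
        while (sg_miter_next(&miter)) {
                /* copy up to miter.length bytes from the device
                 * into miter.addr here */
        }
        sg_miter_stop(&miter);
}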
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 665fa70e409..90bba9e6228 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -179,14 +179,11 @@ struct cgroup {
*/
struct list_head release_list;
- /* pids_mutex protects the fields below */
+ /* pids_mutex protects pids_list and cached pid arrays. */
struct rw_semaphore pids_mutex;
- /* Array of process ids in the cgroup */
- pid_t *tasks_pids;
- /* How many files are using the current tasks_pids array */
- int pids_use_count;
- /* Length of the current tasks_pids array */
- int pids_length;
+
+ /* Linked list of struct cgroup_pids */
+ struct list_head pids_list;
/* For RCU-protected deletion */
struct rcu_head rcu_head;
@@ -366,6 +363,23 @@ int cgroup_task_count(const struct cgroup *cgrp);
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
/*
+ * When the subsys has to access css and may add permanent refcnt to css,
+ * it should take care of race conditions with rmdir(). The following set of
+ * functions is for stopping/restarting rmdir if necessary.
+ * Because these will call css_get/put, "css" should be a live css.
+ *
+ * cgroup_exclude_rmdir();
+ * ...do some jobs which may access arbitrary empty cgroup
+ * cgroup_release_and_wakeup_rmdir();
+ *
+ * When someone removes a cgroup while cgroup_exclude_rmdir() holds it,
+ * it sleeps and cgroup_release_and_wakeup_rmdir() will wake it up.
+ */
+
+void cgroup_exclude_rmdir(struct cgroup_subsys_state *css);
+void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css);
+
+/*
* Control Group subsystem type.
* See Documentation/cgroups/cgroups.txt for details
*/
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index c56457c8334..1219be4fb42 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -293,7 +293,12 @@ static inline int clocksource_enable(struct clocksource *cs)
if (cs->enable)
ret = cs->enable(cs);
- /* save mult_orig on enable */
+ /*
+ * The frequency may have changed while the clocksource
+ * was disabled. If so the code in ->enable() must update
+ * the mult value to reflect the new frequency. Make sure
+ * mult_orig follows this change.
+ */
cs->mult_orig = cs->mult;
return ret;
@@ -309,6 +314,13 @@ static inline int clocksource_enable(struct clocksource *cs)
*/
static inline void clocksource_disable(struct clocksource *cs)
{
+ /*
+ * Save mult_orig in mult so clocksource_enable() can
+ * restore the value regardless if ->enable() updates
+ * the value of mult or not.
+ */
+ cs->mult = cs->mult_orig;
+
if (cs->disable)
cs->disable(cs);
}
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c5ac87ca7bc..796df12091b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -43,10 +43,10 @@
* int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
* int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
*
- * void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
+ * int cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
* void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
* void cpus_xor(dst, src1, src2) dst = src1 ^ src2
- * void cpus_andnot(dst, src1, src2) dst = src1 & ~src2
+ * int cpus_andnot(dst, src1, src2) dst = src1 & ~src2
* void cpus_complement(dst, src) dst = ~src
*
* int cpus_equal(mask1, mask2) Does mask1 == mask2?
@@ -179,10 +179,10 @@ static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
}
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
- bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+ return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
@@ -201,10 +201,10 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
#define cpus_andnot(dst, src1, src2) \
__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
- bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+ return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
@@ -738,11 +738,11 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_and(struct cpumask *dstp,
+static inline int cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
- bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
+ return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), nr_cpumask_bits);
}
@@ -779,11 +779,11 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_andnot(struct cpumask *dstp,
+static inline int cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
- bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
+ return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), nr_cpumask_bits);
}
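As with the bitmap helpers, a small hedged example (not from this patch) of using the new return value of cpumask_and() to detect an empty intersection in one step:

#include <linux/cpumask.h>
#include <linux/errno.h>

/* Illustrative: restrict a requested mask to online CPUs, failing if
 * nothing is left. */
static int pick_online_subset(struct cpumask *dst,
                              const struct cpumask *requested)
{
        if (!cpumask_and(dst, requested, cpu_online_mask))
                return -EINVAL;
        return 0;
}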
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
index 6dfb856327b..0c7111a55a1 100644
--- a/include/linux/decompress/generic.h
+++ b/include/linux/decompress/generic.h
@@ -1,31 +1,37 @@
#ifndef DECOMPRESS_GENERIC_H
#define DECOMPRESS_GENERIC_H
-/* Minimal chunksize to be read.
- *Bzip2 prefers at least 4096
- *Lzma prefers 0x10000 */
-#define COMPR_IOBUF_SIZE 4096
-
typedef int (*decompress_fn) (unsigned char *inbuf, int len,
int(*fill)(void*, unsigned int),
- int(*writebb)(void*, unsigned int),
- unsigned char *output,
+ int(*flush)(void*, unsigned int),
+ unsigned char *outbuf,
int *posp,
void(*error)(char *x));
/* inbuf - input buffer
*len - len of pre-read data in inbuf
- *fill - function to fill inbuf if empty
- *writebb - function to write out outbug
+ *fill - function to fill inbuf when empty
+ *flush - function to write out outbuf
+ *outbuf - output buffer
*posp - if non-null, input position (number of bytes read) will be
* returned here
*
- *If len != 0, the inbuf is initialized (with as much data), and fill
- *should not be called
- *If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE
- *fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
+ *If len != 0, inbuf should contain all the necessary input data, and fill
+ *should be NULL
+ *If len = 0, inbuf can be NULL, in which case the decompressor will allocate
+ *the input buffer. If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes.
+ *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE
+ *bytes should be read per call. Replace XXX with the appropriate decompressor
+ *name, i.e. LZMA_IOBUF_SIZE.
+ *
+ *If flush = NULL, outbuf must be large enough to buffer all the expected
+ *output. If flush != NULL, the output buffer will be allocated by the
+ *decompressor (outbuf = NULL), and the flush function will be called to
+ *flush the output buffer at the appropriate time (decompressor and stream
+ *dependent).
*/
+
/* Utility routine to detect the decompression method */
decompress_fn decompress_method(const unsigned char *inbuf, int len,
const char **name);
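A hedged sketch of driving this interface under the updated contract; my_flush() and my_error() are hypothetical callbacks, and because flush is non-NULL the output buffer argument is NULL as described above:

#include <linux/decompress/generic.h>

static int my_flush(void *buf, unsigned int len);       /* hypothetical */
static void my_error(char *msg);                        /* hypothetical */

/* Illustrative: all input is already in inbuf, so fill is NULL. */
static void try_decompress(unsigned char *inbuf, int len)
{
        const char *name;
        int pos = 0;
        decompress_fn decomp = decompress_method(inbuf, len, &name);

        if (decomp)
                decomp(inbuf, len, NULL, my_flush, NULL, &pos, my_error);
}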
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
new file mode 100644
index 00000000000..45ff1849151
--- /dev/null
+++ b/include/linux/flex_array.h
@@ -0,0 +1,49 @@
+#ifndef _FLEX_ARRAY_H
+#define _FLEX_ARRAY_H
+
+#include <linux/types.h>
+#include <asm/page.h>
+
+#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
+#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE
+
+struct flex_array_part;
+
+/*
+ * This is meant to replace cases where an array-like
+ * structure has gotten too big to fit into kmalloc()
+ * and the developer is getting tempted to use
+ * vmalloc().
+ */
+
+struct flex_array {
+ union {
+ struct {
+ int element_size;
+ int total_nr_elements;
+ struct flex_array_part *parts[];
+ };
+ /*
+ * This little trick makes sure that
+ * sizeof(flex_array) == PAGE_SIZE
+ */
+ char padding[FLEX_ARRAY_BASE_SIZE];
+ };
+};
+
+#define FLEX_ARRAY_INIT(size, total) { { {\
+ .element_size = (size), \
+ .total_nr_elements = (total), \
+} } }
+
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+ gfp_t flags);
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+ unsigned int end, gfp_t flags);
+void flex_array_free(struct flex_array *fa);
+void flex_array_free_parts(struct flex_array *fa);
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+ gfp_t flags);
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
+
+#endif /* _FLEX_ARRAY_H */
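A hedged usage sketch of the API declared above; struct foo, the element count and the caller are placeholders, not part of this patch:

#include <linux/flex_array.h>
#include <linux/gfp.h>

struct foo { int val; };        /* placeholder element type */

/* Illustrative: copy nr elements into a flex_array, freeing it on error;
 * elements are read back later with flex_array_get(fa, i). */
static struct flex_array *store_items(struct foo *items, unsigned int nr)
{
        struct flex_array *fa;
        unsigned int i;

        fa = flex_array_alloc(sizeof(*items), nr, GFP_KERNEL);
        if (!fa)
                return NULL;

        for (i = 0; i < nr; i++) {
                if (flex_array_put(fa, i, &items[i], GFP_KERNEL)) {
                        flex_array_free(fa);
                        return NULL;
                }
        }
        return fa;
}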
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0872372184f..73e9b643e45 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1946,6 +1946,7 @@ extern void putname(const char *name);
extern int register_blkdev(unsigned int, const char *);
extern void unregister_blkdev(unsigned int, const char *);
extern struct block_device *bdget(dev_t);
+extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
@@ -2122,7 +2123,7 @@ extern struct file *do_filp_open(int dfd, const char *pathname,
int open_flag, int mode, int acc_mode);
extern int may_open(struct path *, int, int);
-extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
+extern int kernel_read(struct file *, loff_t, char *, unsigned long);
extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
@@ -2136,7 +2137,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
-extern struct inode * inode_init_always(struct super_block *, struct inode *);
+extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
extern void inode_add_to_lists(struct super_block *, struct inode *);
extern void iput(struct inode *);
@@ -2163,6 +2164,7 @@ extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void destroy_inode(struct inode *);
+extern void __destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
extern int should_remove_suid(struct dentry *);
extern int file_remove_suid(struct file *);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5c093ffc655..a81170de7f6 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -89,7 +89,9 @@ enum print_line_t {
TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
};
-
+void tracing_generic_entry_update(struct trace_entry *entry,
+ unsigned long flags,
+ int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(int type, unsigned long len,
unsigned long flags, int pc);
@@ -119,11 +121,9 @@ struct ftrace_event_call {
void *filter;
void *mod;
-#ifdef CONFIG_EVENT_PROFILE
- atomic_t profile_count;
- int (*profile_enable)(struct ftrace_event_call *);
- void (*profile_disable)(struct ftrace_event_call *);
-#endif
+ atomic_t profile_count;
+ int (*profile_enable)(struct ftrace_event_call *);
+ void (*profile_disable)(struct ftrace_event_call *);
};
#define MAX_FILTER_PRED 32
diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h
index 0ffa41df0ee..710e901085d 100644
--- a/include/linux/gen_stats.h
+++ b/include/linux/gen_stats.h
@@ -22,6 +22,11 @@ struct gnet_stats_basic
{
__u64 bytes;
__u32 packets;
+};
+struct gnet_stats_basic_packed
+{
+ __u64 bytes;
+ __u32 packets;
} __attribute__ ((packed));
/**
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2723513a565..5cbc620bdfe 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -10,6 +10,7 @@
#include <asm/tlbflush.h>
struct ctl_table;
+struct user_struct;
int PageHuge(struct page *page);
@@ -146,7 +147,8 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, size_t, int);
+struct file *hugetlb_file_setup(const char *name, size_t size, int acct,
+ struct user_struct **user);
int hugetlb_get_quota(struct address_space *mapping, long delta);
void hugetlb_put_quota(struct address_space *mapping, long delta);
@@ -168,7 +170,7 @@ static inline void set_file_hugepages(struct file *file)
#define is_file_hugepages(file) 0
#define set_file_hugepages(file) BUG()
-#define hugetlb_file_setup(name,size,acctflag) ERR_PTR(-ENOSYS)
+#define hugetlb_file_setup(name,size,acct,user) ERR_PTR(-ENOSYS)
#endif /* !CONFIG_HUGETLBFS */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index acef2a770b6..ad27c7da879 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -82,7 +82,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
-#define IN_DEV_RPFILTER(in_dev) IN_DEV_ANDCONF((in_dev), RP_FILTER)
+#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
ACCEPT_SOURCE_ROUTE)
#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 7964516c695..15d5903af2d 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -15,12 +15,13 @@
#define KEY_COL(k) (((k) >> 16) & 0xff)
#define KEY_VAL(k) ((k) & 0xffff)
+#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col))
+
/**
* struct matrix_keymap_data - keymap for matrix keyboards
* @keymap: pointer to array of uint32 values encoded with KEY() macro
* representing keymap
* @keymap_size: number of entries (initialized) in this keymap
- * @max_keymap_size: maximum size of keymap supported by the device
*
* This structure is supposed to be used by platform code to supply
* keymaps to drivers that implement matrix-like keypads/keyboards.
@@ -28,14 +29,13 @@
struct matrix_keymap_data {
const uint32_t *keymap;
unsigned int keymap_size;
- unsigned int max_keymap_size;
};
/**
* struct matrix_keypad_platform_data - platform-dependent keypad data
* @keymap_data: pointer to &matrix_keymap_data
- * @row_gpios: array of gpio numbers reporesenting rows
- * @col_gpios: array of gpio numbers reporesenting colums
+ * @row_gpios: pointer to array of gpio numbers representing rows
+ * @col_gpios: pointer to array of gpio numbers representing columns
* @num_row_gpios: actual number of row gpios used by device
* @num_col_gpios: actual number of col gpios used by device
* @col_scan_delay_us: delay, measured in microseconds, that is
@@ -48,8 +48,9 @@ struct matrix_keymap_data {
struct matrix_keypad_platform_data {
const struct matrix_keymap_data *keymap_data;
- unsigned int row_gpios[MATRIX_MAX_ROWS];
- unsigned int col_gpios[MATRIX_MAX_COLS];
+ const unsigned int *row_gpios;
+ const unsigned int *col_gpios;
+
unsigned int num_row_gpios;
unsigned int num_col_gpios;
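A hedged board-code sketch of the new pointer-based GPIO arrays; the GPIO numbers, keymap and symbol names are made up:

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

static const uint32_t board_keymap[] = {
        KEY(0, 0, KEY_A), KEY(0, 1, KEY_B),
        KEY(1, 0, KEY_C), KEY(1, 1, KEY_D),
};

static const struct matrix_keymap_data board_keymap_data = {
        .keymap         = board_keymap,
        .keymap_size    = ARRAY_SIZE(board_keymap),
};

static const unsigned int board_row_gpios[] = { 88, 89 };
static const unsigned int board_col_gpios[] = { 90, 91 };

static struct matrix_keypad_platform_data board_keypad_pdata = {
        .keymap_data    = &board_keymap_data,
        .row_gpios      = board_row_gpios,
        .col_gpios      = board_col_gpios,
        .num_row_gpios  = ARRAY_SIZE(board_row_gpios),
        .num_col_gpios  = ARRAY_SIZE(board_col_gpios),
};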
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index dd05434fa45..4da4a75c3f1 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -92,7 +92,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
* a race).
*/
if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
- atomic_long_inc(&ioc->refcount);
+ atomic_inc(&ioc->nr_tasks);
return ioc;
}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 16713dc672e..3060bdc35ff 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -110,6 +110,7 @@ struct kvm_memory_slot {
struct kvm_kernel_irq_routing_entry {
u32 gsi;
+ u32 type;
int (*set)(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int level);
union {
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index dbf2479e808..2fb1dcbcb5a 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -1,5 +1,7 @@
-/* Things the lguest guest needs to know. Note: like all lguest interfaces,
- * this is subject to wild and random change between versions. */
+/*
+ * Things the lguest guest needs to know. Note: like all lguest interfaces,
+ * this is subject to wild and random change between versions.
+ */
#ifndef _LINUX_LGUEST_H
#define _LINUX_LGUEST_H
@@ -11,32 +13,41 @@
#define LG_CLOCK_MIN_DELTA 100UL
#define LG_CLOCK_MAX_DELTA ULONG_MAX
-/*G:031 The second method of communicating with the Host is to via "struct
+/*G:031
+ * The second method of communicating with the Host is via "struct
* lguest_data". Once the Guest's initialization hypercall tells the Host where
- * this is, the Guest and Host both publish information in it. :*/
-struct lguest_data
-{
- /* 512 == enabled (same as eflags in normal hardware). The Guest
- * changes interrupts so often that a hypercall is too slow. */
+ * this is, the Guest and Host both publish information in it.
+:*/
+struct lguest_data {
+ /*
+ * 512 == enabled (same as eflags in normal hardware). The Guest
+ * changes interrupts so often that a hypercall is too slow.
+ */
unsigned int irq_enabled;
/* Fine-grained interrupt disabling by the Guest */
DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
- /* The Host writes the virtual address of the last page fault here,
+ /*
+ * The Host writes the virtual address of the last page fault here,
* which saves the Guest a hypercall. CR2 is the native register where
- * this address would normally be found. */
+ * this address would normally be found.
+ */
unsigned long cr2;
/* Wallclock time set by the Host. */
struct timespec time;
- /* Interrupt pending set by the Host. The Guest should do a hypercall
- * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */
+ /*
+ * Interrupt pending set by the Host. The Guest should do a hypercall
+ * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF).
+ */
int irq_pending;
- /* Async hypercall ring. Instead of directly making hypercalls, we can
+ /*
+ * Async hypercall ring. Instead of directly making hypercalls, we can
* place them in here for processing the next time the Host wants.
- * This batching can be quite efficient. */
+ * This batching can be quite efficient.
+ */
/* 0xFF == done (set by Host), 0 == pending (set by Guest). */
u8 hcall_status[LHCALL_RING_SIZE];
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index bfefbdf7498..495203ff221 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -29,8 +29,10 @@ struct lguest_device_desc {
__u8 type;
/* The number of virtqueues (first in config array) */
__u8 num_vq;
- /* The number of bytes of feature bits. Multiply by 2: one for host
- * features and one for Guest acknowledgements. */
+ /*
+ * The number of bytes of feature bits. Multiply by 2: one for host
+ * features and one for Guest acknowledgements.
+ */
__u8 feature_len;
/* The number of bytes of the config array after virtqueues. */
__u8 config_len;
@@ -39,8 +41,10 @@ struct lguest_device_desc {
__u8 config[0];
};
-/*D:135 This is how we expect the device configuration field for a virtqueue
- * to be laid out in config space. */
+/*D:135
+ * This is how we expect the device configuration field for a virtqueue
+ * to be laid out in config space.
+ */
struct lguest_vqconfig {
/* The number of entries in the virtio_ring */
__u16 num;
@@ -61,7 +65,9 @@ enum lguest_req
LHREQ_EVENTFD, /* + address, fd. */
};
-/* The alignment to use between consumer and producer parts of vring.
- * x86 pagesize for historical reasons. */
+/*
+ * The alignment to use between consumer and producer parts of vring.
+ * x86 pagesize for historical reasons.
+ */
#define LGUEST_VRING_ALIGN 4096
#endif /* _LINUX_LGUEST_LAUNCHER */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 79b6d7fd4ac..e5b6e33c657 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -589,6 +589,7 @@ struct ata_device {
#endif
/* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */
u64 n_sectors; /* size of device, if ATA */
+ u64 n_native_sectors; /* native size, if ATA */
unsigned int class; /* ATA_DEV_xxx */
unsigned long unpark_deadline;
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index c46c89505da..2442e3f3d03 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -51,7 +51,7 @@ extern u64 __init lmb_alloc_base(u64 size,
extern u64 __init __lmb_alloc_base(u64 size,
u64 align, u64 max_addr);
extern u64 __init lmb_phys_mem_size(void);
-extern u64 __init lmb_end_of_DRAM(void);
+extern u64 lmb_end_of_DRAM(void);
extern void __init lmb_enforce_memory_limit(u64 memory_limit);
extern int __init lmb_is_reserved(u64 addr);
extern int lmb_find(struct lmb_property *res);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ba3a7cb1eaa..9a72cc78e6b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -34,8 +34,6 @@ extern int sysctl_legacy_va_layout;
#define sysctl_legacy_va_layout 0
#endif
-extern unsigned long mmap_min_addr;
-
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -575,19 +573,6 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
}
/*
- * If a hint addr is less than mmap_min_addr change hint to be as
- * low as possible but still greater than mmap_min_addr
- */
-static inline unsigned long round_hint_to_min(unsigned long hint)
-{
- hint &= PAGE_MASK;
- if (((void *)hint != NULL) &&
- (hint < mmap_min_addr))
- return PAGE_ALIGN(mmap_min_addr);
- return hint;
-}
-
-/*
* Some inline functions in vmstat.h depend on page_zone()
*/
#include <linux/vmstat.h>
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7acc8439d9b..0042090a4d7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -240,8 +240,6 @@ struct mm_struct {
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
- s8 oom_adj; /* OOM kill score adjustment (bit shift) */
-
cpumask_t cpu_vm_mask;
/* Architecture-specific MM context */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 5675b63a063..0f32a9b6ff5 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -251,7 +251,7 @@ struct mtd_info {
static inline struct mtd_info *dev_to_mtd(struct device *dev)
{
- return dev ? container_of(dev, struct mtd_info, dev) : NULL;
+ return dev ? dev_get_drvdata(dev) : NULL;
}
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index af6dcb992bc..b70313d33ff 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -47,6 +47,8 @@ struct mtd_partition {
#define MTDPART_SIZ_FULL (0)
+struct mtd_info;
+
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index fdffb413b19..f6b90240dd4 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -473,7 +473,6 @@ extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct page *page);
extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
-extern void nfs_writedata_release(void *);
/*
* Try to write back everything synchronously (but check the
@@ -488,7 +487,6 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_write_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_write_data *wdata);
-extern void nfs_commitdata_release(void *wdata);
#else
static inline int
nfs_commit_inode(struct inode *inode, int how)
@@ -507,6 +505,7 @@ nfs_have_writebacks(struct inode *inode)
* Allocate nfs_write_data structures
*/
extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
+extern void nfs_writedata_free(struct nfs_write_data *);
/*
* linux/fs/nfs/read.c
@@ -515,7 +514,6 @@ extern int nfs_readpage(struct file *, struct page *);
extern int nfs_readpages(struct file *, struct address_space *,
struct list_head *, unsigned);
extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
-extern void nfs_readdata_release(void *data);
extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
struct page *);
@@ -523,6 +521,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
* Allocate nfs_read_data structures
*/
extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
+extern void nfs_readdata_free(struct nfs_read_data *);
/*
* linux/fs/nfs3proc.c
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 829b94b156f..b359c4a9ec9 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -82,6 +82,12 @@
* to generate slightly worse code. So use a simple one-line #define
* for node_isset(), instead of wrapping an inline inside a macro, the
* way we do the other calls.
+ *
+ * NODEMASK_SCRATCH
+ * When doing the above logical AND, OR, XOR and Remap operations, callers tend to
+ * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large,
+ * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper
+ * for such situations. See below and CPUMASK_ALLOC also.
*/
#include <linux/kernel.h>
@@ -473,4 +479,26 @@ static inline int num_node_state(enum node_states state)
#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
+/*
+ * For the nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h)
+ */
+
+#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
+#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
+#define NODEMASK_FREE(m) kfree(m)
+#else
+#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m
+#define NODEMASK_FREE(m)
+#endif
+
+/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
+struct nodemask_scratch {
+ nodemask_t mask1;
+ nodemask_t mask2;
+};
+
+#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x)
+#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
+
+
#endif /* __LINUX_NODEMASK_H */
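A hedged sketch of the scratch helpers defined above; the function is illustrative, and the NULL check only matters for the kmalloc() variant (NODES_SHIFT > 8) but is harmless otherwise:

#include <linux/nodemask.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int rebind_nodes(void)
{
        NODEMASK_SCRATCH(scratch);

        if (!scratch)
                return -ENOMEM;

        /* ... use scratch->mask1 and scratch->mask2 as temporaries ... */

        NODEMASK_SCRATCH_FREE(scratch);
        return 0;
}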
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index bd15d7a5f5c..b53f7006cc4 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -115,27 +115,44 @@ enum perf_counter_sample_format {
PERF_SAMPLE_TID = 1U << 1,
PERF_SAMPLE_TIME = 1U << 2,
PERF_SAMPLE_ADDR = 1U << 3,
- PERF_SAMPLE_GROUP = 1U << 4,
+ PERF_SAMPLE_READ = 1U << 4,
PERF_SAMPLE_CALLCHAIN = 1U << 5,
PERF_SAMPLE_ID = 1U << 6,
PERF_SAMPLE_CPU = 1U << 7,
PERF_SAMPLE_PERIOD = 1U << 8,
PERF_SAMPLE_STREAM_ID = 1U << 9,
+ PERF_SAMPLE_RAW = 1U << 10,
- PERF_SAMPLE_MAX = 1U << 10, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
};
/*
- * Bits that can be set in attr.read_format to request that
- * reads on the counter should return the indicated quantities,
- * in increasing order of bit value, after the counter value.
+ * The format of the data returned by read() on a perf counter fd,
+ * as specified by attr.read_format:
+ *
+ * struct read_format {
+ * { u64 value;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 id; } && PERF_FORMAT_ID
+ * } && !PERF_FORMAT_GROUP
+ *
+ * { u64 nr;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 value;
+ * { u64 id; } && PERF_FORMAT_ID
+ * } cntr[nr];
+ * } && PERF_FORMAT_GROUP
+ * };
*/
enum perf_counter_read_format {
PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
+ PERF_FORMAT_GROUP = 1U << 3,
- PERF_FORMAT_MAX = 1U << 3, /* non-ABI */
+ PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
};
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
@@ -181,8 +198,9 @@ struct perf_counter_attr {
freq : 1, /* use freq, not period */
inherit_stat : 1, /* per task counts */
enable_on_exec : 1, /* next exec enables */
+ task : 1, /* trace fork/exit */
- __reserved_1 : 51;
+ __reserved_1 : 50;
__u32 wakeup_events; /* wakeup every n events */
__u32 __reserved_2;
@@ -311,6 +329,15 @@ enum perf_event_type {
/*
* struct {
* struct perf_event_header header;
+ * u32 pid, ppid;
+ * u32 tid, ptid;
+ * };
+ */
+ PERF_EVENT_EXIT = 4,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
* u64 time;
* u64 id;
* u64 stream_id;
@@ -323,6 +350,7 @@ enum perf_event_type {
* struct {
* struct perf_event_header header;
* u32 pid, ppid;
+ * u32 tid, ptid;
* };
*/
PERF_EVENT_FORK = 7,
@@ -331,10 +359,8 @@ enum perf_event_type {
* struct {
* struct perf_event_header header;
* u32 pid, tid;
- * u64 value;
- * { u64 time_enabled; } && PERF_FORMAT_ENABLED
- * { u64 time_running; } && PERF_FORMAT_RUNNING
- * { u64 parent_id; } && PERF_FORMAT_ID
+ *
+ * struct read_format values;
* };
*/
PERF_EVENT_READ = 8,
@@ -352,11 +378,24 @@ enum perf_event_type {
* { u32 cpu, res; } && PERF_SAMPLE_CPU
* { u64 period; } && PERF_SAMPLE_PERIOD
*
- * { u64 nr;
- * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
+ * { struct read_format values; } && PERF_SAMPLE_READ
*
* { u64 nr,
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
+ *
+ * #
+ * # The RAW record below is opaque data wrt the ABI
+ * #
+ * # That is, the ABI doesn't make any promises wrt to
+ * # the stability of its content, it may vary depending
+ * # on event, hardware, kernel version and phase of
+ * # the moon.
+ * #
+ * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
+ * #
+ *
+ * { u32 size;
+ * char data[size];}&& PERF_SAMPLE_RAW
* };
*/
PERF_EVENT_SAMPLE = 9,
@@ -402,6 +441,11 @@ struct perf_callchain_entry {
__u64 ip[PERF_MAX_STACK_DEPTH];
};
+struct perf_raw_record {
+ u32 size;
+ void *data;
+};
+
struct task_struct;
/**
@@ -670,10 +714,13 @@ struct perf_sample_data {
struct pt_regs *regs;
u64 addr;
u64 period;
+ struct perf_raw_record *raw;
};
extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
struct perf_sample_data *data);
+extern void perf_counter_output(struct perf_counter *counter, int nmi,
+ struct perf_sample_data *data);
/*
* Return 1 for a software counter, 0 for a hardware counter
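For the read_format layout documented above, a hedged userspace-side sketch of parsing a group read; it assumes PERF_FORMAT_GROUP | PERF_FORMAT_ID with the optional time fields left out, and the counter fd comes from elsewhere:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* 16 is an arbitrary upper bound on group size for this sketch. */
struct group_read {
        uint64_t nr;
        struct {
                uint64_t value;
                uint64_t id;
        } cntr[16];
};

static void dump_group(int fd)
{
        struct group_read rg;
        uint64_t i;

        if (read(fd, &rg, sizeof(rg)) <= 0)
                return;
        for (i = 0; i < rg.nr; i++)
                printf("id %llu: %llu\n",
                       (unsigned long long)rg.cntr[i].id,
                       (unsigned long long)rg.cntr[i].value);
}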
diff --git a/include/linux/pps.h b/include/linux/pps.h
index cfe5c7214ec..0194ab06177 100644
--- a/include/linux/pps.h
+++ b/include/linux/pps.h
@@ -22,6 +22,8 @@
#ifndef _PPS_H_
#define _PPS_H_
+#include <linux/types.h>
+
#define PPS_VERSION "5.3.6"
#define PPS_MAX_SOURCES 16 /* should be enough... */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index e5996984ddd..9aaf5bfdad1 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -242,6 +242,8 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
*/
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
+#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
+#define SG_MITER_FROM_SG (1 << 2) /* nop */
struct sg_mapping_iter {
/* the following three fields can be accessed directly */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3ab08e4bb6b..0f1ea4a6695 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1198,6 +1198,7 @@ struct task_struct {
* a short time
*/
unsigned char fpu_counter;
+ s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index 5eff459b383..1f16eea2017 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -28,6 +28,7 @@
#include <linux/resource.h>
#include <linux/sem.h>
#include <linux/shm.h>
+#include <linux/mm.h> /* PAGE_ALIGN */
#include <linux/msg.h>
#include <linux/sched.h>
#include <linux/key.h>
@@ -66,6 +67,9 @@ extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
extern int cap_inode_need_killpriv(struct dentry *dentry);
extern int cap_inode_killpriv(struct dentry *dentry);
+extern int cap_file_mmap(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags,
+ unsigned long addr, unsigned long addr_only);
extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
@@ -92,6 +96,7 @@ extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
extern int cap_netlink_recv(struct sk_buff *skb, int cap);
extern unsigned long mmap_min_addr;
+extern unsigned long dac_mmap_min_addr;
/*
* Values used in the task_security_ops calls
*/
@@ -116,6 +121,21 @@ struct request_sock;
#define LSM_UNSAFE_PTRACE 2
#define LSM_UNSAFE_PTRACE_CAP 4
+/*
+ * If a hint addr is less than mmap_min_addr change hint to be as
+ * low as possible but still greater than mmap_min_addr
+ */
+static inline unsigned long round_hint_to_min(unsigned long hint)
+{
+ hint &= PAGE_MASK;
+ if (((void *)hint != NULL) &&
+ (hint < mmap_min_addr))
+ return PAGE_ALIGN(mmap_min_addr);
+ return hint;
+}
+extern int mmap_min_addr_handler(struct ctl_table *table, int write, struct file *filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
#ifdef CONFIG_SECURITY
struct security_mnt_opts {
@@ -2197,9 +2217,7 @@ static inline int security_file_mmap(struct file *file, unsigned long reqprot,
unsigned long addr,
unsigned long addr_only)
{
- if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO))
- return -EACCES;
- return 0;
+ return cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
}
static inline int security_file_mprotect(struct vm_area_struct *vma,
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1488d8c81aa..e8c6c9136c9 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -394,6 +394,7 @@ extern void __do_SAK(struct tty_struct *tty);
extern void disassociate_ctty(int priv);
extern void no_tty(void);
extern void tty_flip_buffer_push(struct tty_struct *tty);
+extern void tty_flush_to_ldisc(struct tty_struct *tty);
extern void tty_buffer_free_all(struct tty_struct *tty);
extern void tty_buffer_flush(struct tty_struct *tty);
extern void tty_buffer_init(struct tty_struct *tty);
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 40f38d89677..0c4ee9b88f8 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -144,7 +144,7 @@ struct tty_ldisc_ops {
struct tty_ldisc {
struct tty_ldisc_ops *ops;
- int refcount;
+ atomic_t users;
};
#define TTY_LDISC_MAGIC 0x5403
diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h
index ed889f4168f..ae779bb8cc0 100644
--- a/include/linux/ucb1400.h
+++ b/include/linux/ucb1400.h
@@ -73,6 +73,10 @@
#define UCB_ADC_DATA 0x68
#define UCB_ADC_DAT_VALID (1 << 15)
+
+#define UCB_FCSR 0x6c
+#define UCB_FCSR_AVE (1 << 12)
+
#define UCB_ADC_DAT_MASK 0x3ff
#define UCB_ID 0x7e
diff --git a/include/linux/uio.h b/include/linux/uio.h
index b7fe13883bd..98c114323a8 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -19,15 +19,6 @@ struct iovec
__kernel_size_t iov_len; /* Must be size_t (1003.1g) */
};
-#ifdef __KERNEL__
-
-struct kvec {
- void *iov_base; /* and that should *never* hold a userland pointer */
- size_t iov_len;
-};
-
-#endif
-
/*
* UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1)
*/
@@ -35,6 +26,13 @@ struct kvec {
#define UIO_FASTIOV 8
#define UIO_MAXIOV 1024
+#ifdef __KERNEL__
+
+struct kvec {
+ void *iov_base; /* and that should *never* hold a userland pointer */
+ size_t iov_len;
+};
+
/*
* Total number of bytes covered by an iovec.
*
@@ -53,5 +51,6 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
}
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
+#endif
#endif
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index be7d255fc7c..8dab9f2b883 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -20,8 +20,7 @@
#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */
-struct virtio_blk_config
-{
+struct virtio_blk_config {
/* The capacity (in 512-byte sectors). */
__u64 capacity;
/* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
@@ -50,8 +49,7 @@ struct virtio_blk_config
#define VIRTIO_BLK_T_BARRIER 0x80000000
/* This is the first element of the read scatter-gather list. */
-struct virtio_blk_outhdr
-{
+struct virtio_blk_outhdr {
/* VIRTIO_BLK_T* */
__u32 type;
/* io priority. */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 99f514575f6..e547e3c8ee9 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -79,8 +79,7 @@
* the dev->feature bits if it wants.
*/
typedef void vq_callback_t(struct virtqueue *);
-struct virtio_config_ops
-{
+struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len);
void (*set)(struct virtio_device *vdev, unsigned offset,
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 9c543d6ac53..d8dd539c9f4 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -31,8 +31,7 @@
#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
-struct virtio_net_config
-{
+struct virtio_net_config {
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
__u8 mac[6];
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
@@ -41,8 +40,7 @@ struct virtio_net_config
/* This is the first element of the scatter-gather list. If you don't
* specify GSO or CSUM features, you can simply ignore the header. */
-struct virtio_net_hdr
-{
+struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset
__u8 flags;
#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 693e0ec5afa..e4d144b132b 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -30,8 +30,7 @@
#define VIRTIO_RING_F_INDIRECT_DESC 28
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
-struct vring_desc
-{
+struct vring_desc {
/* Address (guest-physical). */
__u64 addr;
/* Length. */
@@ -42,24 +41,21 @@ struct vring_desc
__u16 next;
};
-struct vring_avail
-{
+struct vring_avail {
__u16 flags;
__u16 idx;
__u16 ring[];
};
/* u32 is used here for ids for padding reasons. */
-struct vring_used_elem
-{
+struct vring_used_elem {
/* Index of start of used descriptor chain. */
__u32 id;
/* Total length of the descriptor chain which was used (written to) */
__u32 len;
};
-struct vring_used
-{
+struct vring_used {
__u16 flags;
__u16 idx;
struct vring_used_elem ring[];
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6788e1a4d4c..cf3c2f5dba5 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -77,7 +77,14 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }
-extern void init_waitqueue_head(wait_queue_head_t *q);
+extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
+
+#define init_waitqueue_head(q) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __init_waitqueue_head((q), &__key); \
+ } while (0)
#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
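A hedged sketch of an ordinary caller (my_dev is hypothetical); behaviour is unchanged, but because init_waitqueue_head() is now a macro wrapping __init_waitqueue_head(), each call site gets its own static lock_class_key for lockdep:

#include <linux/wait.h>

struct my_dev {
        wait_queue_head_t wq;
        bool ready;
};

static void my_dev_setup(struct my_dev *dev)
{
        /* expands to __init_waitqueue_head(&dev->wq, &__key) with a
         * lock class key unique to this call site */
        init_waitqueue_head(&dev->wq);
}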
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 565eed8fe49..c05fd717c58 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -16,7 +16,7 @@ struct tcf_common {
u32 tcfc_capab;
int tcfc_action;
struct tcf_t tcfc_tm;
- struct gnet_stats_basic tcfc_bstats;
+ struct gnet_stats_basic_packed tcfc_bstats;
struct gnet_stats_queue tcfc_qstats;
struct gnet_stats_rate_est tcfc_rate_est;
spinlock_t tcfc_lock;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 80072611d26..c274993234e 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -355,7 +355,17 @@ struct rfcomm_dev_list_req {
};
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+
+#ifdef CONFIG_BT_RFCOMM_TTY
int rfcomm_init_ttys(void);
void rfcomm_cleanup_ttys(void);
-
+#else
+static inline int rfcomm_init_ttys(void)
+{
+ return 0;
+}
+static inline void rfcomm_cleanup_ttys(void)
+{
+}
+#endif
#endif /* __RFCOMM_H */
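With the inline stubs above, RFCOMM core code can invoke the TTY hooks unconditionally instead of wrapping each call site in #ifdef CONFIG_BT_RFCOMM_TTY. A minimal sketch of such a caller (hypothetical init path, not the actual rfcomm_init()):

/* Sketch: hypothetical caller; no #ifdef needed here. */
static int __init rfcomm_example_init(void)
{
	int err;

	err = rfcomm_init_ttys();	/* stub returns 0 when TTY support is off */
	if (err)
		return err;

	/* ... register protocol, sockets, etc. ... */
	return 0;
}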
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1a21895b732..d1892d66701 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -979,6 +979,10 @@ struct cfg80211_ops {
* channels at a later time. This can be used for devices which do not
 * have calibration information guaranteed for frequencies or settings
* outside of its regulatory domain.
+ * @disable_beacon_hints: enable this if your driver needs to ensure that
+ *	passive scan flags and beaconing flags are not lifted by cfg80211
+ *	due to regulatory beacon hints. For more information on beacon
+ *	hints, read the documentation for regulatory_hint_found_beacon()
* @reg_notifier: the driver's regulatory notification callback
* @regd: the driver's regulatory domain, if one was requested via
* the regulatory_hint() API. This can be used by the driver
@@ -1004,6 +1008,7 @@ struct wiphy {
bool custom_regulatory;
bool strict_regulatory;
+ bool disable_beacon_hints;
enum cfg80211_signal_type signal_type;
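A driver whose hardware cannot cope with passive-scan or no-beaconing channel flags being relaxed would set the new field before registering its wiphy. A hedged sketch with a hypothetical driver (example_cfg80211_ops is assumed, not a real symbol):

/* Sketch: hypothetical driver opting out of regulatory beacon hints. */
static struct wiphy *example_wiphy_alloc(void)
{
	struct wiphy *wiphy = wiphy_new(&example_cfg80211_ops, 0);

	if (!wiphy)
		return NULL;

	/* Keep passive-scan/no-beacon channel flags as programmed; do not
	 * let beacon hints from cfg80211 lift them. */
	wiphy->disable_beacon_hints = true;

	return wiphy;
}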
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index d136b5240ef..c1488553e34 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -28,7 +28,7 @@ extern int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d);
extern int gnet_stats_copy_basic(struct gnet_dump *d,
- struct gnet_stats_basic *b);
+ struct gnet_stats_basic_packed *b);
extern int gnet_stats_copy_rate_est(struct gnet_dump *d,
struct gnet_stats_rate_est *r);
extern int gnet_stats_copy_queue(struct gnet_dump *d,
@@ -37,14 +37,14 @@ extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
extern int gnet_stats_finish_copy(struct gnet_dump *d);
-extern int gen_new_estimator(struct gnet_stats_basic *bstats,
+extern int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est,
spinlock_t *stats_lock, struct nlattr *opt);
-extern void gen_kill_estimator(struct gnet_stats_basic *bstats,
+extern void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est);
-extern int gen_replace_estimator(struct gnet_stats_basic *bstats,
+extern int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est,
spinlock_t *stats_lock, struct nlattr *opt);
-extern bool gen_estimator_active(const struct gnet_stats_basic *bstats,
+extern bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
const struct gnet_stats_rate_est *rate_est);
#endif
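The renames above switch in-kernel users from struct gnet_stats_basic (the netlink-exported layout, which can carry tail padding) to a byte-packed kernel-internal twin, so structures embedding it do not inherit that padding. Roughly, the split in include/linux/gen_stats.h looks like this (sketch, not the verbatim header):

/* Sketch of the two layouts: same fields, the kernel-side twin is packed. */
struct gnet_stats_basic {
	__u64	bytes;
	__u32	packets;
};

struct gnet_stats_basic_packed {
	__u64	bytes;
	__u32	packets;
} __attribute__ ((packed));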
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index 65d594dffbf..ddbf37e1961 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -8,7 +8,7 @@ struct xt_rateest {
spinlock_t lock;
struct gnet_estimator params;
struct gnet_stats_rate_est rstats;
- struct gnet_stats_basic bstats;
+ struct gnet_stats_basic_packed bstats;
};
extern struct xt_rateest *xt_rateest_lookup(const char *name);
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 82a3191375f..7eafb8d5447 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -61,8 +61,8 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
}
struct qdisc_watchdog {
- struct hrtimer timer;
- struct Qdisc *qdisc;
+ struct tasklet_hrtimer timer;
+ struct Qdisc *qdisc;
};
extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
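Because the embedded timer is now a struct tasklet_hrtimer, the hrtimer itself lives at wd->timer.timer and the expiry callback runs from a tasklet rather than hard-irq context. A hedged sketch of how the matching init code in net/sched/sch_api.c adapts (not part of this header diff):

/* Sketch: qdisc watchdog setup against the tasklet_hrtimer field above. */
static enum hrtimer_restart qdisc_watchdog_cb(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer.timer);

	/* ... clear throttled state and __netif_schedule() the root qdisc ... */
	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	tasklet_hrtimer_init(&wd->timer, qdisc_watchdog_cb,
			     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	wd->qdisc = qdisc;
}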
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 964ffa0d881..5482e9582f5 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -72,7 +72,7 @@ struct Qdisc
*/
unsigned long state;
struct sk_buff_head q;
- struct gnet_stats_basic bstats;
+ struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
};
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1867553c61e..f64fbaae781 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -144,6 +144,9 @@
#undef TP_fast_assign
#define TP_fast_assign(args...) args
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * int ret = 0;
+ *
+ * if (!atomic_inc_return(&event_call->profile_count))
+ * ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ * return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ * if (atomic_add_negative(-1, &event_call->profile_count))
+ * unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+ \
+static void ftrace_profile_##call(proto); \
+ \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{ \
+ int ret = 0; \
+ \
+ if (!atomic_inc_return(&event_call->profile_count)) \
+ ret = register_trace_##call(ftrace_profile_##call); \
+ \
+ return ret; \
+} \
+ \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{ \
+ if (atomic_add_negative(-1, &event_call->profile_count)) \
+ unregister_trace_##call(ftrace_profile_##call); \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
/*
* Stage 4 of the trace events.
*
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call( \
#define TP_FMT(fmt, args...) fmt "\n", ##args
#ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args) \
-static void ftrace_profile_##call(proto) \
-{ \
- extern void perf_tpcounter_event(int); \
- perf_tpcounter_event(event_##call.id); \
-} \
- \
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{ \
- int ret = 0; \
- \
- if (!atomic_inc_return(&event_call->profile_count)) \
- ret = register_trace_##call(ftrace_profile_##call); \
- \
- return ret; \
-} \
- \
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{ \
- if (atomic_add_negative(-1, &event_call->profile_count)) \
- unregister_trace_##call(ftrace_profile_##call); \
-}
#define _TRACE_PROFILE_INIT(call) \
.profile_count = ATOMIC_INIT(-1), \
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
.profile_disable = ftrace_profile_disable_##call,
#else
-#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
#endif
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
\
static struct ftrace_event_call event_##call; \
\
@@ -586,6 +615,110 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call>, except that the sample
+ * is written to a perf counter instead of the ring buffer.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ * struct ftrace_event_call *event_call = &event_<call>;
+ * extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ * struct ftrace_raw_<call> *entry;
+ * u64 __addr = 0, __count = 1;
+ * unsigned long irq_flags;
+ * int __entry_size;
+ * int __data_size;
+ * int pc;
+ *
+ * local_save_flags(irq_flags);
+ * pc = preempt_count();
+ *
+ * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ * // Below we want to get the aligned size by taking into account
+ * // the u32 field that will later store the buffer size
+ * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ * sizeof(u64));
+ * __entry_size -= sizeof(u32);
+ *
+ * do {
+ * char raw_data[__entry_size]; <- allocate our sample on the stack
+ * struct trace_entry *ent;
+ *
+ * Zero dead bytes from alignment to avoid leaking stack data to userspace:
+ *
+ * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ * entry = (struct ftrace_raw_<call> *)raw_data;
+ * ent = &entry->ent;
+ * tracing_generic_entry_update(ent, irq_flags, pc);
+ * ent->type = event_call->id;
+ *
+ * <tstruct> <- set up the dynamic-array offsets
+ *
+ * <assign> <- assign the event field values
+ *
+ * perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ * __entry_size); <- submit them to perf counter
+ * } while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+static void ftrace_profile_##call(proto) \
+{ \
+ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+ struct ftrace_event_call *event_call = &event_##call; \
+ extern void perf_tpcounter_event(int, u64, u64, void *, int); \
+ struct ftrace_raw_##call *entry; \
+ u64 __addr = 0, __count = 1; \
+ unsigned long irq_flags; \
+ int __entry_size; \
+ int __data_size; \
+ int pc; \
+ \
+ local_save_flags(irq_flags); \
+ pc = preempt_count(); \
+ \
+ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+ __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+ sizeof(u64)); \
+ __entry_size -= sizeof(u32); \
+ \
+ do { \
+ char raw_data[__entry_size]; \
+ struct trace_entry *ent; \
+ \
+ *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
+ entry = (struct ftrace_raw_##call *)raw_data; \
+ ent = &entry->ent; \
+ tracing_generic_entry_update(ent, irq_flags, pc); \
+ ent->type = event_call->id; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+ __entry_size); \
+ } while (0); \
+ \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
#undef _TRACE_PROFILE_INIT
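Putting the pieces together: an event that wants to weight its perf samples can pass a value through __perf_count() inside TP_perf_assign(), which the regular ring-buffer path discards (its definition above expands to nothing) while the profile callback picks it up as __count. A hedged sketch with a hypothetical event, modelled on the sched_stat_* events rather than copied from any tree:

/* Sketch: hypothetical event using the perf-only assignment hook. */
TRACE_EVENT(example_stat_wait,

	TP_PROTO(pid_t pid, u64 delay),

	TP_ARGS(pid, delay),

	TP_STRUCT__entry(
		__field(	pid_t,	pid	)
		__field(	u64,	delay	)
	),

	TP_fast_assign(
		__entry->pid	= pid;
		__entry->delay	= delay;
	)
	TP_perf_assign(
		__perf_count(delay);	/* weight the perf sample by the delay */
	),

	TP_printk("pid=%d delay=%llu [ns]", __entry->pid,
		  (unsigned long long)__entry->delay)
);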