Diffstat (limited to 'include')
40 files changed, 372 insertions, 202 deletions
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h index 55059abf9c9..20f52395421 100644 --- a/include/asm-i386/acpi.h +++ b/include/asm-i386/acpi.h @@ -103,6 +103,12 @@ __acpi_release_global_lock (unsigned int *lock) :"=r"(n_hi), "=r"(n_lo) \ :"0"(n_hi), "1"(n_lo)) +#ifdef CONFIG_X86_IO_APIC +extern void check_acpi_pci(void); +#else +static inline void check_acpi_pci(void) { } +#endif + #ifdef CONFIG_ACPI extern int acpi_lapic; extern int acpi_ioapic; @@ -128,8 +134,6 @@ extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); extern int skip_ioapic_setup; extern int acpi_skip_timer_override; -extern void check_acpi_pci(void); - static inline void disable_ioapic_setup(void) { skip_ioapic_setup = 1; @@ -142,8 +146,6 @@ static inline int ioapic_setup_disabled(void) #else static inline void disable_ioapic_setup(void) { } -static inline void check_acpi_pci(void) { } - #endif static inline void acpi_noirq_set(void) { acpi_noirq = 1; } diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index 088a945bf26..ee056c41a9f 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -219,13 +219,12 @@ extern unsigned long pg0[]; * The following only work if pte_present() is true. * Undefined behaviour if not.. */ -#define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT) static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } -static inline int pte_huge(pte_t pte) { return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; } +static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; } /* * The following only works if pte_present() is not true. @@ -242,7 +241,7 @@ static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } -static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= __LARGE_PTE; return pte; } +static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } #ifdef CONFIG_X86_PAE # include <asm/pgtable-3level.h> diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 5e6362a786b..3ab27333dae 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -57,6 +57,8 @@ # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA # define ARCH_HAS_HUGEPAGE_ONLY_RANGE +# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE +# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE #endif /* CONFIG_HUGETLB_PAGE */ #ifdef __ASSEMBLY__ diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index e2560c58384..c0f8144f234 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h @@ -314,7 +314,7 @@ ia64_phys_addr_valid (unsigned long addr) #define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A)) #define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D)) #define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D)) -#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_P)) +#define pte_mkhuge(pte) (__pte(pte_val(pte))) /* * Macro to a page protection value as "uncacheable". 
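The i386 pgtable.h hunk above drops the composite __LARGE_PTE test: pte_huge() and pte_mkhuge() now use _PAGE_PSE alone, relying on the documented rule that these helpers are only valid once pte_present() is true. A minimal user-space sketch (mock flag values, not kernel code) showing the old and new tests agree for present PTEs:

#include <assert.h>
#include <stdio.h>

/* Mock i386 PTE flag values, for illustration only. */
#define _PAGE_PRESENT 0x001
#define _PAGE_PSE     0x080
#define __LARGE_PTE   (_PAGE_PSE | _PAGE_PRESENT)

/* Old test: both the present and PSE bits had to be set. */
static int pte_huge_old(unsigned long pte_low)
{
        return (pte_low & __LARGE_PTE) == __LARGE_PTE;
}

/* New test: PSE alone, since the caller already checked pte_present(). */
static int pte_huge_new(unsigned long pte_low)
{
        return (pte_low & _PAGE_PSE) != 0;
}

int main(void)
{
        unsigned long huge  = _PAGE_PRESENT | _PAGE_PSE;
        unsigned long small = _PAGE_PRESENT;

        /* For present PTEs, the only documented case, the two tests agree. */
        assert(pte_huge_old(huge)  == pte_huge_new(huge));
        assert(pte_huge_old(small) == pte_huge_new(small));
        printf("huge=%d small=%d\n", pte_huge_new(huge), pte_huge_new(small));
        return 0;
}
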
Note that "protection" is really a @@ -505,9 +505,6 @@ extern struct page *zero_page_memmap_ptr; #define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3)) #define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT) #define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1)) -struct mmu_gather; -void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr, - unsigned long end, unsigned long floor, unsigned long ceiling); #endif /* diff --git a/include/asm-mips/termbits.h b/include/asm-mips/termbits.h index c29c65b7818..fa6d04dac56 100644 --- a/include/asm-mips/termbits.h +++ b/include/asm-mips/termbits.h @@ -77,7 +77,7 @@ struct termios { #define IXANY 0004000 /* Any character will restart after stop. */ #define IXOFF 0010000 /* Enable start/stop input control. */ #define IMAXBEL 0020000 /* Ring bell when input queue is full. */ -#define IUTF8 0040000 /* Input is UTF8 */ +#define IUTF8 0040000 /* Input is UTF-8 */ /* c_oflag bits */ #define OPOST 0000001 /* Perform output processing. */ diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h index e38931379a7..185ee15963a 100644 --- a/include/asm-powerpc/pgtable.h +++ b/include/asm-powerpc/pgtable.h @@ -468,11 +468,6 @@ extern pgd_t swapper_pg_dir[]; extern void paging_init(void); -#ifdef CONFIG_HUGETLB_PAGE -#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ - free_pgd_range(tlb, addr, end, floor, ceiling) -#endif - /* * This gets called at the end of handling a page fault, when * the kernel has put a new PTE into the page table for the process. diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h index 3417dd71ab4..e28aaf28e4a 100644 --- a/include/asm-s390/pgalloc.h +++ b/include/asm-s390/pgalloc.h @@ -158,11 +158,4 @@ static inline void pte_free(struct page *pte) #define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte) -/* - * This establishes kernel virtual mappings (e.g., as a result of a - * vmalloc call). Since s390-esame uses a separate kernel page table, - * there is nothing to do here... 
:) - */ -#define set_pgdir(addr,entry) do { } while(0) - #endif /* _S390_PGALLOC_H */ diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h index 678251ac1db..b29dd468817 100644 --- a/include/asm-sh64/pgalloc.h +++ b/include/asm-sh64/pgalloc.h @@ -167,22 +167,6 @@ static __inline__ void pmd_free(pmd_t *pmd) extern int do_check_pgt_cache(int, int); -static inline void set_pgdir(unsigned long address, pgd_t entry) -{ - struct task_struct * p; - pgd_t *pgd; - - read_lock(&tasklist_lock); - for_each_process(p) { - if (!p->mm) - continue; - *pgd_offset(p->mm,address) = entry; - } - read_unlock(&tasklist_lock); - for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd) - pgd[address >> PGDIR_SHIFT] = entry; -} - #define pmd_populate_kernel(mm, pmd, pte) \ set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte))) diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h index b33c35411e8..9eea8f4d41f 100644 --- a/include/asm-sparc/pgtable.h +++ b/include/asm-sparc/pgtable.h @@ -269,11 +269,14 @@ BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t) BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t) BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int) +BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t) #define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot) #define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot) #define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space) +#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot) + BTFIXUPDEF_INT(pte_modify_mask) static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__; @@ -309,9 +312,6 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long) #define pte_unmap(pte) do{}while(0) #define pte_unmap_nested(pte) do{}while(0) -/* The permissions for pgprot_val to make a page mapped on the obio space */ -extern unsigned int pg_iobits; - /* Certain architectures need to do special things when pte's * within a page table are directly modified. Thus, the following * hook is made available. diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index c66a81bbc84..9d6a6dbaf12 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -71,7 +71,8 @@ struct trap_per_cpu { /* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */ unsigned long cpu_mondo_block_pa; unsigned long cpu_list_pa; - unsigned long __pad1[2]; + unsigned long tsb_huge; + unsigned long tsb_huge_temp; /* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size. 
*/ unsigned long __pad2[4]; @@ -116,6 +117,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, #define TRAP_PER_CPU_FAULT_INFO 0x40 #define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0 #define TRAP_PER_CPU_CPU_LIST_PA 0xc8 +#define TRAP_PER_CPU_TSB_HUGE 0xd0 +#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 #define TRAP_BLOCK_SZ_SHIFT 8 diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 230ba678d3b..2d4f2ea9568 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -90,18 +90,39 @@ extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte extern void tsb_flush(unsigned long ent, unsigned long tag); extern void tsb_init(struct tsb *tsb, unsigned long size); -typedef struct { - spinlock_t lock; - unsigned long sparc64_ctx_val; +struct tsb_config { struct tsb *tsb; unsigned long tsb_rss_limit; unsigned long tsb_nentries; unsigned long tsb_reg_val; unsigned long tsb_map_vaddr; unsigned long tsb_map_pte; - struct hv_tsb_descr tsb_descr; +}; + +#define MM_TSB_BASE 0 + +#ifdef CONFIG_HUGETLB_PAGE +#define MM_TSB_HUGE 1 +#define MM_NUM_TSBS 2 +#else +#define MM_NUM_TSBS 1 +#endif + +typedef struct { + spinlock_t lock; + unsigned long sparc64_ctx_val; + unsigned long huge_pte_count; + struct tsb_config tsb_block[MM_NUM_TSBS]; + struct hv_tsb_descr tsb_descr[MM_NUM_TSBS]; } mm_context_t; #endif /* !__ASSEMBLY__ */ +#define TSB_CONFIG_TSB 0x00 +#define TSB_CONFIG_RSS_LIMIT 0x08 +#define TSB_CONFIG_NENTRIES 0x10 +#define TSB_CONFIG_REG_VAL 0x18 +#define TSB_CONFIG_MAP_VADDR 0x20 +#define TSB_CONFIG_MAP_PTE 0x28 + #endif /* __MMU_H */ diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index e7974321d05..2337eb48771 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -29,20 +29,25 @@ extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); extern void destroy_context(struct mm_struct *mm); extern void __tsb_context_switch(unsigned long pgd_pa, - unsigned long tsb_reg, - unsigned long tsb_vaddr, - unsigned long tsb_pte, + struct tsb_config *tsb_base, + struct tsb_config *tsb_huge, unsigned long tsb_descr_pa); static inline void tsb_context_switch(struct mm_struct *mm) { - __tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val, - mm->context.tsb_map_vaddr, - mm->context.tsb_map_pte, - __pa(&mm->context.tsb_descr)); + __tsb_context_switch(__pa(mm->pgd), + &mm->context.tsb_block[0], +#ifdef CONFIG_HUGETLB_PAGE + (mm->context.tsb_block[1].tsb ? 
+ &mm->context.tsb_block[1] : + NULL) +#else + NULL +#endif + , __pa(&mm->context.tsb_descr[0])); } -extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss); +extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss); #ifdef CONFIG_SMP extern void smp_tsb_sync(struct mm_struct *mm); #else diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index fcb2812265f..66fe4ac59fd 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -30,6 +30,23 @@ #ifdef __KERNEL__ +#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) +#define HPAGE_SHIFT 22 +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) +#define HPAGE_SHIFT 19 +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) +#define HPAGE_SHIFT 16 +#endif + +#ifdef CONFIG_HUGETLB_PAGE +#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1UL)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) +#define ARCH_HAS_SETCLEAR_HUGE_PTE +#define ARCH_HAS_HUGETLB_PREFAULT_HOOK +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA +#endif + #ifndef __ASSEMBLY__ extern void _clear_page(void *page); @@ -90,23 +107,6 @@ typedef unsigned long pgprot_t; #endif /* (STRICT_MM_TYPECHECKS) */ -#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) -#define HPAGE_SHIFT 22 -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) -#define HPAGE_SHIFT 19 -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -#define HPAGE_SHIFT 16 -#endif - -#ifdef CONFIG_HUGETLB_PAGE -#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) -#define HPAGE_MASK (~(HPAGE_SIZE - 1UL)) -#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) -#define ARCH_HAS_SETCLEAR_HUGE_PTE -#define ARCH_HAS_HUGETLB_PREFAULT_HOOK -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif - #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ (_AC(0x0000000070000000,UL)) : \ (_AC(0xfffff80000000000,UL) + (1UL << 32UL))) diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index ed4124edf83..c44e7466534 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -105,6 +105,7 @@ #define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */ #define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ #define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ +#define _PAGE_SZALL_4U _AC(0x6001000000000000,UL) /* All pgsz bits */ #define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ #define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */ #define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */ @@ -150,6 +151,7 @@ #define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */ #define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */ #define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */ +#define _PAGE_SZALL_4V _AC(0x0000000000000007,UL) /* All pgsz bits */ #if PAGE_SHIFT == 13 #define _PAGE_SZBITS_4U _PAGE_SZ8K_4U diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 715fd94cf57..a617d364d08 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -273,7 +273,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } -static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; } +static inline int pte_huge(pte_t pte) { 
return pte_val(pte) & _PAGE_PSE; } static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } @@ -285,7 +285,7 @@ static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _ static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } -static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; } +static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; } struct vm_area_struct; diff --git a/include/linux/device.h b/include/linux/device.h index 5b595fdfb67..f6e72a65a3f 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -378,6 +378,7 @@ extern void device_bind_driver(struct device * dev); extern void device_release_driver(struct device * dev); extern int device_attach(struct device * dev); extern void driver_attach(struct device_driver * drv); +extern void device_reprobe(struct device *dev); /* @@ -399,7 +400,7 @@ extern struct device * get_device(struct device * dev); extern void put_device(struct device * dev); -/* drivers/base/power.c */ +/* drivers/base/power/shutdown.c */ extern void device_shutdown(void); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 68d82ad6b17..d6f1019625a 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -20,10 +20,7 @@ void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long) int hugetlb_prefault(struct address_space *, struct vm_area_struct *); int hugetlb_report_meminfo(char *); int hugetlb_report_node_meminfo(int, char *); -int is_hugepage_mem_enough(size_t); unsigned long hugetlb_total_pages(void); -struct page *alloc_huge_page(struct vm_area_struct *, unsigned long); -void free_huge_page(struct page *); int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access); @@ -39,18 +36,35 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write); struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write); -int is_aligned_hugepage_range(unsigned long addr, unsigned long len); int pmd_huge(pmd_t pmd); +void hugetlb_change_protection(struct vm_area_struct *vma, + unsigned long address, unsigned long end, pgprot_t newprot); #ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE #define is_hugepage_only_range(mm, addr, len) 0 -#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ - do { } while (0) +#endif + +#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE +#define hugetlb_free_pgd_range free_pgd_range +#else +void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling); #endif #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE -#define prepare_hugepage_range(addr, len) \ - is_aligned_hugepage_range(addr, len) +/* + * If the arch doesn't supply something else, assume that hugepage + * size aligned regions are ok without further preparation. 
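The generic prepare_hugepage_range() fallback added just below simply rejects an addr/len pair that is not huge-page aligned. A user-space sketch exercising the same rule, with HPAGE_MASK mocked for an illustrative 4MB huge page:

#include <stdio.h>
#include <errno.h>

#define HPAGE_SIZE (4UL << 20)               /* illustrative 4MB huge page */
#define HPAGE_MASK (~(HPAGE_SIZE - 1UL))

/* Same check as the generic fallback: addr and len must both be aligned. */
static int check_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

int main(void)
{
        /* aligned addr, aligned len -> 0 */
        printf("%d\n", check_hugepage_range(0x40000000UL, 8UL << 20));
        /* aligned addr, unaligned len -> -EINVAL */
        printf("%d\n", check_hugepage_range(0x40000000UL, 5UL << 20));
        /* unaligned addr -> -EINVAL */
        printf("%d\n", check_hugepage_range(0x40100000UL, 4UL << 20));
        return 0;
}
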
+ */ +static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} #else int prepare_hugepage_range(unsigned long addr, unsigned long len); #endif @@ -87,20 +101,17 @@ static inline unsigned long hugetlb_total_pages(void) #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; }) #define unmap_hugepage_range(vma, start, end) BUG() -#define is_hugepage_mem_enough(size) 0 #define hugetlb_report_meminfo(buf) 0 #define hugetlb_report_node_meminfo(n, buf) 0 #define follow_huge_pmd(mm, addr, pmd, write) NULL -#define is_aligned_hugepage_range(addr, len) 0 #define prepare_hugepage_range(addr, len) (-EINVAL) #define pmd_huge(x) 0 #define is_hugepage_only_range(mm, addr, len) 0 -#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ - do { } while (0) -#define alloc_huge_page(vma, addr) ({ NULL; }) -#define free_huge_page(p) ({ (void)(p); BUG(); }) +#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) #define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) +#define hugetlb_change_protection(vma, address, end, newprot) + #ifndef HPAGE_MASK #define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */ #define HPAGE_SIZE PAGE_SIZE @@ -128,6 +139,8 @@ struct hugetlbfs_sb_info { struct hugetlbfs_inode_info { struct shared_policy policy; + /* Protected by the (global) hugetlb_lock */ + unsigned long prereserved_hpages; struct inode vfs_inode; }; @@ -144,6 +157,10 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) extern struct file_operations hugetlbfs_file_operations; extern struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_zero_setup(size_t); +int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info, + unsigned long atleast_hpages); +void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info, + unsigned long atmost_hpages); int hugetlb_get_quota(struct address_space *mapping); void hugetlb_put_quota(struct address_space *mapping); diff --git a/include/linux/libata.h b/include/linux/libata.h index 239408ecfdd..204c37a55f0 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -508,7 +508,6 @@ extern void ata_host_set_remove(struct ata_host_set *host_set); extern int ata_scsi_detect(struct scsi_host_template *sht); extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); -extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); extern int ata_scsi_error(struct Scsi_Host *host); extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); diff --git a/include/linux/migrate.h b/include/linux/migrate.h new file mode 100644 index 00000000000..7d09962c3c0 --- /dev/null +++ b/include/linux/migrate.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_MIGRATE_H +#define _LINUX_MIGRATE_H + +#include <linux/config.h> +#include <linux/mm.h> + +#ifdef CONFIG_MIGRATION +extern int isolate_lru_page(struct page *p, struct list_head *pagelist); +extern int putback_lru_pages(struct list_head *l); +extern int migrate_page(struct page *, struct page *); +extern void migrate_page_copy(struct page *, struct page *); +extern int migrate_page_remove_references(struct page *, struct page *, int); +extern int migrate_pages(struct list_head *l, struct list_head *t, + struct 
list_head *moved, struct list_head *failed); +int migrate_pages_to(struct list_head *pagelist, + struct vm_area_struct *vma, int dest); +extern int fail_migrate_page(struct page *, struct page *); + +extern int migrate_prep(void); + +#else + +static inline int isolate_lru_page(struct page *p, struct list_head *list) + { return -ENOSYS; } +static inline int putback_lru_pages(struct list_head *l) { return 0; } +static inline int migrate_pages(struct list_head *l, struct list_head *t, + struct list_head *moved, struct list_head *failed) { return -ENOSYS; } + +static inline int migrate_prep(void) { return -ENOSYS; } + +/* Possible settings for the migrate_page() method in address_operations */ +#define migrate_page NULL +#define fail_migrate_page NULL + +#endif /* CONFIG_MIGRATION */ +#endif /* _LINUX_MIGRATE_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 498ff8778fb..6aa016f1d3a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -286,43 +286,34 @@ struct page { * * Also, many kernel routines increase the page count before a critical * routine so they can be sure the page doesn't go away from under them. - * - * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we - * can use atomic_add_negative(-1, page->_count) to detect when the page - * becomes free and so that we can also use atomic_inc_and_test to atomically - * detect when we just tried to grab a ref on a page which some other CPU has - * already deemed to be freeable. - * - * NO code should make assumptions about this internal detail! Use the provided - * macros which retain the old rules: page_count(page) == 0 is a free page. */ /* * Drop a ref, return true if the logical refcount fell to zero (the page has * no users) */ -#define put_page_testzero(p) \ - ({ \ - BUG_ON(atomic_read(&(p)->_count) == -1);\ - atomic_add_negative(-1, &(p)->_count); \ - }) +static inline int put_page_testzero(struct page *page) +{ + BUG_ON(atomic_read(&page->_count) == 0); + return atomic_dec_and_test(&page->_count); +} /* - * Grab a ref, return true if the page previously had a logical refcount of - * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page + * Try to grab a ref unless the page has a refcount of zero, return false if + * that is the case. */ -#define get_page_testone(p) atomic_inc_and_test(&(p)->_count) - -#define set_page_count(p,v) atomic_set(&(p)->_count, (v) - 1) -#define __put_page(p) atomic_dec(&(p)->_count) +static inline int get_page_unless_zero(struct page *page) +{ + return atomic_inc_not_zero(&page->_count); +} extern void FASTCALL(__page_cache_release(struct page *)); static inline int page_count(struct page *page) { - if (PageCompound(page)) + if (unlikely(PageCompound(page))) page = (struct page *)page_private(page); - return atomic_read(&page->_count) + 1; + return atomic_read(&page->_count); } static inline void get_page(struct page *page) @@ -332,8 +323,19 @@ static inline void get_page(struct page *page) atomic_inc(&page->_count); } +/* + * Setup the page count before being freed into the page allocator for + * the first time (boot or memory hotplug) + */ +static inline void init_page_count(struct page *page) +{ + atomic_set(&page->_count, 1); +} + void put_page(struct page *page); +void split_page(struct page *page, unsigned int order); + /* * Multiple processes may "see" the same page. E.g. 
for untouched * mappings of /dev/null, all processes see the same page full of @@ -1046,7 +1048,7 @@ int in_gate_area_no_task(unsigned long addr); int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); -int shrink_slab(unsigned long scanned, gfp_t gfp_mask, +unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages); void drop_pagecache(void); void drop_slab(void); diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 8ac854f7f19..3b6723dfaff 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -32,7 +32,7 @@ del_page_from_lru(struct zone *zone, struct page *page) { list_del(&page->lru); if (PageActive(page)) { - ClearPageActive(page); + __ClearPageActive(page); zone->nr_active--; } else { zone->nr_inactive--; diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index e933e2a355a..8bcd9450d92 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -199,7 +199,7 @@ struct fat_mount_options { sys_immutable:1, /* set = system files are immutable */ dotsOK:1, /* set = hidden and system files are named '.filename' */ isvfat:1, /* 0=no vfat long filename support, 1=vfat support */ - utf8:1, /* Use of UTF8 character set (Default) */ + utf8:1, /* Use of UTF-8 character set (Default) */ unicode_xlate:1, /* create escape sequences for unhandled Unicode */ numtail:1, /* Does first alias have a numeric '~1' type tail? */ atari:1, /* Use Atari GEMDOS variation of MS-DOS fs */ diff --git a/include/linux/net.h b/include/linux/net.h index 152fa6551fd..84a490e5f0a 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -143,6 +143,8 @@ struct proto_ops { struct poll_table_struct *wait); int (*ioctl) (struct socket *sock, unsigned int cmd, unsigned long arg); + int (*compat_ioctl) (struct socket *sock, unsigned int cmd, + unsigned long arg); int (*listen) (struct socket *sock, int len); int (*shutdown) (struct socket *sock, int flags); int (*setsockopt)(struct socket *sock, int level, @@ -251,6 +253,8 @@ SOCKCALL_UWRAP(name, poll, (struct file *file, struct socket *sock, struct poll_ (file, sock, wait)) \ SOCKCALL_WRAP(name, ioctl, (struct socket *sock, unsigned int cmd, \ unsigned long arg), (sock, cmd, arg)) \ +SOCKCALL_WRAP(name, compat_ioctl, (struct socket *sock, unsigned int cmd, \ + unsigned long arg), (sock, cmd, arg)) \ SOCKCALL_WRAP(name, listen, (struct socket *sock, int len), (sock, len)) \ SOCKCALL_WRAP(name, shutdown, (struct socket *sock, int flags), (sock, flags)) \ SOCKCALL_WRAP(name, setsockopt, (struct socket *sock, int level, int optname, \ @@ -275,6 +279,7 @@ static const struct proto_ops name##_ops = { \ .getname = __lock_##name##_getname, \ .poll = __lock_##name##_poll, \ .ioctl = __lock_##name##_ioctl, \ + .compat_ioctl = __lock_##name##_compat_ioctl, \ .listen = __lock_##name##_listen, \ .shutdown = __lock_##name##_shutdown, \ .setsockopt = __lock_##name##_setsockopt, \ @@ -283,6 +288,7 @@ static const struct proto_ops name##_ops = { \ .recvmsg = __lock_##name##_recvmsg, \ .mmap = __lock_##name##_mmap, \ }; + #endif #define MODULE_ALIAS_NETPROTO(proto) \ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index d52999c4333..9ea629c02a4 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -86,8 +86,9 @@ * - The __xxx_page_state variants can be used safely when interrupts are * disabled. 
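The mm.h hunk above moves struct page to a conventional refcount: a free page now has _count == 0 rather than -1, init_page_count() starts a page at 1, put_page_testzero() becomes a straight atomic_dec_and_test(), and get_page_unless_zero() (built on atomic_inc_not_zero()) only takes a reference while the count is still non-zero. A user-space sketch of those two primitives using C11 atomics; the inc-not-zero is modelled as a compare-and-swap loop:

#include <stdatomic.h>
#include <assert.h>
#include <stdio.h>

/* Drop a reference; true if the count hit zero (no users left). */
static int put_testzero(atomic_int *count)
{
        assert(atomic_load(count) != 0);      /* like the BUG_ON in mm.h */
        return atomic_fetch_sub(count, 1) == 1;
}

/* Take a reference unless the count is already zero (page being freed). */
static int get_unless_zero(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0) {
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return 1;             /* got the reference */
        }
        return 0;                             /* too late, count was zero */
}

int main(void)
{
        atomic_int count = 1;                 /* init_page_count() analogue */

        assert(get_unless_zero(&count) == 1); /* count: 1 -> 2 */
        assert(put_testzero(&count) == 0);    /* 2 -> 1, not the last user */
        assert(put_testzero(&count) == 1);    /* 1 -> 0, last reference */
        assert(get_unless_zero(&count) == 0); /* refuses a "free" page */
        printf("final count = %d\n", atomic_load(&count));
        return 0;
}
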
* - The __xxx_page_state variants can be used if the field is only - * modified from process context, or only modified from interrupt context. - * In this case, the field should be commented here. + * modified from process context and protected from preemption, or only + * modified from interrupt context. In this case, the field should be + * commented here. */ struct page_state { unsigned long nr_dirty; /* Dirty writeable pages */ @@ -239,22 +240,19 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); #define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags) #define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags) -#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags) #define PageLRU(page) test_bit(PG_lru, &(page)->flags) -#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags) -#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags) +#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags) +#define ClearPageLRU(page) clear_bit(PG_lru, &(page)->flags) +#define __ClearPageLRU(page) __clear_bit(PG_lru, &(page)->flags) #define PageActive(page) test_bit(PG_active, &(page)->flags) #define SetPageActive(page) set_bit(PG_active, &(page)->flags) #define ClearPageActive(page) clear_bit(PG_active, &(page)->flags) -#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags) -#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags) +#define __ClearPageActive(page) __clear_bit(PG_active, &(page)->flags) #define PageSlab(page) test_bit(PG_slab, &(page)->flags) -#define SetPageSlab(page) set_bit(PG_slab, &(page)->flags) -#define ClearPageSlab(page) clear_bit(PG_slab, &(page)->flags) -#define TestClearPageSlab(page) test_and_clear_bit(PG_slab, &(page)->flags) -#define TestSetPageSlab(page) test_and_set_bit(PG_slab, &(page)->flags) +#define __SetPageSlab(page) __set_bit(PG_slab, &(page)->flags) +#define __ClearPageSlab(page) __clear_bit(PG_slab, &(page)->flags) #ifdef CONFIG_HIGHMEM #define PageHighMem(page) is_highmem(page_zone(page)) @@ -329,8 +327,8 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); #define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags) #define PageCompound(page) test_bit(PG_compound, &(page)->flags) -#define SetPageCompound(page) set_bit(PG_compound, &(page)->flags) -#define ClearPageCompound(page) clear_bit(PG_compound, &(page)->flags) +#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags) +#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags) #ifdef CONFIG_SWAP #define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index b9810ddf435..ec3c3293262 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -852,6 +852,8 @@ #define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432 #define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512 #define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522 +#define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422 +#define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432 #define PCI_VENDOR_ID_CYRIX 0x1078 #define PCI_DEVICE_ID_CYRIX_5510 0x0000 diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 0b2ba67ff13..b739ac1f7ca 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -11,8 +11,6 @@ #ifndef _LINUX_RTC_H_ #define _LINUX_RTC_H_ -#include <linux/interrupt.h> - /* * The struct used to pass data via the following ioctl. 
Similar to the * struct tm in <time.h>, but it needs to be here so that the kernel @@ -95,6 +93,8 @@ struct rtc_pll_info { #ifdef __KERNEL__ +#include <linux/interrupt.h> + typedef struct rtc_task { void (*func)(void *private_data); void *private_data; diff --git a/include/linux/slab.h b/include/linux/slab.h index 8cf52939d0a..2b28c849d75 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -38,7 +38,6 @@ typedef struct kmem_cache kmem_cache_t; #define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor (as verifier) */ #define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */ #define SLAB_POISON 0x00000800UL /* Poison objects */ -#define SLAB_NO_REAP 0x00001000UL /* never reap from the cache */ #define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */ #define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */ #define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */ @@ -118,7 +117,7 @@ extern void *kzalloc(size_t, gfp_t); */ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) { - if (n != 0 && size > INT_MAX / n) + if (n != 0 && size > ULONG_MAX / n) return NULL; return kzalloc(n * size, flags); } diff --git a/include/linux/smp.h b/include/linux/smp.h index 44153fdf73f..d699a16b0cb 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -52,23 +52,12 @@ extern void smp_cpus_done(unsigned int max_cpus); /* * Call a function on all other processors */ -extern int smp_call_function (void (*func) (void *info), void *info, - int retry, int wait); +int smp_call_function(void(*func)(void *info), void *info, int retry, int wait); /* * Call a function on all processors */ -static inline int on_each_cpu(void (*func) (void *info), void *info, - int retry, int wait) -{ - int ret = 0; - - preempt_disable(); - ret = smp_call_function(func, info, retry, wait); - func(info); - preempt_enable(); - return ret; -} +int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait); #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ #define MSG_ALL 0x8001 @@ -94,7 +83,13 @@ void smp_prepare_boot_cpu(void); #define raw_smp_processor_id() 0 #define hard_smp_processor_id() 0 #define smp_call_function(func,info,retry,wait) ({ 0; }) -#define on_each_cpu(func,info,retry,wait) ({ func(info); 0; }) +#define on_each_cpu(func,info,retry,wait) \ + ({ \ + local_irq_disable(); \ + func(info); \ + local_irq_enable(); \ + 0; \ + }) static inline void smp_send_reschedule(int cpu) { } #define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) diff --git a/include/linux/swap.h b/include/linux/swap.h index d572b19afb7..12415dd9445 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -172,9 +172,24 @@ extern int rotate_reclaimable_page(struct page *page); extern void swap_setup(void); /* linux/mm/vmscan.c */ -extern int try_to_free_pages(struct zone **, gfp_t); -extern int shrink_all_memory(int); +extern unsigned long try_to_free_pages(struct zone **, gfp_t); +extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; +extern int remove_mapping(struct address_space *mapping, struct page *page); + +/* possible outcome of pageout() */ +typedef enum { + /* failed to write page out, page is locked */ + PAGE_KEEP, + /* move page to the active list, page is locked */ + PAGE_ACTIVATE, + /* page has been sent to the disk successfully, page is unlocked */ + PAGE_SUCCESS, + /* page is clean and locked */ + PAGE_CLEAN, +} pageout_t; + +extern pageout_t pageout(struct page *page, struct 
address_space *mapping); #ifdef CONFIG_NUMA extern int zone_reclaim_mode; @@ -188,25 +203,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) } #endif -#ifdef CONFIG_MIGRATION -extern int isolate_lru_page(struct page *p); -extern int putback_lru_pages(struct list_head *l); -extern int migrate_page(struct page *, struct page *); -extern void migrate_page_copy(struct page *, struct page *); -extern int migrate_page_remove_references(struct page *, struct page *, int); -extern int migrate_pages(struct list_head *l, struct list_head *t, - struct list_head *moved, struct list_head *failed); -extern int fail_migrate_page(struct page *, struct page *); -#else -static inline int isolate_lru_page(struct page *p) { return -ENOSYS; } -static inline int putback_lru_pages(struct list_head *l) { return 0; } -static inline int migrate_pages(struct list_head *l, struct list_head *t, - struct list_head *moved, struct list_head *failed) { return -ENOSYS; } -/* Possible settings for the migrate_page() method in address_operations */ -#define migrate_page NULL -#define fail_migrate_page NULL -#endif - #ifdef CONFIG_MMU /* linux/mm/shmem.c */ extern int shmem_unuse(swp_entry_t entry, struct page *page); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 86b11130023..957c21c16d6 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -20,6 +20,10 @@ struct work_struct { struct timer_list timer; }; +struct execute_work { + struct work_struct work; +}; + #define __WORK_INITIALIZER(n, f, d) { \ .entry = { &(n).entry, &(n).entry }, \ .func = (f), \ @@ -74,6 +78,8 @@ extern void init_workqueues(void); void cancel_rearming_delayed_work(struct work_struct *work); void cancel_rearming_delayed_workqueue(struct workqueue_struct *, struct work_struct *); +int execute_in_process_context(void (*fn)(void *), void *, + struct execute_work *); /* * Kill off a pending schedule_delayed_work(). Note that the work callback diff --git a/include/linux/x25.h b/include/linux/x25.h index 16d44931afa..d035e4e87d0 100644 --- a/include/linux/x25.h +++ b/include/linux/x25.h @@ -11,6 +11,8 @@ #ifndef X25_KERNEL_H #define X25_KERNEL_H +#include <linux/types.h> + #define SIOCX25GSUBSCRIP (SIOCPROTOPRIVATE + 0) #define SIOCX25SSUBSCRIP (SIOCPROTOPRIVATE + 1) #define SIOCX25GFACILITIES (SIOCPROTOPRIVATE + 2) @@ -21,6 +23,8 @@ #define SIOCX25SCUDMATCHLEN (SIOCPROTOPRIVATE + 7) #define SIOCX25CALLACCPTAPPRV (SIOCPROTOPRIVATE + 8) #define SIOCX25SENDCALLACCPT (SIOCPROTOPRIVATE + 9) +#define SIOCX25GDTEFACILITIES (SIOCPROTOPRIVATE + 10) +#define SIOCX25SDTEFACILITIES (SIOCPROTOPRIVATE + 11) /* * Values for {get,set}sockopt. @@ -77,6 +81,8 @@ struct x25_subscrip_struct { #define X25_MASK_PACKET_SIZE 0x04 #define X25_MASK_WINDOW_SIZE 0x08 +#define X25_MASK_CALLING_AE 0x10 +#define X25_MASK_CALLED_AE 0x20 /* @@ -99,6 +105,26 @@ struct x25_facilities { }; /* +* ITU DTE facilities +* Only the called and calling address +* extension are currently implemented. +* The rest are in place to avoid the struct +* changing size if someone needs them later +*/ + +struct x25_dte_facilities { + __u16 delay_cumul; + __u16 delay_target; + __u16 delay_max; + __u8 min_throughput; + __u8 expedited; + __u8 calling_len; + __u8 called_len; + __u8 calling_ae[20]; + __u8 called_ae[20]; +}; + +/* * Call User Data structure. 
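In the slab.h hunk above, kcalloc()'s guard changes from INT_MAX to ULONG_MAX/n, so it now rejects exactly the multiplications that would wrap the size calculation rather than capping totals at INT_MAX. A stand-alone calloc-style wrapper using the same guard:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Same overflow guard as the updated kcalloc(): refuse the request if
 * n * size would wrap an unsigned long. */
static void *checked_calloc(size_t n, size_t size)
{
        if (n != 0 && size > ULONG_MAX / n)
                return NULL;
        return calloc(n, size);     /* stands in for kzalloc(n * size, flags) */
}

int main(void)
{
        void *ok  = checked_calloc(1024, sizeof(long));
        void *bad = checked_calloc(ULONG_MAX / 2, 16);   /* would overflow */

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
}
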
*/ struct x25_calluserdata { diff --git a/include/net/compat.h b/include/net/compat.h index 290bab46d45..8662b8f43df 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -23,6 +23,9 @@ struct compat_cmsghdr { compat_int_t cmsg_type; }; +struct sock; +extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *); + #else /* defined(CONFIG_COMPAT) */ #define compat_msghdr msghdr /* to avoid compiler warnings */ #endif /* defined(CONFIG_COMPAT) */ @@ -34,7 +37,6 @@ extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsi extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *); extern int put_cmsg_compat(struct msghdr*, int, int, int, void *); -struct sock; extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int); #endif /* NET_COMPAT_H */ diff --git a/include/net/x25.h b/include/net/x25.h index fee62ff8c19..0ad90ebcf86 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -101,9 +101,17 @@ enum { #define X25_FAC_PACKET_SIZE 0x42 #define X25_FAC_WINDOW_SIZE 0x43 -#define X25_MAX_FAC_LEN 20 /* Plenty to spare */ +#define X25_MAX_FAC_LEN 60 #define X25_MAX_CUD_LEN 128 +#define X25_FAC_CALLING_AE 0xCB +#define X25_FAC_CALLED_AE 0xC9 + +#define X25_MARKER 0x00 +#define X25_DTE_SERVICES 0x0F +#define X25_MAX_AE_LEN 40 /* Max num of semi-octets in AE - OSI Nw */ +#define X25_MAX_DTE_FACIL_LEN 21 /* Max length of DTE facility params */ + /** * struct x25_route - x25 routing entry * @node - entry in x25_list_lock @@ -148,6 +156,7 @@ struct x25_sock { struct timer_list timer; struct x25_causediag causediag; struct x25_facilities facilities; + struct x25_dte_facilities dte_facilities; struct x25_calluserdata calluserdata; unsigned long vc_facil_mask; /* inc_call facilities mask */ }; @@ -180,9 +189,13 @@ extern void x25_establish_link(struct x25_neigh *); extern void x25_terminate_link(struct x25_neigh *); /* x25_facilities.c */ -extern int x25_parse_facilities(struct sk_buff *, struct x25_facilities *, unsigned long *); -extern int x25_create_facilities(unsigned char *, struct x25_facilities *, unsigned long); -extern int x25_negotiate_facilities(struct sk_buff *, struct sock *, struct x25_facilities *); +extern int x25_parse_facilities(struct sk_buff *, struct x25_facilities *, + struct x25_dte_facilities *, unsigned long *); +extern int x25_create_facilities(unsigned char *, struct x25_facilities *, + struct x25_dte_facilities *, unsigned long); +extern int x25_negotiate_facilities(struct sk_buff *, struct sock *, + struct x25_facilities *, + struct x25_dte_facilities *); extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *); /* x25_in.c */ diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 9c331258bc2..c60b8ff2f5e 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -433,6 +433,4 @@ struct scsi_lun { /* Used to obtain the PCI location of a device */ #define SCSI_IOCTL_GET_PCI 0x5387 -int scsi_execute_in_process_context(void (*fn)(void *data), void *data); - #endif /* _SCSI_SCSI_H */ diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 7529f4388bb..1ace1b9fe53 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -104,10 +104,10 @@ struct scsi_cmnd { working on */ #define SCSI_SENSE_BUFFERSIZE 96 - unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE]; /* obtained by REQUEST SENSE - * when CHECK CONDITION is - * received on original command - * (auto-sense) */ + unsigned char 
sense_buffer[SCSI_SENSE_BUFFERSIZE]; + /* obtained by REQUEST SENSE when + * CHECK CONDITION is received on original + * command (auto-sense) */ /* Low-level done function - can be used by low-level driver to point * to completion function. Not used by mid/upper level code. */ @@ -120,12 +120,12 @@ struct scsi_cmnd { struct scsi_pointer SCp; /* Scratchpad used by some host adapters */ unsigned char *host_scribble; /* The host adapter is allowed to - * call scsi_malloc and get some memory - * and hang it here. The host adapter - * is also expected to call scsi_free - * to release this memory. (The memory - * obtained by scsi_malloc is guaranteed - * to be at an address < 16Mb). */ + * call scsi_malloc and get some memory + * and hang it here. The host adapter + * is also expected to call scsi_free + * to release this memory. (The memory + * obtained by scsi_malloc is guaranteed + * to be at an address < 16Mb). */ int result; /* Status code from lower level driver */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 290e3b4d2ae..895d212864c 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -4,6 +4,7 @@ #include <linux/device.h> #include <linux/list.h> #include <linux/spinlock.h> +#include <linux/workqueue.h> #include <asm/atomic.h> struct request_queue; @@ -73,7 +74,6 @@ struct scsi_device { unsigned sector_size; /* size in bytes */ void *hostdata; /* available to low-level driver */ - char devfs_name[256]; /* devfs junk */ char type; char scsi_level; char inq_periph_qual; /* PQ from INQUIRY data */ @@ -138,6 +138,8 @@ struct scsi_device { struct device sdev_gendev; struct class_device sdev_classdev; + struct execute_work ew; /* used to get process context on put */ + enum scsi_device_state sdev_state; unsigned long sdev_data[0]; } __attribute__((aligned(sizeof(unsigned long)))); @@ -154,6 +156,11 @@ struct scsi_device { #define scmd_printk(prefix, scmd, fmt, a...) \ dev_printk(prefix, &(scmd)->device->sdev_gendev, fmt, ##a) +enum scsi_target_state { + STARGET_RUNNING = 1, + STARGET_DEL, +}; + /* * scsi_target: representation of a scsi target, for now, this is only * used for single_lun devices. If no one has active IO to the target, @@ -168,8 +175,13 @@ struct scsi_target { unsigned int channel; unsigned int id; /* target id ... replace * scsi_device.id eventually */ - unsigned long create:1; /* signal that it needs to be added */ + unsigned int create:1; /* signal that it needs to be added */ + unsigned int pdt_1f_for_no_lun; /* PDT = 0x1f */ + /* means no lun present */ + char scsi_level; + struct execute_work ew; + enum scsi_target_state state; void *hostdata; /* available to low-level driver */ unsigned long starget_data[0]; /* for the transport */ /* starget_data must be the last element!!!! 
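The scsi_target comment above insists that starget_data[] stay the last member: it is a zero-length trailing array, and per-target transport data is obtained by allocating sizeof(struct scsi_target) plus however much the transport asks for beyond the end. A user-space sketch of that idiom (a C99 flexible array member is used here; the kernel header uses the older [0] form):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct target_like {
        unsigned int id;
        unsigned int channel;
        /* must be the last element: extra transport data lives past here */
        unsigned long starget_data[];
};

int main(void)
{
        size_t transport_size = 32;           /* whatever the transport needs */
        struct target_like *t = malloc(sizeof(*t) + transport_size);

        if (!t)
                return 1;
        t->id = 3;
        t->channel = 0;
        memset(t->starget_data, 0, transport_size);  /* transport private area */
        printf("target %u:%u, %zu bytes of transport data\n",
               t->channel, t->id, transport_size);
        free(t);
        return 0;
}
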
*/ @@ -249,6 +261,11 @@ extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, unsigned char *buffer, int len, int timeout, int retries, struct scsi_mode_data *data, struct scsi_sense_hdr *); +extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, + int modepage, unsigned char *buffer, int len, + int timeout, int retries, + struct scsi_mode_data *data, + struct scsi_sense_hdr *); extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries); extern int scsi_device_set_state(struct scsi_device *sdev, @@ -281,6 +298,11 @@ extern int scsi_execute_async(struct scsi_device *sdev, void (*done)(void *, char *, int, int), gfp_t gfp); +static inline void scsi_device_reprobe(struct scsi_device *sdev) +{ + device_reprobe(&sdev->sdev_gendev); +} + static inline unsigned int sdev_channel(struct scsi_device *sdev) { return sdev->channel; diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 827992949c4..a6cf3e535c0 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -147,20 +147,6 @@ struct scsi_host_template { int (* eh_host_reset_handler)(struct scsi_cmnd *); /* - * This is an optional routine to notify the host that the scsi - * timer just fired. The returns tell the timer routine what to - * do about this: - * - * EH_HANDLED: I fixed the error, please complete the command - * EH_RESET_TIMER: I need more time, reset the timer and - * begin counting again - * EH_NOT_HANDLED Begin normal error recovery - * - * Status: OPTIONAL - */ - enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); - - /* * Before the mid layer attempts to scan for a new device where none * currently exists, it will call this entry in your driver. Should * your driver need to allocate any structs or perform any other init diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h index e7b1054adf8..b3657f11193 100644 --- a/include/scsi/scsi_transport.h +++ b/include/scsi/scsi_transport.h @@ -48,6 +48,17 @@ struct scsi_transport_template { * True if the transport wants to use a host-based work-queue */ unsigned int create_work_queue : 1; + + /* + * This is an optional routine that allows the transport to become + * involved when a scsi io timer fires. 
The return value tells the + * timer routine how to finish the io timeout handling: + * EH_HANDLED: I fixed the error, please complete the command + * EH_RESET_TIMER: I need more time, reset the timer and + * begin counting again + * EH_NOT_HANDLED Begin normal error recovery + */ + enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); }; #define transport_class_to_shost(tc) \ diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index b91400bfb02..93cfb4bf421 100644 --- a/include/scsi/scsi_transport_sas.h +++ b/include/scsi/scsi_transport_sas.h @@ -30,6 +30,7 @@ enum sas_linkrate { SAS_SATA_PORT_SELECTOR, SAS_LINK_RATE_1_5_GBPS, SAS_LINK_RATE_3_0_GBPS, + SAS_LINK_RATE_6_0_GBPS, SAS_LINK_VIRTUAL, }; @@ -89,11 +90,45 @@ struct sas_rphy { dev_to_rphy((cdev)->dev) #define rphy_to_shost(rphy) \ dev_to_shost((rphy)->dev.parent) +#define target_to_rphy(targ) \ + dev_to_rphy((targ)->dev.parent) + +struct sas_end_device { + struct sas_rphy rphy; + /* flags */ + unsigned ready_led_meaning:1; + /* parameters */ + u16 I_T_nexus_loss_timeout; + u16 initiator_response_timeout; +}; +#define rphy_to_end_device(r) \ + container_of((r), struct sas_end_device, rphy) + +struct sas_expander_device { + int level; + + #define SAS_EXPANDER_VENDOR_ID_LEN 8 + char vendor_id[SAS_EXPANDER_VENDOR_ID_LEN+1]; + #define SAS_EXPANDER_PRODUCT_ID_LEN 16 + char product_id[SAS_EXPANDER_PRODUCT_ID_LEN+1]; + #define SAS_EXPANDER_PRODUCT_REV_LEN 4 + char product_rev[SAS_EXPANDER_PRODUCT_REV_LEN+1]; + #define SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN 8 + char component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN+1]; + u16 component_id; + u8 component_revision_id; + struct sas_rphy rphy; + +}; +#define rphy_to_expander_device(r) \ + container_of((r), struct sas_expander_device, rphy) /* The functions by which the transport class and the driver communicate */ struct sas_function_template { int (*get_linkerrors)(struct sas_phy *); + int (*get_enclosure_identifier)(struct sas_rphy *, u64 *); + int (*get_bay_identifier)(struct sas_rphy *); int (*phy_reset)(struct sas_phy *, int); }; @@ -106,7 +141,8 @@ extern int sas_phy_add(struct sas_phy *); extern void sas_phy_delete(struct sas_phy *); extern int scsi_is_sas_phy(const struct device *); -extern struct sas_rphy *sas_rphy_alloc(struct sas_phy *); +extern struct sas_rphy *sas_end_device_alloc(struct sas_phy *); +extern struct sas_rphy *sas_expander_alloc(struct sas_phy *, enum sas_device_type); void sas_rphy_free(struct sas_rphy *); extern int sas_rphy_add(struct sas_rphy *); extern void sas_rphy_delete(struct sas_rphy *); @@ -115,5 +151,17 @@ extern int scsi_is_sas_rphy(const struct device *); extern struct scsi_transport_template * sas_attach_transport(struct sas_function_template *); extern void sas_release_transport(struct scsi_transport_template *); +int sas_read_port_mode_page(struct scsi_device *); + +static inline int +scsi_is_sas_expander_device(struct device *dev) +{ + struct sas_rphy *rphy; + if (!scsi_is_sas_rphy(dev)) + return 0; + rphy = dev_to_rphy(dev); + return rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE || + rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE; +} #endif /* SCSI_TRANSPORT_SAS_H */ diff --git a/include/scsi/scsi_transport_spi.h b/include/scsi/scsi_transport_spi.h index fb5a2ffae93..5e1d61913d4 100644 --- a/include/scsi/scsi_transport_spi.h +++ b/include/scsi/scsi_transport_spi.h @@ -148,5 +148,9 @@ void spi_schedule_dv_device(struct scsi_device *); void spi_dv_device(struct scsi_device *); void 
spi_display_xfer_agreement(struct scsi_target *); int spi_print_msg(const unsigned char *); +int spi_populate_width_msg(unsigned char *msg, int width); +int spi_populate_sync_msg(unsigned char *msg, int period, int offset); +int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, int width, + int options); #endif /* SCSI_TRANSPORT_SPI_H */
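The new SAS end-device and expander structures above embed a struct sas_rphy and recover the containing object with container_of() via rphy_to_end_device()/rphy_to_expander_device(). A stand-alone illustration of that idiom with simplified stand-in structures (the member names are only loosely based on the header above):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for the SAS transport structures. */
struct sas_rphy {
        int scsi_target_id;
};

struct sas_end_device {
        struct sas_rphy rphy;
        unsigned ready_led_meaning:1;
        unsigned short I_T_nexus_loss_timeout;
};

#define rphy_to_end_device(r) \
        container_of((r), struct sas_end_device, rphy)

int main(void)
{
        struct sas_end_device dev = { .rphy = { .scsi_target_id = 5 },
                                      .I_T_nexus_loss_timeout = 2000 };
        struct sas_rphy *rphy = &dev.rphy;   /* what generic code passes around */

        /* Recover the end-device wrapper from the embedded rphy. */
        struct sas_end_device *ed = rphy_to_end_device(rphy);
        printf("timeout=%d for target %d\n",
               ed->I_T_nexus_loss_timeout, rphy->scsi_target_id);
        return 0;
}
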