From abe6602bf197167efb3b37161b9c11748fa076e1 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 5 Jan 2009 23:47:21 +0900
Subject: x86: add map_page and unmap_page to struct dma_mapping_ops

This patch adds map_page and unmap_page to struct dma_mapping_ops, in
preparation for the unification of struct dma_mapping_ops. We use
map_page and unmap_page instead of map_single and unmap_single; the
map_single and unmap_single hooks will be removed in the last patch of
this patchset.

Signed-off-by: FUJITA Tomonori
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/dma-mapping.h | 8 ++++++++
 1 file changed, 8 insertions(+)
(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4035357f5b9..3fe05046fb3 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -7,6 +7,7 @@
  */

 #include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 #include <asm-generic/dma-coherent.h>
@@ -50,6 +51,13 @@ struct dma_mapping_ops {
 	void (*unmap_sg)(struct device *hwdev,
 			 struct scatterlist *sg, int nents,
 			 int direction);
+	dma_addr_t (*map_page)(struct device *dev, struct page *page,
+			       unsigned long offset, size_t size,
+			       enum dma_data_direction dir,
+			       struct dma_attrs *attrs);
+	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+			   size_t size, enum dma_data_direction dir,
+			   struct dma_attrs *attrs);
 	int (*dma_supported)(struct device *hwdev, u64 mask);
 	int is_phys;
 };
--
cgit v1.2.3
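The conversion pattern the following patches repeat at every site is the same: the new map_page hook becomes the primary implementation, and the legacy map_single hook survives temporarily as a thin wrapper around it. A minimal sketch of that wrapper, with generic names rather than any one driver's (illustrative only; "ops_map_page" stands in for a driver's real map_page implementation):

    /*
     * Sketch of the transitional wrapper pattern used throughout this
     * series: express the old phys_addr_t-based hook in terms of the
     * new page-based one.
     */
    static dma_addr_t legacy_map_single(struct device *dev, phys_addr_t paddr,
                                        size_t size, int dir)
    {
            /* Split the physical address into a page and an in-page offset. */
            return ops_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
                                paddr & ~PAGE_MASK, size, dir, NULL);
    }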
From 4cf37bb7d9dc6dfb5d5fca7f735ba65ba173dabc Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 5 Jan 2009 23:47:22 +0900
Subject: x86, swiotlb: add map_page and unmap_page

This prepares for the unification of struct dma_mapping_ops: map_page
and unmap_page are used instead of map_single and unmap_single. This is
a temporary workaround; these helpers will move to lib/swiotlb.c so that
x86's swiotlb code can use them directly. The map_single and
unmap_single hooks will be removed in the last patch of this patchset.

Signed-off-by: FUJITA Tomonori
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/pci-swiotlb_64.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
(limited to 'arch/x86')

diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index d59c9174766..d1c0366886d 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -45,6 +45,23 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }

+/* these will be moved to lib/swiotlb.c later on */
+
+static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+				   unsigned long offset, size_t size,
+				   enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	return swiotlb_map_single(dev, page_address(page) + offset, size, dir);
+}
+
+static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
+			       size_t size, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
+{
+	swiotlb_unmap_single(dev, dma_handle, size, dir);
+}
+
 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 					dma_addr_t *dma_handle, gfp_t flags)
 {
@@ -71,6 +88,8 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
 	.map_sg = swiotlb_map_sg,
 	.unmap_sg = swiotlb_unmap_sg,
+	.map_page = swiotlb_map_page,
+	.unmap_page = swiotlb_unmap_page,
 	.dma_supported = NULL,
 };
--
cgit v1.2.3
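Note that the wrapper above computes a kernel virtual address with page_address(), which only exists for lowmem pages. That is acceptable in this x86-64-only file, but it is one reason the final, generic version in lib/swiotlb.c has to work in terms of physical addresses. A hedged sketch of the distinction (phys_to_bus_sketch is a hypothetical helper, not a swiotlb API):

    /* Sketch only: a highmem-safe map_page must avoid page_address(). */
    static dma_addr_t map_page_sketch(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir)
    {
            /* Always well defined, even when the page has no kernel mapping: */
            phys_addr_t paddr = page_to_phys(page) + offset;

            /*
             * page_address(page) may be NULL for highmem pages on 32-bit,
             * so a generic implementation must map from paddr instead.
             */
            return phys_to_bus_sketch(dev, paddr, size, dir); /* hypothetical */
    }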
From 052aedbfb6cd4fd1a73010735da88a9dcc224673 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 5 Jan 2009 23:47:23 +0900
Subject: x86, gart: add map_page and unmap_page

This prepares for the unification of struct dma_mapping_ops: map_page
and unmap_page are used instead of map_single and unmap_single. The
map_single and unmap_single hooks will be removed in the last patch of
this patchset.

Signed-off-by: FUJITA Tomonori
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/pci-gart_64.c | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)
(limited to 'arch/x86')

diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 00c2bcd4146..e49c6dd0e8c 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -255,10 +255,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 }

 /* Map a single area into the IOMMU */
-static dma_addr_t
-gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
+static dma_addr_t gart_map_page(struct device *dev, struct page *page,
+				unsigned long offset, size_t size,
+				enum dma_data_direction dir,
+				struct dma_attrs *attrs)
 {
 	unsigned long bus;
+	phys_addr_t paddr = page_to_phys(page) + offset;

 	if (!dev)
 		dev = &x86_dma_fallback_dev;
@@ -272,11 +275,19 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 	return bus;
 }

+static dma_addr_t gart_map_single(struct device *dev, phys_addr_t paddr,
+				  size_t size, int dir)
+{
+	return gart_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
+			     paddr & ~PAGE_MASK, size, dir, NULL);
+}
+
 /*
  * Free a DMA mapping.
  */
-static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
-			      size_t size, int direction)
+static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
+			    size_t size, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
 	unsigned long iommu_page;
 	int npages;
@@ -295,6 +306,12 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
 	free_iommu(iommu_page, npages);
 }

+static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
+			      size_t size, int direction)
+{
+	gart_unmap_page(dev, dma_addr, size, direction, NULL);
+}
+
 /*
  * Wrapper for pci_unmap_single working with scatterlists.
 */
@@ -712,6 +729,8 @@ static struct dma_mapping_ops gart_dma_ops = {
 	.unmap_single = gart_unmap_single,
 	.map_sg = gart_map_sg,
 	.unmap_sg = gart_unmap_sg,
+	.map_page = gart_map_page,
+	.unmap_page = gart_unmap_page,
 	.alloc_coherent = gart_alloc_coherent,
 	.free_coherent = gart_free_coherent,
 };
--
cgit v1.2.3
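The correctness of these wrappers rests on the physical-address/page round trip being exact: any paddr splits losslessly into pfn_to_page(paddr >> PAGE_SHIFT) plus paddr & ~PAGE_MASK. A self-contained sanity sketch (not part of the patch):

    /*
     * Illustrative only: splitting a physical address into a page and
     * an in-page offset, then recombining, is lossless. This is what
     * makes gart_map_single()-on-top-of-gart_map_page() safe.
     */
    static inline dma_addr_t roundtrip_check(phys_addr_t paddr)
    {
            struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
            unsigned long offset = paddr & ~PAGE_MASK;

            BUG_ON(page_to_phys(page) + offset != paddr);
            return page_to_phys(page) + offset;
    }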
From 3991605c40b1059d0204d9fddfcf878429ab8948 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 5 Jan 2009 23:47:24 +0900
Subject: x86, calgary: add map_page and unmap_page

This prepares for the unification of struct dma_mapping_ops: map_page
and unmap_page are used instead of map_single and unmap_single. The
map_single and unmap_single hooks will be removed in the last patch of
this patchset.

Signed-off-by: FUJITA Tomonori
Acked-by: Muli Ben-Yehuda
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/pci-calgary_64.c | 35 +++++++++++++++++++++++++++--------
 1 file changed, 27 insertions(+), 8 deletions(-)
(limited to 'arch/x86')

diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index d28bbdc35e4..e33cfcf1af5 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -445,10 +445,12 @@ error:
 	return 0;
 }

-static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
-	size_t size, int direction)
+static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
+				   unsigned long offset, size_t size,
+				   enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
 {
-	void *vaddr = phys_to_virt(paddr);
+	void *vaddr = page_address(page) + offset;
 	unsigned long uaddr;
 	unsigned int npages;
 	struct iommu_table *tbl = find_iommu_table(dev);
@@ -456,17 +458,32 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
 	uaddr = (unsigned long)vaddr;
 	npages = iommu_num_pages(uaddr, size, PAGE_SIZE);

-	return iommu_alloc(dev, tbl, vaddr, npages, direction);
+	return iommu_alloc(dev, tbl, vaddr, npages, dir);
 }

-static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
-	size_t size, int direction)
+static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
+				     size_t size, int direction)
+{
+	return calgary_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
+				paddr & ~PAGE_MASK, size,
+				direction, NULL);
+}
+
+static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
+			       size_t size, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 	struct iommu_table *tbl = find_iommu_table(dev);
 	unsigned int npages;

-	npages = iommu_num_pages(dma_handle, size, PAGE_SIZE);
-	iommu_free(tbl, dma_handle, npages);
+	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
+	iommu_free(tbl, dma_addr, npages);
+}
+
+static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
+				 size_t size, int direction)
+{
+	calgary_unmap_page(dev, dma_handle, size, direction, NULL);
 }

 static void* calgary_alloc_coherent(struct device *dev, size_t size,
@@ -522,6 +539,8 @@ static struct dma_mapping_ops calgary_dma_ops = {
 	.unmap_single = calgary_unmap_single,
 	.map_sg = calgary_map_sg,
 	.unmap_sg = calgary_unmap_sg,
+	.map_page = calgary_map_page,
+	.unmap_page = calgary_unmap_page,
 };

 static inline void __iomem * busno_to_bbar(unsigned char num)
--
cgit v1.2.3

From 51491367c2541c51a9d435eec88b0e846223fb59 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 5 Jan 2009 23:47:25 +0900
Subject: x86, AMD IOMMU: add map_page and unmap_page

This prepares for the unification of struct dma_mapping_ops: map_page
and unmap_page are used instead of map_single and unmap_single. The
map_single and unmap_single hooks will be removed in the last patch of
this patchset.

Signed-off-by: FUJITA Tomonori
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/amd_iommu.c | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)
(limited to 'arch/x86')

diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 5113c080f0c..85704418644 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -22,6 +22,7 @@
 #include <linux/gfp.h>
 #include <linux/bitops.h>
 #include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
 #ifdef CONFIG_IOMMU_API
 #include <linux/iommu.h>
 #endif
@@ -1297,8 +1298,10 @@ static void __unmap_single(struct amd_iommu *iommu,
 /*
  * The exported map_single function for dma_ops.
  */
-static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
-			     size_t size, int dir)
+static dma_addr_t map_page(struct device *dev, struct page *page,
+			   unsigned long offset, size_t size,
+			   enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	struct amd_iommu *iommu;
@@ -1306,6 +1309,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	u16 devid;
 	dma_addr_t addr;
 	u64 dma_mask;
+	phys_addr_t paddr = page_to_phys(page) + offset;

 	INC_STATS_COUNTER(cnt_map_single);

@@ -1337,11 +1341,18 @@ out:
 	return addr;
 }

+static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
+			     size_t size, int dir)
+{
+	return map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
+			paddr & ~PAGE_MASK, size, dir, NULL);
+}
+
 /*
  * The exported unmap_single function for dma_ops.
 */
-static void unmap_single(struct device *dev, dma_addr_t dma_addr,
-			 size_t size, int dir)
+static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	struct amd_iommu *iommu;
@@ -1367,6 +1378,12 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 	spin_unlock_irqrestore(&domain->lock, flags);
 }

+static void unmap_single(struct device *dev, dma_addr_t dma_addr,
+			 size_t size, int dir)
+{
+	return unmap_page(dev, dma_addr, size, dir, NULL);
+}
+
 /*
  * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
@@ -1649,6 +1666,8 @@ static struct dma_mapping_ops amd_iommu_dma_ops = {
 	.free_coherent = free_coherent,
 	.map_single = map_single,
 	.unmap_single = unmap_single,
+	.map_page = map_page,
+	.unmap_page = unmap_page,
 	.map_sg = map_sg,
 	.unmap_sg = unmap_sg,
 	.dma_supported = amd_iommu_dma_supported,
--
cgit v1.2.3
From 33feffd4525fc2e4dd0a322fb5d07d61f85d791e Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 5 Jan 2009 23:47:27 +0900
Subject: x86, pci-nommu: add map_page

This prepares for the unification of struct dma_mapping_ops: map_page
and unmap_page are used instead of map_single and unmap_single. The
map_single hook will be removed in the last patch of this patchset.

Signed-off-by: FUJITA Tomonori
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/pci-nommu.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
(limited to 'arch/x86')

diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index c70ab5a5d4c..5a73a824ac1 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -25,18 +25,25 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 	return 1;
 }

-static dma_addr_t
-nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
-		 int direction)
+static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
-	dma_addr_t bus = paddr;
+	dma_addr_t bus = page_to_phys(page) + offset;
 	WARN_ON(size == 0);
-	if (!check_addr("map_single", hwdev, bus, size))
-		return bad_dma_address;
+	if (!check_addr("map_single", dev, bus, size))
+		return bad_dma_address;
 	flush_write_buffers();
 	return bus;
 }

+static dma_addr_t nommu_map_single(struct device *hwdev, phys_addr_t paddr,
+				   size_t size, int direction)
+{
+	return nommu_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+			      paddr & ~PAGE_MASK, size, direction, NULL);
+}
+
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scatter-gather version of the
@@ -83,6 +90,7 @@ struct dma_mapping_ops nommu_dma_ops = {
 	.free_coherent = nommu_free_coherent,
 	.map_single = nommu_map_single,
 	.map_sg = nommu_map_sg,
+	.map_page = nommu_map_page,
 	.is_phys = 1,
 };
--
cgit v1.2.3
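For the nommu case the mapping really is the identity on physical addresses (hence is_phys = 1), which is also why no unmap_page hook is added: there is no IOMMU state to tear down. An illustrative check of that invariant, using only the generic DMA API (not part of the patch):

    /* Illustration only: with nommu_dma_ops the bus address handed
     * back by map_page is exactly the CPU-physical address. */
    static bool nommu_is_identity(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size)
    {
            dma_addr_t bus = dma_map_page(dev, page, offset, size,
                                          DMA_TO_DEVICE);
            bool identity = (bus == page_to_phys(page) + offset);

            dma_unmap_page(dev, bus, size, DMA_TO_DEVICE);
            return identity;    /* true whenever nommu_dma_ops is in use */
    }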
From d7dff84053524186b139342ac66a4160ce6bb517 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 5 Jan 2009 23:47:28 +0900
Subject: x86: remove map_single and unmap_single in struct dma_mapping_ops

This patch converts dma_map_single and dma_unmap_single to use map_page
and unmap_page respectively, and removes the now-unnecessary map_single
and unmap_single hooks from struct dma_mapping_ops. intel-iommu's
dma_map_single and dma_unmap_single are left in place since IA64 uses
them; they will be removed after the unification.

Signed-off-by: FUJITA Tomonori
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/dma-mapping.h | 15 ++++++---------
 arch/x86/kernel/amd_iommu.c        | 15 ---------------
 arch/x86/kernel/pci-calgary_64.c   | 16 ----------------
 arch/x86/kernel/pci-gart_64.c      | 19 ++-----------------
 arch/x86/kernel/pci-nommu.c        |  8 --------
 arch/x86/kernel/pci-swiotlb_64.c   |  9 ---------
 6 files changed, 8 insertions(+), 74 deletions(-)
(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 3fe05046fb3..b81f82268a1 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -24,10 +24,6 @@ struct dma_mapping_ops {
 			dma_addr_t *dma_handle, gfp_t gfp);
 	void (*free_coherent)(struct device *dev, size_t size,
 			      void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
-				 size_t size, int direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t addr,
-			     size_t size, int direction);
 	void (*sync_single_for_cpu)(struct device *hwdev,
 				    dma_addr_t dma_handle, size_t size,
 				    int direction);
@@ -103,7 +99,9 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

 	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	return ops->map_page(hwdev, virt_to_page(ptr),
+			     (unsigned long)ptr & ~PAGE_MASK, size,
+			     direction, NULL);
 }

 static inline void
@@ -113,8 +111,8 @@ dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
 	struct dma_mapping_ops *ops = get_dma_ops(dev);

 	BUG_ON(!valid_dma_direction(direction));
-	if (ops->unmap_single)
-		ops->unmap_single(dev, addr, size, direction);
+	if (ops->unmap_page)
+		ops->unmap_page(dev, addr, size, direction, NULL);
 }

 static inline int
@@ -221,8 +219,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	struct dma_mapping_ops *ops = get_dma_ops(dev);

 	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_single(dev, page_to_phys(page) + offset,
-			       size, direction);
+	return ops->map_page(dev, page, offset, size, direction, NULL);
 }

 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 85704418644..a5dedb690a9 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1341,13 +1341,6 @@ out:
 	return addr;
 }

-static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
-			     size_t size, int dir)
-{
-	return map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
-			paddr & ~PAGE_MASK, size, dir, NULL);
-}
-
 /*
  * The exported unmap_single function for dma_ops.
  */
@@ -1378,12 +1371,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 	spin_unlock_irqrestore(&domain->lock, flags);
 }

-static void unmap_single(struct device *dev, dma_addr_t dma_addr,
-			 size_t size, int dir)
-{
-	return unmap_page(dev, dma_addr, size, dir, NULL);
-}
-
 /*
  * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
@@ -1664,8 +1651,6 @@ static void prealloc_protection_domains(void)
 static struct dma_mapping_ops amd_iommu_dma_ops = {
 	.alloc_coherent = alloc_coherent,
 	.free_coherent = free_coherent,
-	.map_single = map_single,
-	.unmap_single = unmap_single,
 	.map_page = map_page,
 	.unmap_page = unmap_page,
 	.map_sg = map_sg,
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index e33cfcf1af5..756138b604e 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -461,14 +461,6 @@ static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
 	return iommu_alloc(dev, tbl, vaddr, npages, dir);
 }

-static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
-				     size_t size, int direction)
-{
-	return calgary_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
-				paddr & ~PAGE_MASK, size,
-				direction, NULL);
-}
-
 static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
 			       size_t size, enum dma_data_direction dir,
 			       struct dma_attrs *attrs)
@@ -480,12 +472,6 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	iommu_free(tbl, dma_addr, npages);
 }

-static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
-				 size_t size, int direction)
-{
-	calgary_unmap_page(dev, dma_handle, size, direction, NULL);
-}
-
 static void* calgary_alloc_coherent(struct device *dev, size_t size,
 				    dma_addr_t *dma_handle, gfp_t flag)
 {
@@ -535,8 +521,6 @@ static void calgary_free_coherent(struct device *dev, size_t size,
 static struct dma_mapping_ops calgary_dma_ops = {
 	.alloc_coherent = calgary_alloc_coherent,
 	.free_coherent = calgary_free_coherent,
-	.map_single = calgary_map_single,
-	.unmap_single = calgary_unmap_single,
 	.map_sg = calgary_map_sg,
 	.unmap_sg = calgary_unmap_sg,
 	.map_page = calgary_map_page,
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index e49c6dd0e8c..9c557c0c928 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -275,13 +275,6 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page,
 	return bus;
 }

-static dma_addr_t gart_map_single(struct device *dev, phys_addr_t paddr,
-				  size_t size, int dir)
-{
-	return gart_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
-			     paddr & ~PAGE_MASK, size, dir, NULL);
-}
-
 /*
  * Free a DMA mapping.
 */
@@ -306,12 +299,6 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	free_iommu(iommu_page, npages);
 }

-static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
-			      size_t size, int direction)
-{
-	gart_unmap_page(dev, dma_addr, size, direction, NULL);
-}
-
 /*
  * Wrapper for pci_unmap_single working with scatterlists.
 */
@@ -324,7 +311,7 @@ gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 	for_each_sg(sg, s, nents, i) {
 		if (!s->dma_length || !s->length)
 			break;
-		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
+		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
 	}
 }

@@ -538,7 +525,7 @@ static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 		   dma_addr_t dma_addr)
 {
-	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
+	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, get_order(size));
 }

@@ -725,8 +712,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 }

 static struct dma_mapping_ops gart_dma_ops = {
-	.map_single = gart_map_single,
-	.unmap_single = gart_unmap_single,
 	.map_sg = gart_map_sg,
 	.unmap_sg = gart_unmap_sg,
 	.map_page = gart_map_page,
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 5a73a824ac1..d42b69c90b4 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -38,13 +38,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 	return bus;
 }

-static dma_addr_t nommu_map_single(struct device *hwdev, phys_addr_t paddr,
-				   size_t size, int direction)
-{
-	return nommu_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
-			      paddr & ~PAGE_MASK, size, direction, NULL);
-}
-
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scatter-gather version of the
  * above pci_map_single interface.  Here the scatter gather list
@@ -88,7 +81,6 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
 struct dma_mapping_ops nommu_dma_ops = {
 	.alloc_coherent = dma_generic_alloc_coherent,
 	.free_coherent = nommu_free_coherent,
-	.map_single = nommu_map_single,
 	.map_sg = nommu_map_sg,
 	.map_page = nommu_map_page,
 	.is_phys = 1,
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index d1c0366886d..3ae354c0fde 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -38,13 +38,6 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
 	return 0;
 }

-static dma_addr_t
-swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
-			int direction)
-{
-	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
-}
-
 /* these will be moved to lib/swiotlb.c later on */

 static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
@@ -78,8 +71,6 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
-	.map_single = swiotlb_map_single_phys,
-	.unmap_single = swiotlb_unmap_single,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
--
cgit v1.2.3
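With the hooks gone, every streaming DMA mapping on x86 now funnels through map_page/unmap_page, while the driver-facing API is unchanged. A minimal illustrative caller (not from the patch):

    /* Illustration only: a driver-side streaming mapping, unchanged by
     * this series -- dma_map_single() now routes through ops->map_page()
     * internally. */
    static int demo_dma_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... start the DMA and wait for completion ... */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }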
From 160c1d8e40866edfeae7d68816b7005d70acf391 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 5 Jan 2009 23:59:02 +0900
Subject: x86, ia64: convert to use generic dma_map_ops struct

This converts X86 and IA64 to use include/linux/dma-mapping.h. It's a
bit large but pretty boring. The major change for X86 is converting
'int dir' to 'enum dma_data_direction dir' in the DMA mapping
operations. The major change for IA64 is using map_page and unmap_page
instead of map_single and unmap_single.

Signed-off-by: FUJITA Tomonori
Acked-by: Tony Luck
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/device.h      |   2 +-
 arch/x86/include/asm/dma-mapping.h | 146 ++++++++++++++-----------------------
 arch/x86/include/asm/iommu.h       |   2 +-
 arch/x86/kernel/amd_iommu.c        |   8 +-
 arch/x86/kernel/pci-calgary_64.c   |  15 ++--
 arch/x86/kernel/pci-dma.c          |   4 +-
 arch/x86/kernel/pci-gart_64.c      |  14 ++--
 arch/x86/kernel/pci-nommu.c        |   5 +-
 arch/x86/kernel/pci-swiotlb_64.c   |   6 +-
 9 files changed, 83 insertions(+), 119 deletions(-)
(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 3c034f48fdb..4994a20acbc 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -6,7 +6,7 @@ struct dev_archdata {
 	void *acpi_handle;
 #endif
 #ifdef CONFIG_X86_64
-	struct dma_mapping_ops *dma_ops;
+	struct dma_map_ops *dma_ops;
 #endif
 #ifdef CONFIG_DMAR
 	void *iommu; /* hook for IOMMU specific extension */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index b81f82268a1..5a347805a6c 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -17,50 +17,9 @@ extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;

-struct dma_mapping_ops {
-	int (*mapping_error)(struct device *dev,
-			     dma_addr_t dma_addr);
-	void* (*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle);
-	void (*sync_single_for_cpu)(struct device *hwdev,
-				    dma_addr_t dma_handle, size_t size,
-				    int direction);
-	void (*sync_single_for_device)(struct device *hwdev,
-				       dma_addr_t dma_handle, size_t size,
-				       int direction);
-	void (*sync_single_range_for_cpu)(struct device *hwdev,
-					  dma_addr_t dma_handle, unsigned long offset,
-					  size_t size, int direction);
-	void (*sync_single_range_for_device)(struct device *hwdev,
-					     dma_addr_t dma_handle, unsigned long offset,
-					     size_t size, int direction);
-	void (*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void (*sync_sg_for_device)(struct device *hwdev,
-				   struct scatterlist *sg, int nelems,
-				   int direction);
-	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-		      int nents, int direction);
-	void (*unmap_sg)(struct device *hwdev,
-			 struct scatterlist *sg, int nents,
-			 int direction);
-	dma_addr_t (*map_page)(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction dir,
-			       struct dma_attrs *attrs);
-	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
-			   size_t size, enum dma_data_direction dir,
-			   struct dma_attrs *attrs);
-	int (*dma_supported)(struct device *hwdev, u64 mask);
-	int is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+extern struct dma_map_ops *dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 #ifdef CONFIG_X86_32
 	return dma_ops;
@@ -75,7 +34,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);

@@ -94,138 +53,139 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       int direction)
+	       enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);

-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	return ops->map_page(hwdev, virt_to_page(ptr),
 			     (unsigned long)ptr & ~PAGE_MASK, size,
-			     direction, NULL);
+			     dir, NULL);
 }

 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		 int direction)
+		 enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);

-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, direction, NULL);
+		ops->unmap_page(dev, addr, size, dir, NULL);
 }

 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-	   int nents, int direction)
+	   int nents, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);

-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_sg(hwdev, sg, nents, direction);
+	BUG_ON(!valid_dma_direction(dir));
+	return ops->map_sg(hwdev, sg, nents, dir, NULL);
 }

 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     int direction)
+	     enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);

-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_sg)
-		ops->unmap_sg(hwdev, sg, nents, direction);
+		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
 }

 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, int direction)
+			size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);

-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }

 static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, int direction)
+			   size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);

-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }

 static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size, int direction)
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);

-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_cpu)
 		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-					       size, direction);
+					       size, dir);
 	flush_write_buffers();
 }

 static inline void
 dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
-				 int direction)
+				 enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);

-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_device)
 		ops->sync_single_range_for_device(hwdev, dma_handle,
-						  offset, size, direction);
+						  offset, size, dir);
 	flush_write_buffers();
 }

 static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, int direction)
+		    int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);

-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
 	flush_write_buffers();
 }

 static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, int direction)
+		       int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);

-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_device(hwdev, sg, nelems, dir);

 	flush_write_buffers();
 }

 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
-				      int direction)
+				      enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);

-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_page(dev, page, offset, size, direction, NULL);
+	BUG_ON(!valid_dma_direction(dir));
+	return ops->map_page(dev, page, offset, size, dir, NULL);
 }

 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, int direction)
+				  size_t size, enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, addr, size, direction);
+	dma_unmap_single(dev, addr, size, dir);
 }

 static inline void
@@ -271,7 +231,7 @@ static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t gfp)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;

 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -297,7 +257,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 static inline void dma_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t bus)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);

 	WARN_ON(irqs_disabled());	/* for portability */
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index a6ee9e6f530..af326a2975b 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -3,7 +3,7 @@

 extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
-extern struct dma_mapping_ops nommu_dma_ops;
+extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a5dedb690a9..008e522b953 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1394,7 +1394,8 @@ static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
  * lists).
 */
 static int map_sg(struct device *dev, struct scatterlist *sglist,
-		  int nelems, int dir)
+		  int nelems, enum dma_data_direction dir,
+		  struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	struct amd_iommu *iommu;
@@ -1461,7 +1462,8 @@ unmap:
  * lists).
  */
 static void unmap_sg(struct device *dev, struct scatterlist *sglist,
-		     int nelems, int dir)
+		     int nelems, enum dma_data_direction dir,
+		     struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	struct amd_iommu *iommu;
@@ -1648,7 +1650,7 @@ static void prealloc_protection_domains(void)
 	}
 }

-static struct dma_mapping_ops amd_iommu_dma_ops = {
+static struct dma_map_ops amd_iommu_dma_ops = {
 	.alloc_coherent = alloc_coherent,
 	.free_coherent = free_coherent,
 	.map_page = map_page,
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 756138b604e..755c21e906f 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -380,8 +380,9 @@ static inline struct iommu_table *find_iommu_table(struct device *dev)
 	return tbl;
 }

-static void calgary_unmap_sg(struct device *dev,
-			     struct scatterlist *sglist, int nelems, int direction)
+static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
+			     int nelems, enum dma_data_direction dir,
+			     struct dma_attrs *attrs)
 {
 	struct iommu_table *tbl = find_iommu_table(dev);
 	struct scatterlist *s;
@@ -404,7 +405,8 @@ static void calgary_unmap_sg(struct device *dev,
 }

 static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
-			  int nelems, int direction)
+			  int nelems, enum dma_data_direction dir,
+			  struct dma_attrs *attrs)
 {
 	struct iommu_table *tbl = find_iommu_table(dev);
 	struct scatterlist *s;
@@ -429,15 +431,14 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		s->dma_address = (entry << PAGE_SHIFT) | s->offset;

 		/* insert into HW table */
-		tce_build(tbl, entry, npages, vaddr & PAGE_MASK,
-			  direction);
+		tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir);

 		s->dma_length = s->length;
 	}

 	return nelems;
 error:
-	calgary_unmap_sg(dev, sg, nelems, direction);
+	calgary_unmap_sg(dev, sg, nelems, dir, NULL);
 	for_each_sg(sg, s, nelems, i) {
 		sg->dma_address = bad_dma_address;
 		sg->dma_length = 0;
@@ -518,7 +519,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
 	free_pages((unsigned long)vaddr, get_order(size));
 }

-static struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_map_ops calgary_dma_ops = {
 	.alloc_coherent = calgary_alloc_coherent,
 	.free_coherent = calgary_free_coherent,
 	.map_sg = calgary_map_sg,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 19a1044a0cd..0d75c129b18 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -12,7 +12,7 @@

 static int forbid_dac __read_mostly;

-struct dma_mapping_ops *dma_ops;
+struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);

 static int iommu_sac_force __read_mostly;
@@ -224,7 +224,7 @@ early_param("iommu", iommu_setup);

 int dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);

 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 9c557c0c928..8cb3e45439c 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -302,8 +302,8 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 /*
  * Wrapper for pci_unmap_single working with scatterlists.
 */
-static void
-gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -333,7 +333,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 		addr = dma_map_area(dev, addr, s->length, dir, 0);
 		if (addr == bad_dma_address) {
 			if (i > 0)
-				gart_unmap_sg(dev, sg, i, dir);
+				gart_unmap_sg(dev, sg, i, dir, NULL);
 			nents = 0;
 			sg[0].dma_length = 0;
 			break;
@@ -404,8 +404,8 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
  * DMA map all entries in a scatterlist.
  * Merge chunks that have page aligned sizes into a continuous mapping.
  */
-static int
-gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s, *ps, *start_sg, *sgmap;
 	int need = 0, nextneed, i, out, start;
@@ -472,7 +472,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)

 error:
 	flush_gart();
-	gart_unmap_sg(dev, sg, out, dir);
+	gart_unmap_sg(dev, sg, out, dir, NULL);
 	/* When it was forced or merged try again in a dumb way */
 	if (force_iommu || iommu_merge) {
@@ -711,7 +711,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	return -1;
 }

-static struct dma_mapping_ops gart_dma_ops = {
+static struct dma_map_ops gart_dma_ops = {
 	.map_sg = gart_map_sg,
 	.unmap_sg = gart_unmap_sg,
 	.map_page = gart_map_page,
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index d42b69c90b4..fe50214db87 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -54,7 +54,8 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 * the same here.
 */
 static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
-			int nents, int direction)
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -78,7 +79,7 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
 	free_pages((unsigned long)vaddr, get_order(size));
 }

-struct dma_mapping_ops nommu_dma_ops = {
+struct dma_map_ops nommu_dma_ops = {
 	.alloc_coherent = dma_generic_alloc_coherent,
 	.free_coherent = nommu_free_coherent,
 	.map_sg = nommu_map_sg,
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 3ae354c0fde..3f0d9924dd1 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -67,7 +67,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }

-struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
@@ -77,8 +77,8 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.map_sg = swiotlb_map_sg,
-	.unmap_sg = swiotlb_unmap_sg,
+	.map_sg = swiotlb_map_sg_attrs,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
 	.dma_supported = NULL,
--
cgit v1.2.3

From f98eee8ea99fe74ee9c4e867ba178ec3072793be Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 5 Jan 2009 23:59:03 +0900
Subject: x86, ia64: remove duplicated swiotlb code

This adds swiotlb_map_page and swiotlb_unmap_page to lib/swiotlb.c and
removes the IA64 and X86 copies of swiotlb_map_page and
swiotlb_unmap_page. It also removes the now-unnecessary
swiotlb_map_single, swiotlb_map_single_attrs, swiotlb_unmap_single and
swiotlb_unmap_single_attrs.

Signed-off-by: FUJITA Tomonori
Acked-by: Tony Luck
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/pci-swiotlb_64.c | 17 -----------------
 1 file changed, 17 deletions(-)
(limited to 'arch/x86')

diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 3f0d9924dd1..5e32c4f6a7b 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -38,23 +38,6 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
 	return 0;
 }

-/* these will be moved to lib/swiotlb.c later on */
-
-static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
-				   unsigned long offset, size_t size,
-				   enum dma_data_direction dir,
-				   struct dma_attrs *attrs)
-{
-	return swiotlb_map_single(dev, page_address(page) + offset, size, dir);
-}
-
-static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
-			       size_t size, enum dma_data_direction dir,
-			       struct dma_attrs *attrs)
-{
-	swiotlb_unmap_single(dev, dma_handle, size, dir);
-}
-
 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 					dma_addr_t *dma_handle, gfp_t flags)
 {
--
cgit v1.2.3
From 0b8698ab5847cbe25775083659f00c658a8161c9 Mon Sep 17 00:00:00 2001
From: Ian Campbell
Date: Fri, 9 Jan 2009 18:32:09 +0000
Subject: swiotlb: range_needs_mapping should take a physical address.

The swiotlb_arch_range_needs_mapping() hook should take a physical
address rather than a virtual address in order to support highmem pages.

Signed-off-by: Ian Campbell
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/pci-swiotlb_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/x86')

diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 5e32c4f6a7b..34f12e9996e 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -33,7 +33,7 @@ phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
 	return baddr;
 }

-int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
+int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
 {
 	return 0;
 }
--
cgit v1.2.3

From 42bb8cc5e81028e217105299001070d57eb84ad7 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Fri, 9 Jan 2009 12:17:40 -0800
Subject: x86: hpet: allow force enable on ICH10 HPET

Intel "Smackover" X58 BIOSes don't have the HPET enabled, so at least
allow it to be force-enabled. The register layout is the same as in
other recent ICHs, so all the code can be reused. A numerical PCI ID is
used because the ID is unlikely to be needed anywhere else.

Signed-off-by: Andi Kleen
Signed-off-by: Andrew Morton
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/quirks.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'arch/x86')

diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 309949e9e1c..697d1b78cfb 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -172,7 +172,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
 			 ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
 			 ich_force_enable_hpet);
-
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
+			 ich_force_enable_hpet);

 static struct pci_dev *cached_dev;
--
cgit v1.2.3
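The quirk relies on the PCI core calling the callback for every function matching the registered (vendor, device) pair at header-fixup time, so using the raw 0x3a16 ID is equivalent to a symbolic spelling. A sketch (the constant name below is hypothetical, not a real pci_ids.h name):

    /* Hypothetical symbolic spelling of the quirk above: */
    #define PCI_DEVICE_ID_INTEL_ICH10_HPET  0x3a16  /* not in pci_ids.h */

    DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,
                             PCI_DEVICE_ID_INTEL_ICH10_HPET,
                             ich_force_enable_hpet);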
From b041cf22ddb742874040d0a3259d0be46f3d3a4b Mon Sep 17 00:00:00 2001
From: Ian Campbell
Date: Fri, 23 Jan 2009 10:56:16 +0000
Subject: x86: rename arch/x86/kernel/pci-swiotlb_64.c => pci-swiotlb.c

The file has been used for both 32-bit and 64-bit builds since:

    commit cfb80c9eae8c7ed8f2ee81090062d15ead51cbe8
    Author: Jeremy Fitzhardinge
    Date:   Tue Dec 16 12:17:36 2008 -0800

        x86: unify pci iommu setup and allow swiotlb to compile for 32 bit

Signed-off-by: Ian Campbell
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/Makefile         |  2 +-
 arch/x86/kernel/pci-swiotlb.c    | 84 ++++++++++++++++++++++++++++++++++++
 arch/x86/kernel/pci-swiotlb_64.c | 84 ----------------------------------
 3 files changed, 85 insertions(+), 85 deletions(-)
 create mode 100644 arch/x86/kernel/pci-swiotlb.c
 delete mode 100644 arch/x86/kernel/pci-swiotlb_64.c
(limited to 'arch/x86')

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d364df03c1d..bb1eef62d8f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -109,7 +109,7 @@ obj-$(CONFIG_MICROCODE)			+= microcode.o

 obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION)	+= check.o

-obj-$(CONFIG_SWIOTLB)			+= pci-swiotlb_64.o # NB rename without _64
+obj-$(CONFIG_SWIOTLB)			+= pci-swiotlb.o

 ###
 # 64 bit specific files
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
new file mode 100644
index 00000000000..34f12e9996e
--- /dev/null
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -0,0 +1,84 @@
+/* Glue code to lib/swiotlb.c */
+
+#include <linux/pci.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
+#include <linux/bootmem.h>
+
+#include <asm/iommu.h>
+#include <asm/swiotlb.h>
+#include <asm/dma.h>
+
+int swiotlb __read_mostly;
+
+void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
+{
+	return alloc_bootmem_low_pages(size);
+}
+
+void *swiotlb_alloc(unsigned order, unsigned long nslabs)
+{
+	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
+}
+
+dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
+{
+	return paddr;
+}
+
+phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
+{
+	return baddr;
+}
+
+int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
+{
+	return 0;
+}
+
+static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flags)
+{
+	void *vaddr;
+
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	if (vaddr)
+		return vaddr;
+
+	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
+}
+
+struct dma_map_ops swiotlb_dma_ops = {
+	.mapping_error = swiotlb_dma_mapping_error,
+	.alloc_coherent = x86_swiotlb_alloc_coherent,
+	.free_coherent = swiotlb_free_coherent,
+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = swiotlb_sync_single_for_device,
+	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = swiotlb_sync_sg_for_device,
+	.map_sg = swiotlb_map_sg_attrs,
+	.unmap_sg = swiotlb_unmap_sg_attrs,
+	.map_page = swiotlb_map_page,
+	.unmap_page = swiotlb_unmap_page,
+	.dma_supported = NULL,
+};
+
+void __init pci_swiotlb_init(void)
+{
+	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
+#ifdef CONFIG_X86_64
+	if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
+		swiotlb = 1;
+#endif
+	if (swiotlb_force)
+		swiotlb = 1;
+	if (swiotlb) {
+		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
+		swiotlb_init();
+		dma_ops = &swiotlb_dma_ops;
+	}
+}
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
deleted file mode 100644
index 34f12e9996e..00000000000
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/* Glue code to lib/swiotlb.c */
-
-#include <linux/pci.h>
-#include <linux/cache.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
-#include <linux/bootmem.h>
-
-#include <asm/iommu.h>
-#include <asm/swiotlb.h>
-#include <asm/dma.h>
-
-int swiotlb __read_mostly;
-
-void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
-{
-	return alloc_bootmem_low_pages(size);
-}
-
-void *swiotlb_alloc(unsigned order, unsigned long nslabs)
-{
-	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
-}
-
-dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
-	return paddr;
-}
-
-phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
-{
-	return baddr;
-}
-
-int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-	return 0;
-}
-
-static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flags)
-{
-	void *vaddr;
-
-	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
-	if (vaddr)
-		return vaddr;
-
-	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
-}
-
-struct dma_map_ops swiotlb_dma_ops = {
-	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = x86_swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
-	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-	.sync_single_for_device = swiotlb_sync_single_for_device,
-	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
-	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.map_sg = swiotlb_map_sg_attrs,
-	.unmap_sg = swiotlb_unmap_sg_attrs,
-	.map_page = swiotlb_map_page,
-	.unmap_page = swiotlb_unmap_page,
-	.dma_supported = NULL,
-};
-
-void __init pci_swiotlb_init(void)
-{
-	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-#ifdef CONFIG_X86_64
-	if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
-		swiotlb = 1;
-#endif
-	if (swiotlb_force)
-		swiotlb = 1;
-	if (swiotlb) {
-		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-		swiotlb_init();
-		dma_ops = &swiotlb_dma_ops;
-	}
-}
--
cgit v1.2.3

From cb9eff097831007afb30d64373f29d99825d0068 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Thu, 12 Feb 2009 05:03:36 +0000
Subject: net: new user space API for time stamping of incoming and outgoing packets

User space can request hardware and/or software time stamping.
Reporting of the result(s) via a new control message is enabled
separately for each field in the message because some of the fields may
require additional computation and thus cause overhead. User space can
tell the different kinds of time stamps apart and choose what suits its
needs.

When a TX timestamp operation is requested, the TX skb will be cloned
and the clone will be time stamped (in hardware or software) and added
to the socket error queue of the skb, if the skb has a socket
associated with it. The actual TX timestamp reaches user space as an RX
timestamp on the cloned packet. If timestamping is requested but not
performed in the device driver (which potentially could have used
hardware timestamping), it is done in software after the device's
hard_start_xmit routine.

Signed-off-by: Patrick Ohly
Signed-off-by: David S. Miller
---
 arch/x86/include/asm/socket.h | 3 +++
 1 file changed, 3 insertions(+)
(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h
index 8ab9cc8b2ec..ca8bf2cd0ba 100644
--- a/arch/x86/include/asm/socket.h
+++ b/arch/x86/include/asm/socket.h
@@ -54,4 +54,7 @@

 #define SO_MARK			36

+#define SO_TIMESTAMPING		37
+#define SCM_TIMESTAMPING	SO_TIMESTAMPING
+
 #endif /* _ASM_X86_SOCKET_H */
--
cgit v1.2.3
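The arch/x86 part above only adds the option numbers; the SOF_TIMESTAMPING_* request flags live in linux/net_tstamp.h, added by the same commit outside this arch/x86-limited view. A minimal user-space usage sketch:

    /* Illustration only: request software TX/RX timestamps; results
     * arrive as SCM_TIMESTAMPING control messages via recvmsg(). */
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>

    static int enable_timestamping(int fd)
    {
            int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
                        SOF_TIMESTAMPING_RX_SOFTWARE |
                        SOF_TIMESTAMPING_SOFTWARE;

            return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                              &flags, sizeof(flags));
    }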
From 109568e110ed67d4be1b28609b9fa00fca97f8eb Mon Sep 17 00:00:00 2001
From: Huang Ying
Date: Fri, 9 Jan 2009 16:49:30 +1100
Subject: crypto: aes - Move key_length in struct crypto_aes_ctx to be the last field

The Intel AES-NI AES acceleration instructions need key_enc and key_dec
in struct crypto_aes_ctx to be 16-byte aligned; moving key_length to be
the last field makes that easier.

Signed-off-by: Huang Ying
Signed-off-by: Herbert Xu
---
 arch/x86/crypto/aes-i586-asm_32.S   | 6 +++---
 arch/x86/crypto/aes-x86_64-asm_64.S | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
(limited to 'arch/x86')

diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
index e41b147f450..5252c388017 100644
--- a/arch/x86/crypto/aes-i586-asm_32.S
+++ b/arch/x86/crypto/aes-i586-asm_32.S
@@ -46,9 +46,9 @@
 #define in_blk 16

 /* offsets in crypto_tfm structure */
-#define klen (crypto_tfm_ctx_offset + 0)
-#define ekey (crypto_tfm_ctx_offset + 4)
-#define dkey (crypto_tfm_ctx_offset + 244)
+#define klen (crypto_tfm_ctx_offset + 480)
+#define ekey (crypto_tfm_ctx_offset + 0)
+#define dkey (crypto_tfm_ctx_offset + 240)

 // register mapping for encrypt and decrypt subroutines
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
index a120f526c3d..7f28f62737d 100644
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
@@ -56,13 +56,13 @@
 	.align	8;			\
 FUNC:	movq	r1,r2;			\
 	movq	r3,r4;			\
-	leaq	BASE+KEY+48+4(r8),r9;	\
+	leaq	BASE+KEY+48(r8),r9;	\
 	movq	r10,r11;		\
 	movl	(r7),r5 ## E;		\
 	movl	4(r7),r1 ## E;		\
 	movl	8(r7),r6 ## E;		\
 	movl	12(r7),r7 ## E;		\
-	movl	BASE+0(r8),r10 ## E;	\
+	movl	BASE+480(r8),r10 ## E;	\
 	xorl	-48(r9),r5 ## E;	\
 	xorl	-44(r9),r1 ## E;	\
 	xorl	-40(r9),r6 ## E;	\
--
cgit v1.2.3
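For reference, the new offsets (0, 240, 480) mirror the crypto_aes_ctx layout after this change; sketched below with sizes spelled out (AES_MAX_KEYLENGTH_U32 is 60 u32s, i.e. 240 bytes per key schedule):

    /* Layout sketch of crypto/aes.h after this commit: */
    struct crypto_aes_ctx {
            u32 key_enc[AES_MAX_KEYLENGTH_U32]; /* offset   0, 240 bytes */
            u32 key_dec[AES_MAX_KEYLENGTH_U32]; /* offset 240, 240 bytes */
            u32 key_length;                     /* offset 480 */
    };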
From 07bf44f86989f5ed866510374fe761d1903681fb Mon Sep 17 00:00:00 2001
From: Huang Ying
Date: Fri, 9 Jan 2009 17:25:50 +1100
Subject: crypto: aes - Export x86 AES encrypt/decrypt functions

Intel AES-NI AES acceleration instructions touch XMM state; to use them
in soft_irq context, the general x86 AES implementation is used as a
fallback. The first parameter is changed from struct crypto_tfm * to
struct crypto_aes_ctx * to make it easier to deal with the 16-byte
alignment requirement of the AES-NI implementation.

Signed-off-by: Huang Ying
Signed-off-by: Herbert Xu
---
 arch/x86/crypto/aes-i586-asm_32.S   | 18 +++++++++---------
 arch/x86/crypto/aes-x86_64-asm_64.S |  6 ++----
 arch/x86/crypto/aes_glue.c          | 20 ++++++++++++++++----
 arch/x86/include/asm/aes.h          | 11 +++++++++++
 4 files changed, 38 insertions(+), 17 deletions(-)
 create mode 100644 arch/x86/include/asm/aes.h
(limited to 'arch/x86')

diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
index 5252c388017..b949ec2f9af 100644
--- a/arch/x86/crypto/aes-i586-asm_32.S
+++ b/arch/x86/crypto/aes-i586-asm_32.S
@@ -41,14 +41,14 @@
 #define tlen 1024   // length of each of 4 'xor' arrays (256 32-bit words)

 /* offsets to parameters with one register pushed onto stack */
-#define tfm 8
+#define ctx 8
 #define out_blk 12
 #define in_blk 16

-/* offsets in crypto_tfm structure */
-#define klen (crypto_tfm_ctx_offset + 480)
-#define ekey (crypto_tfm_ctx_offset + 0)
-#define dkey (crypto_tfm_ctx_offset + 240)
+/* offsets in crypto_aes_ctx structure */
+#define klen (480)
+#define ekey (0)
+#define dkey (240)

 // register mapping for encrypt and decrypt subroutines
@@ -217,7 +217,7 @@
 	do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */

 // AES (Rijndael) Encryption Subroutine
-/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
+/* void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */

 .global  aes_enc_blk

@@ -228,7 +228,7 @@

 aes_enc_blk:
 	push    %ebp
-	mov     tfm(%esp),%ebp
+	mov     ctx(%esp),%ebp

 // CAUTION: the order and the values used in these assigns
 // rely on the register mappings
@@ -292,7 +292,7 @@ aes_enc_blk:
 	ret

 // AES (Rijndael) Decryption Subroutine
-/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */
+/* void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out_blk, const u8 *in_blk) */

 .global  aes_dec_blk

@@ -303,7 +303,7 @@ aes_enc_blk:

 aes_dec_blk:
 	push    %ebp
-	mov     tfm(%esp),%ebp
+	mov     ctx(%esp),%ebp

 // CAUTION: the order and the values used in these assigns
 // rely on the register mappings
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
index 7f28f62737d..5b577d5a059 100644
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
@@ -17,8 +17,6 @@

 #include <asm/asm-offsets.h>

-#define BASE crypto_tfm_ctx_offset
-
 #define R1	%rax
 #define R1E	%eax
 #define R1X	%ax
@@ -56,13 +54,13 @@
 	.align	8;			\
 FUNC:	movq	r1,r2;			\
 	movq	r3,r4;			\
-	leaq	BASE+KEY+48(r8),r9;	\
+	leaq	KEY+48(r8),r9;		\
 	movq	r10,r11;		\
 	movl	(r7),r5 ## E;		\
 	movl	4(r7),r1 ## E;		\
 	movl	8(r7),r6 ## E;		\
 	movl	12(r7),r7 ## E;		\
-	movl	BASE+480(r8),r10 ## E;	\
+	movl	480(r8),r10 ## E;	\
 	xorl	-48(r9),r5 ## E;	\
 	xorl	-44(r9),r1 ## E;	\
 	xorl	-40(r9),r6 ## E;	\
diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
index 71f45782711..49ae9fe32b2 100644
--- a/arch/x86/crypto/aes_glue.c
+++ b/arch/x86/crypto/aes_glue.c
@@ -5,17 +5,29 @@

 #include <crypto/aes.h>

-asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+
+void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+{
+	aes_enc_blk(ctx, dst, src);
+}
+EXPORT_SYMBOL_GPL(crypto_aes_encrypt_x86);
+
+void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+{
+	aes_dec_blk(ctx, dst, src);
+}
+EXPORT_SYMBOL_GPL(crypto_aes_decrypt_x86);

 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-	aes_enc_blk(tfm, dst, src);
+	aes_enc_blk(crypto_tfm_ctx(tfm), dst, src);
 }

 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-	aes_dec_blk(tfm, dst, src);
+	aes_dec_blk(crypto_tfm_ctx(tfm), dst, src);
 }

 static struct crypto_alg aes_alg = {
diff --git a/arch/x86/include/asm/aes.h b/arch/x86/include/asm/aes.h
new file mode 100644
index 00000000000..80545a1cbe3
--- /dev/null
+++ b/arch/x86/include/asm/aes.h
@@ -0,0 +1,11 @@
+#ifndef ASM_X86_AES_H
+#define ASM_X86_AES_H
+
+#include <linux/crypto.h>
+#include <crypto/aes.h>
+
+void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
+			    const u8 *src);
+void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
+			    const u8 *src);
+#endif
--
cgit v1.2.3
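Passing struct crypto_aes_ctx * directly lets the AES-NI glue (next patch) hand the assembler a 16-byte-aligned context derived from the possibly unaligned tfm context. A hedged sketch of that technique, with illustrative names (the real glue reserves the alignment slack via cra_ctxsize/cra_alignmask):

    #define AESNI_ALIGN_SKETCH 16

    /* Sketch only: round the raw tfm context up to a 16-byte boundary
     * before handing it to the AES-NI asm routines. */
    static struct crypto_aes_ctx *aes_ctx_sketch(struct crypto_tfm *tfm)
    {
            unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);

            return (struct crypto_aes_ctx *)ALIGN(addr, AESNI_ALIGN_SKETCH);
    }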
+EXPORT_SYMBOL_GPL(crypto_aes_decrypt_x86); static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - aes_enc_blk(tfm, dst, src); + aes_enc_blk(crypto_tfm_ctx(tfm), dst, src); } static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - aes_dec_blk(tfm, dst, src); + aes_dec_blk(crypto_tfm_ctx(tfm), dst, src); } static struct crypto_alg aes_alg = { diff --git a/arch/x86/include/asm/aes.h b/arch/x86/include/asm/aes.h new file mode 100644 index 00000000000..80545a1cbe3 --- /dev/null +++ b/arch/x86/include/asm/aes.h @@ -0,0 +1,11 @@ +#ifndef ASM_X86_AES_H +#define ASM_X86_AES_H + +#include +#include + +void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, + const u8 *src); +void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, + const u8 *src); +#endif -- cgit v1.2.3 From 54b6a1bd5364aca95cd6ffae00f2b64c6511122c Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Sun, 18 Jan 2009 16:28:34 +1100 Subject: crypto: aes-ni - Add support to Intel AES-NI instructions for x86_64 platform Intel AES-NI is a new set of Single Instruction Multiple Data (SIMD) instructions being introduced in the next generation of Intel processors, as of 2009. These instructions enable fast and secure data encryption and decryption using the Advanced Encryption Standard (AES), defined by FIPS Publication number 197. The architecture introduces six instructions that offer full hardware support for AES. Four of them support high-performance data encryption and decryption, and the other two support the AES key expansion procedure. The white paper can be downloaded from: http://softwarecommunity.intel.com/isn/downloads/intelavx/AES-Instructions-Set_WP.pdf AES may be used in soft_irq context, but MMX/SSE state cannot be touched safely in soft_irq context. So in_interrupt() is checked; if we are in IRQ or soft_irq context, the general x86_64 implementation is used instead. Signed-off-by: Huang Ying Signed-off-by: Herbert Xu --- arch/x86/crypto/Makefile | 3 + arch/x86/crypto/aesni-intel_asm.S | 896 +++++++++++++++++++++++++++++++++++++ arch/x86/crypto/aesni-intel_glue.c | 461 +++++++++++++++++++ arch/x86/include/asm/cpufeature.h | 1 + 4 files changed, 1361 insertions(+) create mode 100644 arch/x86/crypto/aesni-intel_asm.S create mode 100644 arch/x86/crypto/aesni-intel_glue.c (limited to 'arch/x86') diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 903de4aa509..ebe7deedd5b 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o +obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o @@ -19,3 +20,5 @@ salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o + +aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S new file mode 100644 index 00000000000..caba9960170 --- /dev/null +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -0,0 +1,896 @@ +/* + * Implement AES algorithm in Intel AES-NI instructions.
+ * + * The white paper of AES-NI instructions can be downloaded from: + * http://softwarecommunity.intel.com/isn/downloads/intelavx/AES-Instructions-Set_WP.pdf + * + * Copyright (C) 2008, Intel Corp. + * Author: Huang Ying + * Vinodh Gopal + * Kahraman Akdemir + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +.text + +#define STATE1 %xmm0 +#define STATE2 %xmm4 +#define STATE3 %xmm5 +#define STATE4 %xmm6 +#define STATE STATE1 +#define IN1 %xmm1 +#define IN2 %xmm7 +#define IN3 %xmm8 +#define IN4 %xmm9 +#define IN IN1 +#define KEY %xmm2 +#define IV %xmm3 + +#define KEYP %rdi +#define OUTP %rsi +#define INP %rdx +#define LEN %rcx +#define IVP %r8 +#define KLEN %r9d +#define T1 %r10 +#define TKEYP T1 +#define T2 %r11 + +_key_expansion_128: +_key_expansion_256a: + pshufd $0b11111111, %xmm1, %xmm1 + shufps $0b00010000, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + shufps $0b10001100, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + pxor %xmm1, %xmm0 + movaps %xmm0, (%rcx) + add $0x10, %rcx + ret + +_key_expansion_192a: + pshufd $0b01010101, %xmm1, %xmm1 + shufps $0b00010000, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + shufps $0b10001100, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + pxor %xmm1, %xmm0 + + movaps %xmm2, %xmm5 + movaps %xmm2, %xmm6 + pslldq $4, %xmm5 + pshufd $0b11111111, %xmm0, %xmm3 + pxor %xmm3, %xmm2 + pxor %xmm5, %xmm2 + + movaps %xmm0, %xmm1 + shufps $0b01000100, %xmm0, %xmm6 + movaps %xmm6, (%rcx) + shufps $0b01001110, %xmm2, %xmm1 + movaps %xmm1, 16(%rcx) + add $0x20, %rcx + ret + +_key_expansion_192b: + pshufd $0b01010101, %xmm1, %xmm1 + shufps $0b00010000, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + shufps $0b10001100, %xmm0, %xmm4 + pxor %xmm4, %xmm0 + pxor %xmm1, %xmm0 + + movaps %xmm2, %xmm5 + pslldq $4, %xmm5 + pshufd $0b11111111, %xmm0, %xmm3 + pxor %xmm3, %xmm2 + pxor %xmm5, %xmm2 + + movaps %xmm0, (%rcx) + add $0x10, %rcx + ret + +_key_expansion_256b: + pshufd $0b10101010, %xmm1, %xmm1 + shufps $0b00010000, %xmm2, %xmm4 + pxor %xmm4, %xmm2 + shufps $0b10001100, %xmm2, %xmm4 + pxor %xmm4, %xmm2 + pxor %xmm1, %xmm2 + movaps %xmm2, (%rcx) + add $0x10, %rcx + ret + +/* + * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, + * unsigned int key_len) + */ +ENTRY(aesni_set_key) + movups (%rsi), %xmm0 # user key (first 16 bytes) + movaps %xmm0, (%rdi) + lea 0x10(%rdi), %rcx # key addr + movl %edx, 480(%rdi) + pxor %xmm4, %xmm4 # xmm4 is assumed 0 in _key_expansion_x + cmp $24, %dl + jb .Lenc_key128 + je .Lenc_key192 + movups 0x10(%rsi), %xmm2 # other user key + movaps %xmm2, (%rcx) + add $0x10, %rcx + # aeskeygenassist $0x1, %xmm2, %xmm1 # round 1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x01 + call _key_expansion_256a + # aeskeygenassist $0x1, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x01 + call _key_expansion_256b + # aeskeygenassist $0x2, %xmm2, %xmm1 # round 2 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x02 + call _key_expansion_256a + # aeskeygenassist $0x2, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x02 + call _key_expansion_256b + # aeskeygenassist $0x4, %xmm2, %xmm1 # round 3 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x04 + call _key_expansion_256a + # aeskeygenassist $0x4, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x04 + call _key_expansion_256b + # aeskeygenassist $0x8, %xmm2, %xmm1 # round 4 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x08 + call _key_expansion_256a 
+ # aeskeygenassist $0x8, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x08 + call _key_expansion_256b + # aeskeygenassist $0x10, %xmm2, %xmm1 # round 5 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x10 + call _key_expansion_256a + # aeskeygenassist $0x10, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x10 + call _key_expansion_256b + # aeskeygenassist $0x20, %xmm2, %xmm1 # round 6 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x20 + call _key_expansion_256a + # aeskeygenassist $0x20, %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x20 + call _key_expansion_256b + # aeskeygenassist $0x40, %xmm2, %xmm1 # round 7 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x40 + call _key_expansion_256a + jmp .Ldec_key +.Lenc_key192: + movq 0x10(%rsi), %xmm2 # other user key + # aeskeygenassist $0x1, %xmm2, %xmm1 # round 1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x01 + call _key_expansion_192a + # aeskeygenassist $0x2, %xmm2, %xmm1 # round 2 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x02 + call _key_expansion_192b + # aeskeygenassist $0x4, %xmm2, %xmm1 # round 3 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x04 + call _key_expansion_192a + # aeskeygenassist $0x8, %xmm2, %xmm1 # round 4 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x08 + call _key_expansion_192b + # aeskeygenassist $0x10, %xmm2, %xmm1 # round 5 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x10 + call _key_expansion_192a + # aeskeygenassist $0x20, %xmm2, %xmm1 # round 6 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x20 + call _key_expansion_192b + # aeskeygenassist $0x40, %xmm2, %xmm1 # round 7 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x40 + call _key_expansion_192a + # aeskeygenassist $0x80, %xmm2, %xmm1 # round 8 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xca, 0x80 + call _key_expansion_192b + jmp .Ldec_key +.Lenc_key128: + # aeskeygenassist $0x1, %xmm0, %xmm1 # round 1 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x01 + call _key_expansion_128 + # aeskeygenassist $0x2, %xmm0, %xmm1 # round 2 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x02 + call _key_expansion_128 + # aeskeygenassist $0x4, %xmm0, %xmm1 # round 3 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x04 + call _key_expansion_128 + # aeskeygenassist $0x8, %xmm0, %xmm1 # round 4 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x08 + call _key_expansion_128 + # aeskeygenassist $0x10, %xmm0, %xmm1 # round 5 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x10 + call _key_expansion_128 + # aeskeygenassist $0x20, %xmm0, %xmm1 # round 6 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x20 + call _key_expansion_128 + # aeskeygenassist $0x40, %xmm0, %xmm1 # round 7 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x40 + call _key_expansion_128 + # aeskeygenassist $0x80, %xmm0, %xmm1 # round 8 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x80 + call _key_expansion_128 + # aeskeygenassist $0x1b, %xmm0, %xmm1 # round 9 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x1b + call _key_expansion_128 + # aeskeygenassist $0x36, %xmm0, %xmm1 # round 10 + .byte 0x66, 0x0f, 0x3a, 0xdf, 0xc8, 0x36 + call _key_expansion_128 +.Ldec_key: + sub $0x10, %rcx + movaps (%rdi), %xmm0 + movaps (%rcx), %xmm1 + movaps %xmm0, 240(%rcx) + movaps %xmm1, 240(%rdi) + add $0x10, %rdi + lea 240-16(%rcx), %rsi +.align 4 +.Ldec_key_loop: + movaps (%rdi), %xmm0 + # aesimc %xmm0, %xmm1 + .byte 0x66, 0x0f, 0x38, 0xdb, 0xc8 + movaps %xmm1, (%rsi) + add $0x10, %rdi + sub $0x10, %rsi + cmp %rcx, %rdi + jb .Ldec_key_loop + xor %rax, %rax + ret + +/* + * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) + */ +ENTRY(aesni_enc) + movl 480(KEYP), KLEN # key length + movups (INP), STATE # input + call _aesni_enc1 + movups STATE, (OUTP) # output 
+ ret + +/* + * _aesni_enc1: internal ABI + * input: + * KEYP: key struct pointer + * KLEN: round count + * STATE: initial state (input) + * output: + * STATE: finial state (output) + * changed: + * KEY + * TKEYP (T1) + */ +_aesni_enc1: + movaps (KEYP), KEY # key + mov KEYP, TKEYP + pxor KEY, STATE # round 0 + add $0x30, TKEYP + cmp $24, KLEN + jb .Lenc128 + lea 0x20(TKEYP), TKEYP + je .Lenc192 + add $0x20, TKEYP + movaps -0x60(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps -0x50(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 +.align 4 +.Lenc192: + movaps -0x40(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps -0x30(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 +.align 4 +.Lenc128: + movaps -0x20(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps -0x10(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps (TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x10(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x20(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x30(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x40(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x50(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x60(TKEYP), KEY + # aesenc KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + movaps 0x70(TKEYP), KEY + # aesenclast KEY, STATE # last round + .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2 + ret + +/* + * _aesni_enc4: internal ABI + * input: + * KEYP: key struct pointer + * KLEN: round count + * STATE1: initial state (input) + * STATE2 + * STATE3 + * STATE4 + * output: + * STATE1: finial state (output) + * STATE2 + * STATE3 + * STATE4 + * changed: + * KEY + * TKEYP (T1) + */ +_aesni_enc4: + movaps (KEYP), KEY # key + mov KEYP, TKEYP + pxor KEY, STATE1 # round 0 + pxor KEY, STATE2 + pxor KEY, STATE3 + pxor KEY, STATE4 + add $0x30, TKEYP + cmp $24, KLEN + jb .L4enc128 + lea 0x20(TKEYP), TKEYP + je .L4enc192 + add $0x20, TKEYP + movaps -0x60(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps -0x50(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 +#.align 4 +.L4enc192: + movaps -0x40(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps -0x30(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 +#.align 4 +.L4enc128: + movaps -0x20(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, 
STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps -0x10(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps (TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x10(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x20(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x30(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x40(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x50(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x60(TKEYP), KEY + # aesenc KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xc2 + # aesenc KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xe2 + # aesenc KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xea + # aesenc KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdc, 0xf2 + movaps 0x70(TKEYP), KEY + # aesenclast KEY, STATE1 # last round + .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2 + # aesenclast KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdd, 0xe2 + # aesenclast KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdd, 0xea + # aesenclast KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2 + ret + +/* + * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) + */ +ENTRY(aesni_dec) + mov 480(KEYP), KLEN # key length + add $240, KEYP + movups (INP), STATE # input + call _aesni_dec1 + movups STATE, (OUTP) #output + ret + +/* + * _aesni_dec1: internal ABI + * input: + * KEYP: key struct pointer + * KLEN: key length + * STATE: initial state (input) + * output: + * STATE: finial state (output) + * changed: + * KEY + * TKEYP (T1) + */ +_aesni_dec1: + movaps (KEYP), KEY # key + mov KEYP, TKEYP + pxor KEY, STATE # round 0 + add $0x30, TKEYP + cmp $24, KLEN + jb .Ldec128 + lea 0x20(TKEYP), TKEYP + je .Ldec192 + add $0x20, TKEYP + movaps -0x60(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps -0x50(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 +.align 4 +.Ldec192: + movaps -0x40(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps -0x30(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 +.align 4 +.Ldec128: + movaps -0x20(TKEYP), KEY + # aesdec KEY, STATE 
+ .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps -0x10(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps (TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x10(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x20(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x30(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x40(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x50(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x60(TKEYP), KEY + # aesdec KEY, STATE + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + movaps 0x70(TKEYP), KEY + # aesdeclast KEY, STATE # last round + .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2 + ret + +/* + * _aesni_dec4: internal ABI + * input: + * KEYP: key struct pointer + * KLEN: key length + * STATE1: initial state (input) + * STATE2 + * STATE3 + * STATE4 + * output: + * STATE1: finial state (output) + * STATE2 + * STATE3 + * STATE4 + * changed: + * KEY + * TKEYP (T1) + */ +_aesni_dec4: + movaps (KEYP), KEY # key + mov KEYP, TKEYP + pxor KEY, STATE1 # round 0 + pxor KEY, STATE2 + pxor KEY, STATE3 + pxor KEY, STATE4 + add $0x30, TKEYP + cmp $24, KLEN + jb .L4dec128 + lea 0x20(TKEYP), TKEYP + je .L4dec192 + add $0x20, TKEYP + movaps -0x60(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps -0x50(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 +.align 4 +.L4dec192: + movaps -0x40(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps -0x30(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 +.align 4 +.L4dec128: + movaps -0x20(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps -0x10(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps (TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x10(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 
0x20(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x30(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x40(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x50(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x60(TKEYP), KEY + # aesdec KEY, STATE1 + .byte 0x66, 0x0f, 0x38, 0xde, 0xc2 + # aesdec KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xde, 0xe2 + # aesdec KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xde, 0xea + # aesdec KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xde, 0xf2 + movaps 0x70(TKEYP), KEY + # aesdeclast KEY, STATE1 # last round + .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2 + # aesdeclast KEY, STATE2 + .byte 0x66, 0x0f, 0x38, 0xdf, 0xe2 + # aesdeclast KEY, STATE3 + .byte 0x66, 0x0f, 0x38, 0xdf, 0xea + # aesdeclast KEY, STATE4 + .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2 + ret + +/* + * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, + * size_t len) + */ +ENTRY(aesni_ecb_enc) + test LEN, LEN # check length + jz .Lecb_enc_ret + mov 480(KEYP), KLEN + cmp $16, LEN + jb .Lecb_enc_ret + cmp $64, LEN + jb .Lecb_enc_loop1 +.align 4 +.Lecb_enc_loop4: + movups (INP), STATE1 + movups 0x10(INP), STATE2 + movups 0x20(INP), STATE3 + movups 0x30(INP), STATE4 + call _aesni_enc4 + movups STATE1, (OUTP) + movups STATE2, 0x10(OUTP) + movups STATE3, 0x20(OUTP) + movups STATE4, 0x30(OUTP) + sub $64, LEN + add $64, INP + add $64, OUTP + cmp $64, LEN + jge .Lecb_enc_loop4 + cmp $16, LEN + jb .Lecb_enc_ret +.align 4 +.Lecb_enc_loop1: + movups (INP), STATE1 + call _aesni_enc1 + movups STATE1, (OUTP) + sub $16, LEN + add $16, INP + add $16, OUTP + cmp $16, LEN + jge .Lecb_enc_loop1 +.Lecb_enc_ret: + ret + +/* + * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, + * size_t len); + */ +ENTRY(aesni_ecb_dec) + test LEN, LEN + jz .Lecb_dec_ret + mov 480(KEYP), KLEN + add $240, KEYP + cmp $16, LEN + jb .Lecb_dec_ret + cmp $64, LEN + jb .Lecb_dec_loop1 +.align 4 +.Lecb_dec_loop4: + movups (INP), STATE1 + movups 0x10(INP), STATE2 + movups 0x20(INP), STATE3 + movups 0x30(INP), STATE4 + call _aesni_dec4 + movups STATE1, (OUTP) + movups STATE2, 0x10(OUTP) + movups STATE3, 0x20(OUTP) + movups STATE4, 0x30(OUTP) + sub $64, LEN + add $64, INP + add $64, OUTP + cmp $64, LEN + jge .Lecb_dec_loop4 + cmp $16, LEN + jb .Lecb_dec_ret +.align 4 +.Lecb_dec_loop1: + movups (INP), STATE1 + call _aesni_dec1 + movups STATE1, (OUTP) + sub $16, LEN + add $16, INP + add $16, OUTP + cmp $16, LEN + jge .Lecb_dec_loop1 +.Lecb_dec_ret: + ret + +/* + * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, + * size_t len, u8 *iv) + */ +ENTRY(aesni_cbc_enc) + cmp $16, LEN + jb .Lcbc_enc_ret + mov 480(KEYP), KLEN + movups (IVP), STATE # load iv as initial state +.align 4 
+.Lcbc_enc_loop: + movups (INP), IN # load input + pxor IN, STATE + call _aesni_enc1 + movups STATE, (OUTP) # store output + sub $16, LEN + add $16, INP + add $16, OUTP + cmp $16, LEN + jge .Lcbc_enc_loop + movups STATE, (IVP) +.Lcbc_enc_ret: + ret + +/* + * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, + * size_t len, u8 *iv) + */ +ENTRY(aesni_cbc_dec) + cmp $16, LEN + jb .Lcbc_dec_ret + mov 480(KEYP), KLEN + add $240, KEYP + movups (IVP), IV + cmp $64, LEN + jb .Lcbc_dec_loop1 +.align 4 +.Lcbc_dec_loop4: + movups (INP), IN1 + movaps IN1, STATE1 + movups 0x10(INP), IN2 + movaps IN2, STATE2 + movups 0x20(INP), IN3 + movaps IN3, STATE3 + movups 0x30(INP), IN4 + movaps IN4, STATE4 + call _aesni_dec4 + pxor IV, STATE1 + pxor IN1, STATE2 + pxor IN2, STATE3 + pxor IN3, STATE4 + movaps IN4, IV + movups STATE1, (OUTP) + movups STATE2, 0x10(OUTP) + movups STATE3, 0x20(OUTP) + movups STATE4, 0x30(OUTP) + sub $64, LEN + add $64, INP + add $64, OUTP + cmp $64, LEN + jge .Lcbc_dec_loop4 + cmp $16, LEN + jb .Lcbc_dec_ret +.align 4 +.Lcbc_dec_loop1: + movups (INP), IN + movaps IN, STATE + call _aesni_dec1 + pxor IV, STATE + movups STATE, (OUTP) + movaps IN, IV + sub $16, LEN + add $16, INP + add $16, OUTP + cmp $16, LEN + jge .Lcbc_dec_loop1 + movups IV, (IVP) +.Lcbc_dec_ret: + ret diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c new file mode 100644 index 00000000000..02af0af6549 --- /dev/null +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -0,0 +1,461 @@ +/* + * Support for Intel AES-NI instructions. This file contains glue + * code, the real AES implementation is in intel-aes_asm.S. + * + * Copyright (C) 2008, Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct async_aes_ctx { + struct cryptd_ablkcipher *cryptd_tfm; +}; + +#define AESNI_ALIGN 16 +#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) + +asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, + unsigned int key_len); +asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in); +asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in); +asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len); +asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len); +asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); +asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); + +static inline int kernel_fpu_using(void) +{ + if (in_interrupt() && !(read_cr0() & X86_CR0_TS)) + return 1; + return 0; +} + +static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) +{ + unsigned long addr = (unsigned long)raw_ctx; + unsigned long align = AESNI_ALIGN; + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + return (struct crypto_aes_ctx *)ALIGN(addr, align); +} + +static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx, + const u8 *in_key, unsigned int key_len) +{ + struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx); + u32 *flags = &tfm->crt_flags; + int err; + + if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && + key_len != AES_KEYSIZE_256) { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + if (kernel_fpu_using()) + err = crypto_aes_expand_key(ctx, in_key, key_len); + else { + kernel_fpu_begin(); + err = aesni_set_key(ctx, in_key, key_len); + kernel_fpu_end(); + } + + return err; +} + +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len); +} + +static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); + + if (kernel_fpu_using()) + crypto_aes_encrypt_x86(ctx, dst, src); + else { + kernel_fpu_begin(); + aesni_enc(ctx, dst, src); + kernel_fpu_end(); + } +} + +static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); + + if (kernel_fpu_using()) + crypto_aes_decrypt_x86(ctx, dst, src); + else { + kernel_fpu_begin(); + aesni_dec(ctx, dst, src); + kernel_fpu_end(); + } +} + +static struct crypto_alg aesni_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-aesni", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aesni_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt + } + } +}; + +static int ecb_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + 
+ kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { + aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + kernel_fpu_end(); + + return err; +} + +static int ecb_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { + aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + kernel_fpu_end(); + + return err; +} + +static struct crypto_alg blk_ecb_alg = { + .cra_name = "__ecb-aes-aesni", + .cra_driver_name = "__driver-ecb-aes-aesni", + .cra_priority = 0, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, + .cra_alignmask = 0, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key, + .encrypt = ecb_encrypt, + .decrypt = ecb_decrypt, + }, + }, +}; + +static int cbc_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { + aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + kernel_fpu_end(); + + return err; +} + +static int cbc_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { + aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + kernel_fpu_end(); + + return err; +} + +static struct crypto_alg blk_cbc_alg = { + .cra_name = "__cbc-aes-aesni", + .cra_driver_name = "__driver-cbc-aes-aesni", + .cra_priority = 0, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, + .cra_alignmask = 0, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, + }, + }, +}; + +static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + return 
crypto_ablkcipher_setkey(&ctx->cryptd_tfm->base, key, key_len); +} + +static int ablk_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (kernel_fpu_using()) { + struct ablkcipher_request *cryptd_req = + ablkcipher_request_ctx(req); + memcpy(cryptd_req, req, sizeof(*req)); + ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + return crypto_ablkcipher_encrypt(cryptd_req); + } else { + struct blkcipher_desc desc; + desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); + desc.info = req->info; + desc.flags = 0; + return crypto_blkcipher_crt(desc.tfm)->encrypt( + &desc, req->dst, req->src, req->nbytes); + } +} + +static int ablk_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (kernel_fpu_using()) { + struct ablkcipher_request *cryptd_req = + ablkcipher_request_ctx(req); + memcpy(cryptd_req, req, sizeof(*req)); + ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + return crypto_ablkcipher_decrypt(cryptd_req); + } else { + struct blkcipher_desc desc; + desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); + desc.info = req->info; + desc.flags = 0; + return crypto_blkcipher_crt(desc.tfm)->decrypt( + &desc, req->dst, req->src, req->nbytes); + } +} + +static void ablk_exit(struct crypto_tfm *tfm) +{ + struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + cryptd_free_ablkcipher(ctx->cryptd_tfm); +} + +static void ablk_init_common(struct crypto_tfm *tfm, + struct cryptd_ablkcipher *cryptd_tfm) +{ + struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->cryptd_tfm = cryptd_tfm; + tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(&cryptd_tfm->base); +} + +static int ablk_ecb_init(struct crypto_tfm *tfm) +{ + struct cryptd_ablkcipher *cryptd_tfm; + + cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + ablk_init_common(tfm, cryptd_tfm); + return 0; +} + +static struct crypto_alg ablk_ecb_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-aesni", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct async_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ablk_ecb_alg.cra_list), + .cra_init = ablk_ecb_init, + .cra_exit = ablk_exit, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = ablk_set_key, + .encrypt = ablk_encrypt, + .decrypt = ablk_decrypt, + }, + }, +}; + +static int ablk_cbc_init(struct crypto_tfm *tfm) +{ + struct cryptd_ablkcipher *cryptd_tfm; + + cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + ablk_init_common(tfm, cryptd_tfm); + return 0; +} + +static struct crypto_alg ablk_cbc_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-aesni", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct async_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list), + .cra_init = ablk_cbc_init, + 
.cra_exit = ablk_exit, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ablk_set_key, + .encrypt = ablk_encrypt, + .decrypt = ablk_decrypt, + }, + }, +}; + +static int __init aesni_init(void) +{ + int err; + + if (!cpu_has_aes) { + printk(KERN_ERR "Intel AES-NI instructions are not detected.\n"); + return -ENODEV; + } + if ((err = crypto_register_alg(&aesni_alg))) + goto aes_err; + if ((err = crypto_register_alg(&blk_ecb_alg))) + goto blk_ecb_err; + if ((err = crypto_register_alg(&blk_cbc_alg))) + goto blk_cbc_err; + if ((err = crypto_register_alg(&ablk_ecb_alg))) + goto ablk_ecb_err; + if ((err = crypto_register_alg(&ablk_cbc_alg))) + goto ablk_cbc_err; + + return err; + +ablk_cbc_err: + crypto_unregister_alg(&ablk_ecb_alg); +ablk_ecb_err: + crypto_unregister_alg(&blk_cbc_alg); +blk_cbc_err: + crypto_unregister_alg(&blk_ecb_alg); +blk_ecb_err: + crypto_unregister_alg(&aesni_alg); +aes_err: + return err; +} + +static void __exit aesni_exit(void) +{ + crypto_unregister_alg(&ablk_cbc_alg); + crypto_unregister_alg(&ablk_ecb_alg); + crypto_unregister_alg(&blk_cbc_alg); + crypto_unregister_alg(&blk_ecb_alg); + crypto_unregister_alg(&aesni_alg); +} + +module_init(aesni_init); +module_exit(aesni_exit); + +MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("aes"); diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 7301e60dc4a..0beba0d1468 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -213,6 +213,7 @@ extern const char * const x86_power_flags[32]; #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) #define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) #define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) +#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) #define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) -- cgit v1.2.3 From 712406a6bf59ebf4a00358bb59a4a2a1b2953d90 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 9 Feb 2009 10:54:03 -0800 Subject: tracing/function-graph-tracer: make arch generic push pop functions There is nothing really arch specific of the push and pop functions used by the function graph tracer. This patch moves them to generic code. Acked-by: Frederic Weisbecker Acked-by: Ingo Molnar Signed-off-by: Steven Rostedt --- arch/x86/include/asm/ftrace.h | 25 --------------- arch/x86/kernel/dumpstack.c | 1 + arch/x86/kernel/ftrace.c | 75 +------------------------------------------ 3 files changed, 2 insertions(+), 99 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index b55b4a7fbef..db24c2278be 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -55,29 +55,4 @@ struct dyn_arch_ftrace { #endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - -#ifndef __ASSEMBLY__ - -/* - * Stack of return addresses for functions - * of a thread. - * Used in struct thread_info - */ -struct ftrace_ret_stack { - unsigned long ret; - unsigned long func; - unsigned long long calltime; -}; - -/* - * Primary handler of a function return. - * It relays on ftrace_return_to_handler. 
- * Defined in entry_32/64.S - */ -extern void return_to_handler(void); - -#endif /* __ASSEMBLY__ */ -#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ - #endif /* _ASM_X86_FTRACE_H */ diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 6b1f6f6f866..c0852291b62 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 231bdd3c5b1..76f7141e0f9 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -389,79 +389,6 @@ void ftrace_nmi_exit(void) #endif /* !CONFIG_DYNAMIC_FTRACE */ -/* Add a function return address to the trace stack on thread info.*/ -static int push_return_trace(unsigned long ret, unsigned long long time, - unsigned long func, int *depth) -{ - int index; - - if (!current->ret_stack) - return -EBUSY; - - /* The return trace stack is full */ - if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { - atomic_inc(¤t->trace_overrun); - return -EBUSY; - } - - index = ++current->curr_ret_stack; - barrier(); - current->ret_stack[index].ret = ret; - current->ret_stack[index].func = func; - current->ret_stack[index].calltime = time; - *depth = index; - - return 0; -} - -/* Retrieve a function return address to the trace stack on thread info.*/ -static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) -{ - int index; - - index = current->curr_ret_stack; - - if (unlikely(index < 0)) { - ftrace_graph_stop(); - WARN_ON(1); - /* Might as well panic, otherwise we have no where to go */ - *ret = (unsigned long)panic; - return; - } - - *ret = current->ret_stack[index].ret; - trace->func = current->ret_stack[index].func; - trace->calltime = current->ret_stack[index].calltime; - trace->overrun = atomic_read(¤t->trace_overrun); - trace->depth = index; - barrier(); - current->curr_ret_stack--; - -} - -/* - * Send the trace to the ring-buffer. - * @return the original return address. - */ -unsigned long ftrace_return_to_handler(void) -{ - struct ftrace_graph_ret trace; - unsigned long ret; - - pop_return_trace(&trace, &ret); - trace.rettime = cpu_clock(raw_smp_processor_id()); - ftrace_graph_return(&trace); - - if (unlikely(!ret)) { - ftrace_graph_stop(); - WARN_ON(1); - /* Might as well panic. What else to do? */ - ret = (unsigned long)panic; - } - - return ret; -} - /* * Hook the return address and push it in the stack of return addrs * in current thread info. 
@@ -521,7 +448,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) calltime = cpu_clock(raw_smp_processor_id()); - if (push_return_trace(old, calltime, + if (ftrace_push_return_trace(old, calltime, self_addr, &trace.depth) == -EBUSY) { *parent = old; return; -- cgit v1.2.3 From b98103a5597b87211a1c74077b06faeac554bedc Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Sat, 21 Feb 2009 00:09:47 +0100 Subject: x86: hpet: print HPET registers during setup (if hpet=verbose is used) Signed-off-by: Andreas Herrmann Cc: Mark Hounschell Cc: Borislav Petkov Signed-off-by: Ingo Molnar --- arch/x86/kernel/hpet.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index a00545fe5cd..1d86ca3c1f9 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -80,6 +80,7 @@ static inline void hpet_clear_mapping(void) */ static int boot_hpet_disable; int hpet_force_user; +static int hpet_verbose; static int __init hpet_setup(char *str) { @@ -88,6 +89,8 @@ static int __init hpet_setup(char *str) boot_hpet_disable = 1; if (!strncmp("force", str, 5)) hpet_force_user = 1; + if (!strncmp("verbose", str, 7)) + hpet_verbose = 1; } return 1; } @@ -119,6 +122,43 @@ int is_hpet_enabled(void) } EXPORT_SYMBOL_GPL(is_hpet_enabled); +static void _hpet_print_config(const char *function, int line) +{ + u32 i, timers, l, h; + printk(KERN_INFO "hpet: %s(%d):\n", function, line); + l = hpet_readl(HPET_ID); + h = hpet_readl(HPET_PERIOD); + timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1; + printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h); + l = hpet_readl(HPET_CFG); + h = hpet_readl(HPET_STATUS); + printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h); + l = hpet_readl(HPET_COUNTER); + h = hpet_readl(HPET_COUNTER+4); + printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h); + + for (i = 0; i < timers; i++) { + l = hpet_readl(HPET_Tn_CFG(i)); + h = hpet_readl(HPET_Tn_CFG(i)+4); + printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", + i, l, h); + l = hpet_readl(HPET_Tn_CMP(i)); + h = hpet_readl(HPET_Tn_CMP(i)+4); + printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", + i, l, h); + l = hpet_readl(HPET_Tn_ROUTE(i)); + h = hpet_readl(HPET_Tn_ROUTE(i)+4); + printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", + i, l, h); + } +} + +#define hpet_print_config() \ +do { \ + if (hpet_verbose) \ + _hpet_print_config(__FUNCTION__, __LINE__); \ +} while (0) + /* * When the hpet driver (/dev/hpet) is enabled, we need to reserve * timer 0 and timer 1 in case of RTC emulation. 
@@ -282,6 +322,7 @@ static void hpet_set_mode(enum clock_event_mode mode, hpet_writel(cmp, HPET_Tn_CMP(timer)); udelay(1); hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); + hpet_print_config(); break; case CLOCK_EVT_MODE_ONESHOT: @@ -308,6 +349,7 @@ static void hpet_set_mode(enum clock_event_mode mode, irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); enable_irq(hdev->irq); } + hpet_print_config(); break; } } @@ -526,6 +568,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); num_timers++; /* Value read out starts from 0 */ + hpet_print_config(); hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL); if (!hpet_devs) @@ -793,6 +836,7 @@ int __init hpet_enable(void) * information and the number of channels */ id = hpet_readl(HPET_ID); + hpet_print_config(); #ifdef CONFIG_HPET_EMULATE_RTC /* @@ -845,6 +889,7 @@ static __init int hpet_late_init(void) return -ENODEV; hpet_reserve_platform_timers(hpet_readl(HPET_ID)); + hpet_print_config(); for_each_online_cpu(cpu) { hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); -- cgit v1.2.3 From 8d6f0c8214928f7c5083dd54ecb69c5d615b516e Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Sat, 21 Feb 2009 00:10:44 +0100 Subject: x86: hpet: provide separate functions to stop and start the counter By splitting up existing hpet_start_counter function. Signed-off-by: Andreas Herrmann Cc: Mark Hounschell Cc: Borislav Petkov Signed-off-by: Ingo Molnar --- arch/x86/kernel/hpet.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 1d86ca3c1f9..7ae1f1e8b9b 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -231,27 +231,37 @@ static struct clock_event_device hpet_clockevent = { .rating = 50, }; -static void hpet_start_counter(void) +static void hpet_stop_counter(void) { unsigned long cfg = hpet_readl(HPET_CFG); - cfg &= ~HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); hpet_writel(0, HPET_COUNTER); hpet_writel(0, HPET_COUNTER + 4); +} + +static void hpet_start_counter(void) +{ + unsigned long cfg = hpet_readl(HPET_CFG); cfg |= HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); } +static void hpet_restart_counter(void) +{ + hpet_stop_counter(); + hpet_start_counter(); +} + static void hpet_resume_device(void) { force_hpet_resume(); } -static void hpet_restart_counter(void) +static void hpet_resume_counter(void) { hpet_resume_device(); - hpet_start_counter(); + hpet_restart_counter(); } static void hpet_enable_legacy_int(void) @@ -738,7 +748,7 @@ static struct clocksource clocksource_hpet = { .mask = HPET_MASK, .shift = HPET_SHIFT, .flags = CLOCK_SOURCE_IS_CONTINUOUS, - .resume = hpet_restart_counter, + .resume = hpet_resume_counter, #ifdef CONFIG_X86_64 .vread = vread_hpet, #endif @@ -750,7 +760,7 @@ static int hpet_clocksource_register(void) cycle_t t1; /* Start the counter */ - hpet_start_counter(); + hpet_restart_counter(); /* Verify whether hpet counter works */ t1 = read_hpet(); -- cgit v1.2.3 From c23e253e67c9d8a91a0ffa33c1f571a17f0a2403 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Sat, 21 Feb 2009 00:16:35 +0100 Subject: x86: hpet: stop HPET_COUNTER when programming periodic mode Impact: fix system hang on some systems operating with HZ_1000 On a system that stalled with HZ_1000, the first value written to T0_CMP (when the main counter was not stopped) did not trigger an interrupt. 
Instead, after the main counter wrapped around (after several minutes), an interrupt was triggered and afterwards the periodic interrupt took effect. This can be fixed by implementing the HPET specification's recommendation for programming the periodic mode (i.e. stopping the main counter first). Signed-off-by: Andreas Herrmann Cc: Mark Hounschell Cc: Borislav Petkov Signed-off-by: Ingo Molnar --- arch/x86/kernel/hpet.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 7ae1f1e8b9b..648b3a2a3a4 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -309,29 +309,22 @@ static int hpet_setup_msi_irq(unsigned int irq); static void hpet_set_mode(enum clock_event_mode mode, struct clock_event_device *evt, int timer) { - unsigned long cfg, cmp, now; + unsigned long cfg; uint64_t delta; switch (mode) { case CLOCK_EVT_MODE_PERIODIC: + hpet_stop_counter(); delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult; delta >>= evt->shift; - now = hpet_readl(HPET_COUNTER); - cmp = now + (unsigned long) delta; cfg = hpet_readl(HPET_Tn_CFG(timer)); /* Make sure we use edge triggered interrupts */ cfg &= ~HPET_TN_LEVEL; cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL | HPET_TN_32BIT; hpet_writel(cfg, HPET_Tn_CFG(timer)); - /* - * The first write after writing TN_SETVAL to the - * config register sets the counter value, the second - * write sets the period. - */ - hpet_writel(cmp, HPET_Tn_CMP(timer)); - udelay(1); hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); + hpet_start_counter(); hpet_print_config(); break; -- cgit v1.2.3 From fff78ad5cee1d6f695103ec590cbd2a9f3c39e8c Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:28:42 -0500 Subject: [CPUFREQ] Stupidly trivial CodingStyle fix GNU indent complains about this being ambiguous, because it's dumb. One of my automated tests relies on the output of indent, so this shuts it up.
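For the record, the ambiguity indent complains about is most likely the archaic =- compound-assignment spelling from pre-K&R C: without a space, minimum_speed=-1 can be tokenized either as an assignment of -1 or as the old equivalent of today's -=. A contrived sketch:

	/* Before (flagged by indent): */
	static unsigned int minimum_speed=-1;	/* "= -1", or the archaic "=- 1"? */

	/* After (unambiguous); the -1 deliberately wraps to UINT_MAX: */
	static unsigned int minimum_speed = -1;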
Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index 1b446d79a8f..9cd73d15743 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -94,7 +94,7 @@ static struct cpufreq_frequency_table *powernow_table; static unsigned int can_scale_bus; static unsigned int can_scale_vid; -static unsigned int minimum_speed=-1; +static unsigned int minimum_speed = -1; static unsigned int maximum_speed; static unsigned int number_scales; static unsigned int fsb; -- cgit v1.2.3 From b5c916666240032b29f73a1ca52c3e0fac37335c Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:39:47 -0500 Subject: [CPUFREQ] checkpatch cleanups for cpufreq-nforce2 Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c | 38 +++++++++++++++------------ 1 file changed, 21 insertions(+), 17 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c index 965ea52767a..ad8284f0e86 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c @@ -32,7 +32,7 @@ * nforce2_chipset: * FSB is changed using the chipset */ -static struct pci_dev *nforce2_chipset_dev; +static struct pci_dev *nforce2_dev; /* fid: * multiplier * 10 @@ -56,7 +56,8 @@ MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)"); MODULE_PARM_DESC(min_fsb, "Minimum FSB to use, if not defined: current FSB - 50"); -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "cpufreq-nforce2", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "cpufreq-nforce2", msg) /** * nforce2_calc_fsb - calculate FSB @@ -118,11 +119,11 @@ static void nforce2_write_pll(int pll) int temp; /* Set the pll addr. to 0x00 */ - pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLADR, 0); + pci_write_config_dword(nforce2_dev, NFORCE2_PLLADR, 0); /* Now write the value in all 64 registers */ for (temp = 0; temp <= 0x3f; temp++) - pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLREG, pll); + pci_write_config_dword(nforce2_dev, NFORCE2_PLLREG, pll); return; } @@ -139,8 +140,8 @@ static unsigned int nforce2_fsb_read(int bootfsb) u32 fsb, temp = 0; /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */ - nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, - 0x01EF, PCI_ANY_ID, PCI_ANY_ID, NULL); + nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, 0x01EF, + PCI_ANY_ID, PCI_ANY_ID, NULL); if (!nforce2_sub5) return 0; @@ -148,13 +149,13 @@ static unsigned int nforce2_fsb_read(int bootfsb) fsb /= 1000000; /* Check if PLL register is already set */ - pci_read_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8 *)&temp); + pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp); if (bootfsb || !temp) return fsb; /* Use PLL register FSB value */ - pci_read_config_dword(nforce2_chipset_dev, NFORCE2_PLLREG, &temp); + pci_read_config_dword(nforce2_dev, NFORCE2_PLLREG, &temp); fsb = nforce2_calc_fsb(temp); return fsb; @@ -185,7 +186,7 @@ static int nforce2_set_fsb(unsigned int fsb) } /* First write? 
Then set actual value */ - pci_read_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8 *)&temp); + pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp); if (!temp) { pll = nforce2_calc_pll(tfsb); @@ -197,7 +198,7 @@ static int nforce2_set_fsb(unsigned int fsb) /* Enable write access */ temp = 0x01; - pci_write_config_byte(nforce2_chipset_dev, NFORCE2_PLLENABLE, (u8)temp); + pci_write_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8)temp); diff = tfsb - fsb; @@ -222,7 +223,7 @@ static int nforce2_set_fsb(unsigned int fsb) } temp = 0x40; - pci_write_config_byte(nforce2_chipset_dev, NFORCE2_PLLADR, (u8)temp); + pci_write_config_byte(nforce2_dev, NFORCE2_PLLADR, (u8)temp); return 0; } @@ -244,7 +245,8 @@ static unsigned int nforce2_get(unsigned int cpu) * nforce2_target - set a new CPUFreq policy * @policy: new policy * @target_freq: the target frequency - * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * @relation: how that frequency relates to achieved frequency + * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) * * Sets a new CPUFreq policy. */ @@ -328,7 +330,8 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) if (!fid) { if (!cpu_khz) { printk(KERN_WARNING - "cpufreq: cpu_khz not set, can't calculate multiplier!\n"); + "cpufreq: cpu_khz not set, " + "can't calculate multiplier!\n"); return -ENODEV; } @@ -392,17 +395,18 @@ static struct cpufreq_driver nforce2_driver = { */ static unsigned int nforce2_detect_chipset(void) { - nforce2_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, + nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, PCI_ANY_ID, PCI_ANY_ID, NULL); - if (nforce2_chipset_dev == NULL) + if (nforce2_dev == NULL) return -ENODEV; printk(KERN_INFO "cpufreq: Detected nForce2 chipset revision %X\n", - nforce2_chipset_dev->revision); + nforce2_dev->revision); printk(KERN_INFO - "cpufreq: FSB changing is maybe unstable and can lead to crashes and data loss.\n"); + "cpufreq: FSB changing is maybe unstable and can lead to " + "crashes and data loss.\n"); return 0; } -- cgit v1.2.3 From 20174b65d9fdc8dddef3d2ab9647e01fdab13064 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:42:19 -0500 Subject: [CPUFREQ] nforce2: Use driver prefix, not cpufreq prefix. Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c index ad8284f0e86..99262906838 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c @@ -56,6 +56,7 @@ MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)"); MODULE_PARM_DESC(min_fsb, "Minimum FSB to use, if not defined: current FSB - 50"); +#define PFX "cpufreq-nforce2: " #define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ "cpufreq-nforce2", msg) @@ -175,13 +176,13 @@ static int nforce2_set_fsb(unsigned int fsb) int pll = 0; if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) { - printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb); + printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb); return -EINVAL; } tfsb = nforce2_fsb_read(0); if (!tfsb) { - printk(KERN_ERR "cpufreq: Error while reading the FSB\n"); + printk(KERN_ERR PFX "Error while reading the FSB\n"); return -EINVAL; } @@ -278,7 +279,7 @@ static int nforce2_target(struct cpufreq_policy *policy, /* local_irq_save(flags); */ if (nforce2_set_fsb(target_fsb) < 0) - printk(KERN_ERR "cpufreq: Changing FSB to %d failed\n", + printk(KERN_ERR PFX "Changing FSB to %d failed\n", target_fsb); else dprintk("Changed FSB successfully to %d\n", @@ -329,9 +330,8 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) /* FIX: Get FID from CPU */ if (!fid) { if (!cpu_khz) { - printk(KERN_WARNING - "cpufreq: cpu_khz not set, " - "can't calculate multiplier!\n"); + printk(KERN_WARNING PFX + "cpu_khz not set, can't calculate multiplier!\n"); return -ENODEV; } @@ -346,7 +346,7 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) } } - printk(KERN_INFO "cpufreq: FSB currently at %i MHz, FID %d.%d\n", fsb, + printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb, fid / 10, fid % 10); /* Set maximum FSB to FSB at boot time */ @@ -402,10 +402,10 @@ static unsigned int nforce2_detect_chipset(void) if (nforce2_dev == NULL) return -ENODEV; - printk(KERN_INFO "cpufreq: Detected nForce2 chipset revision %X\n", + printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n", nforce2_dev->revision); - printk(KERN_INFO - "cpufreq: FSB changing is maybe unstable and can lead to " + printk(KERN_INFO PFX + "FSB changing is maybe unstable and can lead to " "crashes and data loss.\n"); return 0; @@ -424,7 +424,7 @@ static int __init nforce2_init(void) /* detect chipset */ if (nforce2_detect_chipset()) { - printk(KERN_ERR "cpufreq: No nForce2 chipset.\n"); + printk(KERN_ERR PFX "No nForce2 chipset.\n"); return -ENODEV; } -- cgit v1.2.3 From 04cd1a99dc22bcaa1eccbe8f8386df8c1295e979 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:50:33 -0500 Subject: [CPUFREQ] checkpatch cleanups for elanfreq The remaining warning about the simple_strtoul conversion to strict_strtoul seems kind of pointless to me. 
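For reference, the difference the warning points at: simple_strtoul()
consumes whatever leading digits it finds and silently ignores the rest,
while strict_strtoul() fails unless the whole string is a valid number.
A minimal sketch of the contrast (illustration only, not part of this
patch; the helper name and the "200"-style input are made up):

	#include <linux/kernel.h>	/* simple_strtoul(), strict_strtoul() */

	/* Hypothetical parser for a numeric setup token such as "200". */
	static unsigned long example_parse(const char *str)
	{
		unsigned long val;
		char *end;

		/* Lax: "200" and "200abc" both yield 200; parsing simply
		 * stops at the first non-digit. */
		val = simple_strtoul(str, &end, 10);

		/* Strict: "200abc" is rejected outright, and val is only
		 * written when the whole string parses as a number. */
		if (strict_strtoul(str, 10, &val))
			val = 0;

		return val;
	}

For a token already split out of the kernel command line, the lax
behaviour is harmless in practice, hence leaving the warning alone.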
Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/elanfreq.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c index fe613c93b36..006b278b0d5 100644 --- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c +++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c @@ -184,7 +184,8 @@ static int elanfreq_target(struct cpufreq_policy *policy, { unsigned int newstate = 0; - if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], + target_freq, relation, &newstate)) return -EINVAL; elanfreq_set_cpu_state(newstate); @@ -301,7 +302,8 @@ static void __exit elanfreq_exit(void) module_param(max_freq, int, 0444); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Robert Schwebel , Sven Geggus "); +MODULE_AUTHOR("Robert Schwebel , " + "Sven Geggus "); MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs"); module_init(elanfreq_init); -- cgit v1.2.3 From c9b8c871525aeda153494996d84a832270205d81 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:54:47 -0500 Subject: [CPUFREQ] checkpatch cleanups for e_powersaver Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/e_powersaver.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c index c2f930d8664..3f83ea12c47 100644 --- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c +++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c @@ -12,12 +12,12 @@ #include #include #include +#include +#include +#include #include #include -#include -#include -#include #define EPS_BRAND_C7M 0 #define EPS_BRAND_C7 1 @@ -184,7 +184,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy) break; } - switch(brand) { + switch (brand) { case EPS_BRAND_C7M: printk(KERN_CONT "C7-M\n"); break; @@ -218,17 +218,20 @@ static int eps_cpu_init(struct cpufreq_policy *policy) /* Print voltage and multiplier */ rdmsr(MSR_IA32_PERF_STATUS, lo, hi); current_voltage = lo & 0xff; - printk(KERN_INFO "eps: Current voltage = %dmV\n", current_voltage * 16 + 700); + printk(KERN_INFO "eps: Current voltage = %dmV\n", + current_voltage * 16 + 700); current_multiplier = (lo >> 8) & 0xff; printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier); /* Print limits */ max_voltage = hi & 0xff; - printk(KERN_INFO "eps: Highest voltage = %dmV\n", max_voltage * 16 + 700); + printk(KERN_INFO "eps: Highest voltage = %dmV\n", + max_voltage * 16 + 700); max_multiplier = (hi >> 8) & 0xff; printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier); min_voltage = (hi >> 16) & 0xff; - printk(KERN_INFO "eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700); + printk(KERN_INFO "eps: Lowest voltage = %dmV\n", + min_voltage * 16 + 700); min_multiplier = (hi >> 24) & 0xff; printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier); @@ -318,7 +321,7 @@ static int eps_cpu_exit(struct cpufreq_policy *policy) return 0; } -static struct freq_attr* eps_attr[] = { +static struct freq_attr *eps_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -356,7 +359,7 @@ static void __exit eps_exit(void) cpufreq_unregister_driver(&eps_driver); } -MODULE_AUTHOR("Rafa³ Bilski "); +MODULE_AUTHOR("Rafal Bilski "); MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's."); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 
00f6a235bf241e62dfadf32b4322309e65c7b177 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 23:06:57 -0500 Subject: [CPUFREQ] checkpatch cleanups for gx-suspmod Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/gx-suspmod.c | 105 +++++++++++++++++++------------ 1 file changed, 65 insertions(+), 40 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c index 9d9eae82e60..ac27ec2264d 100644 --- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c +++ b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c @@ -79,8 +79,9 @@ #include #include #include +#include + #include -#include /* PCI config registers, all at F0 */ #define PCI_PMER1 0x80 /* power management enable register 1 */ @@ -122,8 +123,8 @@ static struct gxfreq_params *gx_params; static int stock_freq; /* PCI bus clock - defaults to 30.000 if cpu_khz is not available */ -static int pci_busclk = 0; -module_param (pci_busclk, int, 0444); +static int pci_busclk; +module_param(pci_busclk, int, 0444); /* maximum duration for which the cpu may be suspended * (32us * MAX_DURATION). If no parameter is given, this defaults @@ -132,7 +133,7 @@ module_param (pci_busclk, int, 0444); * is suspended -- processing power is just 0.39% of what it used to be, * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ static int max_duration = 255; -module_param (max_duration, int, 0444); +module_param(max_duration, int, 0444); /* For the default policy, we want at least some processing power * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV) @@ -140,7 +141,8 @@ module_param (max_duration, int, 0444); #define POLICY_MIN_DIV 20 -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "gx-suspmod", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "gx-suspmod", msg) /** * we can detect a core multipiler from dir0_lsb @@ -166,12 +168,20 @@ static int gx_freq_mult[16] = { * Low Level chipset interface * ****************************************************************/ static struct pci_device_id gx_chipset_tbl[] __initdata = { - { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID }, - { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID }, - { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, + PCI_ANY_ID, PCI_ANY_ID }, { 0, }, }; +static void gx_write_byte(int reg, int value) +{ + pci_write_config_byte(gx_params->cs55x0, reg, value); +} + /** * gx_detect_chipset: * @@ -200,7 +210,8 @@ static __init struct pci_dev *gx_detect_chipset(void) /** * gx_get_cpuspeed: * - * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs. + * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi + * Geode CPU runs. 
*/ static unsigned int gx_get_cpuspeed(unsigned int cpu) { @@ -217,17 +228,18 @@ static unsigned int gx_get_cpuspeed(unsigned int cpu) * **/ -static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration) +static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, + u8 *off_duration) { unsigned int i; u8 tmp_on, tmp_off; int old_tmp_freq = stock_freq; int tmp_freq; - *off_duration=1; - *on_duration=0; + *off_duration = 1; + *on_duration = 0; - for (i=max_duration; i>0; i--) { + for (i = max_duration; i > 0; i--) { tmp_off = ((khz * i) / stock_freq) & 0xff; tmp_on = i - tmp_off; tmp_freq = (stock_freq * tmp_off) / i; @@ -259,26 +271,34 @@ static void gx_set_cpuspeed(unsigned int khz) freqs.cpu = 0; freqs.old = gx_get_cpuspeed(0); - new_khz = gx_validate_speed(khz, &gx_params->on_duration, &gx_params->off_duration); + new_khz = gx_validate_speed(khz, &gx_params->on_duration, + &gx_params->off_duration); freqs.new = new_khz; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); local_irq_save(flags); - if (new_khz != stock_freq) { /* if new khz == 100% of CPU speed, it is special case */ + + + if (new_khz != stock_freq) { + /* if new khz == 100% of CPU speed, it is special case */ switch (gx_params->cs55x0->device) { case PCI_DEVICE_ID_CYRIX_5530_LEGACY: pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP; /* FIXME: need to test other values -- Zwane,Miura */ - pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */ - pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */ - pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1); - - if (gx_params->cs55x0->revision < 0x10) { /* CS5530(rev 1.2, 1.3) */ - suscfg = gx_params->pci_suscfg | SUSMOD; - } else { /* CS5530A,B.. */ - suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE; + /* typical 2 to 4ms */ + gx_write_byte(PCI_IRQTC, 4); + /* typical 50 to 100ms */ + gx_write_byte(PCI_VIDTC, 100); + gx_write_byte(PCI_PMER1, pmer1); + + if (gx_params->cs55x0->revision < 0x10) { + /* CS5530(rev 1.2, 1.3) */ + suscfg = gx_params->pci_suscfg|SUSMOD; + } else { + /* CS5530A,B.. */ + suscfg = gx_params->pci_suscfg|SUSMOD|PWRSVE; } break; case PCI_DEVICE_ID_CYRIX_5520: @@ -294,13 +314,13 @@ static void gx_set_cpuspeed(unsigned int khz) suscfg = gx_params->pci_suscfg & ~(SUSMOD); gx_params->off_duration = 0; gx_params->on_duration = 0; - dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n"); + dprintk("suspend modulation disabled: cpu runs 100%% speed.\n"); } - pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration); - pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration); + gx_write_byte(PCI_MODOFF, gx_params->off_duration); + gx_write_byte(PCI_MODON, gx_params->on_duration); - pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg); + gx_write_byte(PCI_SUSCFG, suscfg); pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg); local_irq_restore(flags); @@ -334,7 +354,8 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy) return -EINVAL; policy->cpu = 0; - cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), + stock_freq); /* it needs to be assured that at least one supported frequency is * within policy->min and policy->max. 
If it is not, policy->max @@ -354,7 +375,8 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy) policy->max = tmp_freq; if (policy->max < policy->min) policy->max = policy->min; - cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), + stock_freq); return 0; } @@ -398,18 +420,18 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) return -ENODEV; /* determine maximum frequency */ - if (pci_busclk) { + if (pci_busclk) maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; - } else if (cpu_khz) { + else if (cpu_khz) maxfreq = cpu_khz; - } else { + else maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; - } + stock_freq = maxfreq; curfreq = gx_get_cpuspeed(0); dprintk("cpu max frequency is %d.\n", maxfreq); - dprintk("cpu current frequency is %dkHz.\n",curfreq); + dprintk("cpu current frequency is %dkHz.\n", curfreq); /* setup basic struct for cpufreq API */ policy->cpu = 0; @@ -447,7 +469,8 @@ static int __init cpufreq_gx_init(void) struct pci_dev *gx_pci; /* Test if we have the right hardware */ - if ((gx_pci = gx_detect_chipset()) == NULL) + gx_pci = gx_detect_chipset(); + if (gx_pci == NULL) return -ENODEV; /* check whether module parameters are sane */ @@ -468,9 +491,11 @@ static int __init cpufreq_gx_init(void) pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1)); pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); - pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration)); + pci_read_config_byte(params->cs55x0, PCI_MODOFF, + &(params->off_duration)); - if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) { + ret = cpufreq_register_driver(&gx_suspmod_driver); + if (ret) { kfree(params); return ret; /* register error! 
*/ } @@ -485,9 +510,9 @@ static void __exit cpufreq_gx_exit(void) kfree(gx_params); } -MODULE_AUTHOR ("Hiroshi Miura "); -MODULE_DESCRIPTION ("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Hiroshi Miura "); +MODULE_DESCRIPTION("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); +MODULE_LICENSE("GPL"); module_init(cpufreq_gx_init); module_exit(cpufreq_gx_exit); -- cgit v1.2.3 From ac617bd0f7b959dc6708ad2f0d6b9dcf4382f1ed Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 23:29:53 -0500 Subject: [CPUFREQ] checkpatch cleanups for longhaul Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/longhaul.c | 182 +++++++++++++++++---------------- arch/x86/kernel/cpu/cpufreq/longhaul.h | 12 +-- 2 files changed, 100 insertions(+), 94 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index a4cff5d6e38..dc03622a83a 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c @@ -30,12 +30,12 @@ #include #include #include +#include +#include +#include +#include #include -#include -#include -#include -#include #include #include "longhaul.h" @@ -58,7 +58,7 @@ #define USE_NORTHBRIDGE (1 << 2) static int cpu_model; -static unsigned int numscales=16; +static unsigned int numscales = 16; static unsigned int fsb; static const struct mV_pos *vrm_mV_table; @@ -67,8 +67,8 @@ static const unsigned char *mV_vrm_table; static unsigned int highest_speed, lowest_speed; /* kHz */ static unsigned int minmult, maxmult; static int can_scale_voltage; -static struct acpi_processor *pr = NULL; -static struct acpi_processor_cx *cx = NULL; +static struct acpi_processor *pr; +static struct acpi_processor_cx *cx; static u32 acpi_regs_addr; static u8 longhaul_flags; static unsigned int longhaul_index; @@ -78,12 +78,13 @@ static int scale_voltage; static int disable_acpi_c3; static int revid_errata; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg) +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "longhaul", msg) /* Clock ratios multiplied by 10 */ -static int clock_ratio[32]; -static int eblcr_table[32]; +static int mults[32]; +static int eblcr[32]; static int longhaul_version; static struct cpufreq_frequency_table *longhaul_table; @@ -93,7 +94,7 @@ static char speedbuffer[8]; static char *print_speed(int speed) { if (speed < 1000) { - snprintf(speedbuffer, sizeof(speedbuffer),"%dMHz", speed); + snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed); return speedbuffer; } @@ -122,27 +123,28 @@ static unsigned int calc_speed(int mult) static int longhaul_get_cpu_mult(void) { - unsigned long invalue=0,lo, hi; + unsigned long invalue = 0, lo, hi; - rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi); - invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22; - if (longhaul_version==TYPE_LONGHAUL_V2 || longhaul_version==TYPE_POWERSAVER) { + rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi); + invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22; + if (longhaul_version == TYPE_LONGHAUL_V2 || + longhaul_version == TYPE_POWERSAVER) { if (lo & (1<<27)) - invalue+=16; + invalue += 16; } - return eblcr_table[invalue]; + return eblcr[invalue]; } /* For processor with BCR2 MSR */ -static void do_longhaul1(unsigned int clock_ratio_index) +static void do_longhaul1(unsigned int mults_index) { union msr_bcr2 bcr2; rdmsrl(MSR_VIA_BCR2, bcr2.val); /* Enable software clock multiplier */ bcr2.bits.ESOFTBF = 1; - bcr2.bits.CLOCKMUL = clock_ratio_index & 0xff; + bcr2.bits.CLOCKMUL = mults_index & 0xff; /* Sync to timer tick */ safe_halt(); @@ -161,7 +163,7 @@ static void do_longhaul1(unsigned int clock_ratio_index) /* For processor with Longhaul MSR */ -static void do_powersaver(int cx_address, unsigned int clock_ratio_index, +static void do_powersaver(int cx_address, unsigned int mults_index, unsigned int dir) { union msr_longhaul longhaul; @@ -173,11 +175,11 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index, longhaul.bits.RevisionKey = longhaul.bits.RevisionID; else longhaul.bits.RevisionKey = 0; - longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf; - longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; + longhaul.bits.SoftBusRatio = mults_index & 0xf; + longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4; /* Setup new voltage */ if (can_scale_voltage) - longhaul.bits.SoftVID = (clock_ratio_index >> 8) & 0x1f; + longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f; /* Sync to timer tick */ safe_halt(); /* Raise voltage if necessary */ @@ -240,14 +242,14 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index, /** * longhaul_set_cpu_frequency() - * @clock_ratio_index : bitpattern of the new multiplier. + * @mults_index : bitpattern of the new multiplier. * * Sets a new clock ratio. 
*/ static void longhaul_setstate(unsigned int table_index) { - unsigned int clock_ratio_index; + unsigned int mults_index; int speed, mult; struct cpufreq_freqs freqs; unsigned long flags; @@ -256,9 +258,9 @@ static void longhaul_setstate(unsigned int table_index) u32 bm_timeout = 1000; unsigned int dir = 0; - clock_ratio_index = longhaul_table[table_index].index; + mults_index = longhaul_table[table_index].index; /* Safety precautions */ - mult = clock_ratio[clock_ratio_index & 0x1f]; + mult = mults[mults_index & 0x1f]; if (mult == -1) return; speed = calc_speed(mult); @@ -274,7 +276,7 @@ static void longhaul_setstate(unsigned int table_index) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", + dprintk("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: preempt_disable(); @@ -282,8 +284,8 @@ retry_loop: pic2_mask = inb(0xA1); pic1_mask = inb(0x21); /* works on C3. save mask. */ - outb(0xFF,0xA1); /* Overkill */ - outb(0xFE,0x21); /* TMR0 only */ + outb(0xFF, 0xA1); /* Overkill */ + outb(0xFE, 0x21); /* TMR0 only */ /* Wait while PCI bus is busy. */ if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE @@ -312,7 +314,7 @@ retry_loop: * Software controlled multipliers only. */ case TYPE_LONGHAUL_V1: - do_longhaul1(clock_ratio_index); + do_longhaul1(mults_index); break; /* @@ -327,9 +329,9 @@ retry_loop: if (longhaul_flags & USE_ACPI_C3) { /* Don't allow wakeup */ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); - do_powersaver(cx->address, clock_ratio_index, dir); + do_powersaver(cx->address, mults_index, dir); } else { - do_powersaver(0, clock_ratio_index, dir); + do_powersaver(0, mults_index, dir); } break; } @@ -341,8 +343,8 @@ retry_loop: /* Enable bus master arbitration */ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); } - outb(pic2_mask,0xA1); /* restore mask */ - outb(pic1_mask,0x21); + outb(pic2_mask, 0xA1); /* restore mask */ + outb(pic1_mask, 0x21); local_irq_restore(flags); preempt_enable(); @@ -392,7 +394,8 @@ retry_loop: cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); if (!bm_timeout) - printk(KERN_INFO PFX "Warning: Timeout while waiting for idle PCI bus.\n"); + printk(KERN_INFO PFX "Warning: Timeout while waiting for " + "idle PCI bus.\n"); } /* @@ -458,31 +461,32 @@ static int __init longhaul_get_ranges(void) break; } - dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n", + dprintk("MinMult:%d.%dx MaxMult:%d.%dx\n", minmult/10, minmult%10, maxmult/10, maxmult%10); highest_speed = calc_speed(maxmult); lowest_speed = calc_speed(minmult); - dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, + dprintk("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, print_speed(lowest_speed/1000), print_speed(highest_speed/1000)); if (lowest_speed == highest_speed) { - printk (KERN_INFO PFX "highestspeed == lowest, aborting.\n"); + printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n"); return -EINVAL; } if (lowest_speed > highest_speed) { - printk (KERN_INFO PFX "nonsense! lowest (%d > %d) !\n", + printk(KERN_INFO PFX "nonsense! 
lowest (%d > %d) !\n", lowest_speed, highest_speed); return -EINVAL; } - longhaul_table = kmalloc((numscales + 1) * sizeof(struct cpufreq_frequency_table), GFP_KERNEL); - if(!longhaul_table) + longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table), + GFP_KERNEL); + if (!longhaul_table) return -ENOMEM; for (j = 0; j < numscales; j++) { - ratio = clock_ratio[j]; + ratio = mults[j]; if (ratio == -1) continue; if (ratio > maxmult || ratio < minmult) @@ -521,7 +525,7 @@ static int __init longhaul_get_ranges(void) /* Find index we are running on */ for (j = 0; j < k; j++) { - if (clock_ratio[longhaul_table[j].index & 0x1f] == mult) { + if (mults[longhaul_table[j].index & 0x1f] == mult) { longhaul_index = j; break; } @@ -559,20 +563,22 @@ static void __init longhaul_setup_voltagescaling(void) maxvid = vrm_mV_table[longhaul.bits.MaximumVID]; if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) { - printk (KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " + printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " "Voltage scaling disabled.\n", - minvid.mV/1000, minvid.mV%1000, maxvid.mV/1000, maxvid.mV%1000); + minvid.mV/1000, minvid.mV%1000, + maxvid.mV/1000, maxvid.mV%1000); return; } if (minvid.mV == maxvid.mV) { - printk (KERN_INFO PFX "Claims to support voltage scaling but min & max are " - "both %d.%03d. Voltage scaling disabled\n", + printk(KERN_INFO PFX "Claims to support voltage scaling but " + "min & max are both %d.%03d. " + "Voltage scaling disabled\n", maxvid.mV/1000, maxvid.mV%1000); return; } - /* How many voltage steps */ + /* How many voltage steps*/ numvscales = maxvid.pos - minvid.pos + 1; printk(KERN_INFO PFX "Max VID=%d.%03d " @@ -586,7 +592,7 @@ static void __init longhaul_setup_voltagescaling(void) j = longhaul.bits.MinMHzBR; if (longhaul.bits.MinMHzBR4) j += 16; - min_vid_speed = eblcr_table[j]; + min_vid_speed = eblcr[j]; if (min_vid_speed == -1) return; switch (longhaul.bits.MinMHzFSB) { @@ -617,7 +623,8 @@ static void __init longhaul_setup_voltagescaling(void) pos = minvid.pos; longhaul_table[j].index |= mV_vrm_table[pos] << 8; vid = vrm_mV_table[mV_vrm_table[pos]]; - printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", speed, j, vid.mV); + printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", + speed, j, vid.mV); j++; } @@ -640,7 +647,8 @@ static int longhaul_target(struct cpufreq_policy *policy, unsigned int dir = 0; u8 vid, current_vid; - if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index)) + if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, + relation, &table_index)) return -EINVAL; /* Don't set same frequency again */ @@ -656,7 +664,8 @@ static int longhaul_target(struct cpufreq_policy *policy, * this in hardware, C3 is old and we need to do this * in software. 
*/ i = longhaul_index; - current_vid = (longhaul_table[longhaul_index].index >> 8) & 0x1f; + current_vid = (longhaul_table[longhaul_index].index >> 8); + current_vid &= 0x1f; if (table_index > longhaul_index) dir = 1; while (i != table_index) { @@ -691,9 +700,9 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle, { struct acpi_device *d; - if ( acpi_bus_get_device(obj_handle, &d) ) { + if (acpi_bus_get_device(obj_handle, &d)) return 0; - } + *return_value = acpi_driver_data(d); return 1; } @@ -750,7 +759,7 @@ static int longhaul_setup_southbridge(void) /* Find VT8235 southbridge */ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL); if (dev == NULL) - /* Find VT8237 southbridge */ + /* Find VT8237 southbridge */ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, NULL); if (dev != NULL) { @@ -769,7 +778,8 @@ static int longhaul_setup_southbridge(void) if (pci_cmd & 1 << 7) { pci_read_config_dword(dev, 0x88, &acpi_regs_addr); acpi_regs_addr &= 0xff00; - printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", acpi_regs_addr); + printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", + acpi_regs_addr); } pci_dev_put(dev); @@ -781,7 +791,7 @@ static int longhaul_setup_southbridge(void) static int __init longhaul_cpu_init(struct cpufreq_policy *policy) { struct cpuinfo_x86 *c = &cpu_data(0); - char *cpuname=NULL; + char *cpuname = NULL; int ret; u32 lo, hi; @@ -791,8 +801,8 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) cpu_model = CPU_SAMUEL; cpuname = "C3 'Samuel' [C5A]"; longhaul_version = TYPE_LONGHAUL_V1; - memcpy (clock_ratio, samuel1_clock_ratio, sizeof(samuel1_clock_ratio)); - memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr)); + memcpy(mults, samuel1_mults, sizeof(samuel1_mults)); + memcpy(eblcr, samuel1_eblcr, sizeof(samuel1_eblcr)); break; case 7: @@ -803,10 +813,8 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) cpuname = "C3 'Samuel 2' [C5B]"; /* Note, this is not a typo, early Samuel2's had * Samuel1 ratios. */ - memcpy(clock_ratio, samuel1_clock_ratio, - sizeof(samuel1_clock_ratio)); - memcpy(eblcr_table, samuel2_eblcr, - sizeof(samuel2_eblcr)); + memcpy(mults, samuel1_mults, sizeof(samuel1_mults)); + memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr)); break; case 1 ... 15: longhaul_version = TYPE_LONGHAUL_V1; @@ -817,10 +825,8 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) cpu_model = CPU_EZRA; cpuname = "C3 'Ezra' [C5C]"; } - memcpy(clock_ratio, ezra_clock_ratio, - sizeof(ezra_clock_ratio)); - memcpy(eblcr_table, ezra_eblcr, - sizeof(ezra_eblcr)); + memcpy(mults, ezra_mults, sizeof(ezra_mults)); + memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr)); break; } break; @@ -829,18 +835,16 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) cpu_model = CPU_EZRA_T; cpuname = "C3 'Ezra-T' [C5M]"; longhaul_version = TYPE_POWERSAVER; - numscales=32; - memcpy (clock_ratio, ezrat_clock_ratio, sizeof(ezrat_clock_ratio)); - memcpy (eblcr_table, ezrat_eblcr, sizeof(ezrat_eblcr)); + numscales = 32; + memcpy(mults, ezrat_mults, sizeof(ezrat_mults)); + memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr)); break; case 9: longhaul_version = TYPE_POWERSAVER; numscales = 32; - memcpy(clock_ratio, - nehemiah_clock_ratio, - sizeof(nehemiah_clock_ratio)); - memcpy(eblcr_table, nehemiah_eblcr, sizeof(nehemiah_eblcr)); + memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); + memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); switch (c->x86_mask) { case 0 ... 
1: cpu_model = CPU_NEHEMIAH; @@ -869,14 +873,14 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy) longhaul_version = TYPE_LONGHAUL_V1; } - printk (KERN_INFO PFX "VIA %s CPU detected. ", cpuname); + printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname); switch (longhaul_version) { case TYPE_LONGHAUL_V1: case TYPE_LONGHAUL_V2: - printk ("Longhaul v%d supported.\n", longhaul_version); + printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version); break; case TYPE_POWERSAVER: - printk ("Powersaver supported.\n"); + printk(KERN_CONT "Powersaver supported.\n"); break; }; @@ -940,7 +944,7 @@ static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) return 0; } -static struct freq_attr* longhaul_attr[] = { +static struct freq_attr *longhaul_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -966,13 +970,15 @@ static int __init longhaul_init(void) #ifdef CONFIG_SMP if (num_online_cpus() > 1) { - printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n"); + printk(KERN_ERR PFX "More than 1 CPU detected, " + "longhaul disabled.\n"); return -ENODEV; } #endif #ifdef CONFIG_X86_IO_APIC if (cpu_has_apic) { - printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n"); + printk(KERN_ERR PFX "APIC detected. Longhaul is currently " + "broken in this configuration.\n"); return -ENODEV; } #endif @@ -993,8 +999,8 @@ static void __exit longhaul_exit(void) { int i; - for (i=0; i < numscales; i++) { - if (clock_ratio[i] == maxmult) { + for (i = 0; i < numscales; i++) { + if (mults[i] == maxmult) { longhaul_setstate(i); break; } @@ -1007,11 +1013,11 @@ static void __exit longhaul_exit(void) /* Even if BIOS is exporting ACPI C3 state, and it is used * with success when CPU is idle, this state doesn't * trigger frequency transition in some cases. */ -module_param (disable_acpi_c3, int, 0644); +module_param(disable_acpi_c3, int, 0644); MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support"); /* Change CPU voltage with frequency. Very usefull to save * power, but most VIA C3 processors aren't supporting it. */ -module_param (scale_voltage, int, 0644); +module_param(scale_voltage, int, 0644); MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); /* Force revision key to 0 for processors which doesn't * support voltage scaling, but are introducing itself as @@ -1019,9 +1025,9 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor"); module_param(revid_errata, int, 0644); MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID"); -MODULE_AUTHOR ("Dave Jones "); -MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Dave Jones "); +MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors."); +MODULE_LICENSE("GPL"); late_initcall(longhaul_init); module_exit(longhaul_exit); diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.h b/arch/x86/kernel/cpu/cpufreq/longhaul.h index 4fcc320997d..e2360a469f7 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.h +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.h @@ -49,14 +49,14 @@ union msr_longhaul { /* * Clock ratio tables. Div/Mod by 10 to get ratio. - * The eblcr ones specify the ratio read from the CPU. - * The clock_ratio ones specify what to write to the CPU. + * The eblcr values specify the ratio read from the CPU. + * The mults values specify what to write to the CPU. 
*/ /* * VIA C3 Samuel 1 & Samuel 2 (stepping 0) */ -static const int __initdata samuel1_clock_ratio[16] = { +static const int __initdata samuel1_mults[16] = { -1, /* 0000 -> RESERVED */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -119,7 +119,7 @@ static const int __initdata samuel2_eblcr[16] = { /* * VIA C3 Ezra */ -static const int __initdata ezra_clock_ratio[16] = { +static const int __initdata ezra_mults[16] = { 100, /* 0000 -> 10.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -160,7 +160,7 @@ static const int __initdata ezra_eblcr[16] = { /* * VIA C3 (Ezra-T) [C5M]. */ -static const int __initdata ezrat_clock_ratio[32] = { +static const int __initdata ezrat_mults[32] = { 100, /* 0000 -> 10.0x */ 30, /* 0001 -> 3.0x */ 40, /* 0010 -> 4.0x */ @@ -235,7 +235,7 @@ static const int __initdata ezrat_eblcr[32] = { /* * VIA C3 Nehemiah */ -static const int __initdata nehemiah_clock_ratio[32] = { +static const int __initdata nehemiah_mults[32] = { 100, /* 0000 -> 10.0x */ -1, /* 0001 -> 16.0x */ 40, /* 0010 -> 4.0x */ -- cgit v1.2.3 From 48ee923a666d4cc54e48d55fc573c57492501122 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 23:32:50 -0500 Subject: [CPUFREQ] checkpatch cleanups for longrun Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/longrun.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c index 777a7ff075d..da5f70fcb76 100644 --- a/arch/x86/kernel/cpu/cpufreq/longrun.c +++ b/arch/x86/kernel/cpu/cpufreq/longrun.c @@ -11,12 +11,13 @@ #include #include #include +#include #include #include -#include -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longrun", msg) +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "longrun", msg) static struct cpufreq_driver longrun_driver; @@ -51,7 +52,7 @@ static void __init longrun_get_policy(struct cpufreq_policy *policy) msr_lo &= 0x0000007F; msr_hi &= 0x0000007F; - if ( longrun_high_freq <= longrun_low_freq ) { + if (longrun_high_freq <= longrun_low_freq) { /* Assume degenerate Longrun table */ policy->min = policy->max = longrun_high_freq; } else { @@ -79,7 +80,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy) if (!policy) return -EINVAL; - if ( longrun_high_freq <= longrun_low_freq ) { + if (longrun_high_freq <= longrun_low_freq) { /* Assume degenerate Longrun table */ pctg_lo = pctg_hi = 100; } else { @@ -152,7 +153,7 @@ static unsigned int longrun_get(unsigned int cpu) cpuid(0x80860007, &eax, &ebx, &ecx, &edx); dprintk("cpuid eax is %u\n", eax); - return (eax * 1000); + return eax * 1000; } /** @@ -196,7 +197,8 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); *high_freq = msr_lo * 1000; /* to kHz */ - dprintk("longrun table interface told %u - %u kHz\n", *low_freq, *high_freq); + dprintk("longrun table interface told %u - %u kHz\n", + *low_freq, *high_freq); if (*low_freq > *high_freq) *low_freq = *high_freq; @@ -219,7 +221,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, cpuid(0x80860007, &eax, &ebx, &ecx, &edx); /* try decreasing in 10% steps, some processors react only * on some barrier values */ - for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -=10) { + for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) { /* set to 0 to try_hi perf_pctg */ msr_lo &= 0xFFFFFF80; msr_hi &= 0xFFFFFF80; @@ -236,7 +238,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) * eqals - * low_freq * ( 1 - perf_pctg) = (cur_freq - high_freq * perf_pctg) + * low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg) * * high_freq * perf_pctg is stored tempoarily into "ebx". */ @@ -317,9 +319,10 @@ static void __exit longrun_exit(void) } -MODULE_AUTHOR ("Dominik Brodowski "); -MODULE_DESCRIPTION ("LongRun driver for Transmeta Crusoe and Efficeon processors."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Dominik Brodowski "); +MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and " + "Efficeon processors."); +MODULE_LICENSE("GPL"); module_init(longrun_init); module_exit(longrun_exit); -- cgit v1.2.3 From 14a6650f13b958aabc30ddd575b0902384b22457 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sun, 18 Jan 2009 00:00:04 -0500 Subject: [CPUFREQ] checkpatch cleanups for powernow-k6 Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k6.c | 44 ++++++++++++++++++------------- 1 file changed, 26 insertions(+), 18 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c index c1ac5790c63..f10dea409f4 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c @@ -1,6 +1,7 @@ /* * This file was based upon code in Powertweak Linux (http://powertweak.sf.net) - * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski. + * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, + * Dominik Brodowski. * * Licensed under the terms of the GNU GPL License version 2. 
* @@ -13,14 +14,15 @@ #include #include #include - -#include #include #include +#include + #define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long as it is unused */ +#define PFX "powernow-k6: " static unsigned int busfreq; /* FSB, in 10 kHz */ static unsigned int max_multiplier; @@ -47,8 +49,8 @@ static struct cpufreq_frequency_table clock_ratio[] = { */ static int powernow_k6_get_cpu_multiplier(void) { - u64 invalue = 0; - u32 msrval; + u64 invalue = 0; + u32 msrval; msrval = POWERNOW_IOPORT + 0x1; wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ @@ -68,12 +70,12 @@ static int powernow_k6_get_cpu_multiplier(void) */ static void powernow_k6_set_state(unsigned int best_i) { - unsigned long outvalue = 0, invalue = 0; - unsigned long msrval; - struct cpufreq_freqs freqs; + unsigned long outvalue = 0, invalue = 0; + unsigned long msrval; + struct cpufreq_freqs freqs; if (clock_ratio[best_i].index > max_multiplier) { - printk(KERN_ERR "cpufreq: invalid target frequency\n"); + printk(KERN_ERR PFX "invalid target frequency\n"); return; } @@ -119,7 +121,8 @@ static int powernow_k6_verify(struct cpufreq_policy *policy) * powernow_k6_setpolicy - sets a new CPUFreq policy * @policy: new policy * @target_freq: the target frequency - * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * @relation: how that frequency relates to achieved frequency + * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) * * sets a new CPUFreq policy */ @@ -127,9 +130,10 @@ static int powernow_k6_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { - unsigned int newstate = 0; + unsigned int newstate = 0; - if (cpufreq_frequency_table_target(policy, &clock_ratio[0], target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, &clock_ratio[0], + target_freq, relation, &newstate)) return -EINVAL; powernow_k6_set_state(newstate); @@ -140,7 +144,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy, static int powernow_k6_cpu_init(struct cpufreq_policy *policy) { - unsigned int i; + unsigned int i, f; int result; if (policy->cpu != 0) @@ -152,10 +156,11 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy) /* table init */ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { - if (clock_ratio[i].index > max_multiplier) + f = clock_ratio[i].index; + if (f > max_multiplier) clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; else - clock_ratio[i].frequency = busfreq * clock_ratio[i].index; + clock_ratio[i].frequency = busfreq * f; } /* cpuinfo and default policy values */ @@ -185,7 +190,9 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) static unsigned int powernow_k6_get(unsigned int cpu) { - return busfreq * powernow_k6_get_cpu_multiplier(); + unsigned int ret; + ret = (busfreq * powernow_k6_get_cpu_multiplier()); + return ret; } static struct freq_attr *powernow_k6_attr[] = { @@ -221,7 +228,7 @@ static int __init powernow_k6_init(void) return -ENODEV; if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) { - printk("cpufreq: PowerNow IOPORT region already used.\n"); + printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n"); return -EIO; } @@ -246,7 +253,8 @@ static void __exit powernow_k6_exit(void) } -MODULE_AUTHOR("Arjan van de Ven, Dave Jones , Dominik Brodowski "); +MODULE_AUTHOR("Arjan van de Ven, Dave Jones , " + "Dominik Brodowski "); MODULE_DESCRIPTION("PowerNow! 
driver for AMD K6-2+ / K6-3+ processors."); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 6072ace436800a011c3cb9a75ee49276bd90445a Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sun, 18 Jan 2009 01:27:35 -0500 Subject: [CPUFREQ] checkpatch cleanups for sc520 Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/sc520_freq.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c index 42da9bd677d..435a996a613 100644 --- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c +++ b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c @@ -19,17 +19,19 @@ #include #include +#include +#include #include -#include -#include #define MMCR_BASE 0xfffef000 /* The default base address */ #define OFFS_CPUCTL 0x2 /* CPU Control Register */ static __u8 __iomem *cpuctl; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "sc520_freq", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "sc520_freq", msg) +#define PFX "sc520_freq: " static struct cpufreq_frequency_table sc520_freq_table[] = { {0x01, 100000}, @@ -43,7 +45,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) switch (clockspeed_reg & 0x03) { default: - printk(KERN_ERR "sc520_freq: error: cpuctl register has unexpected value %02x\n", clockspeed_reg); + printk(KERN_ERR PFX "error: cpuctl register has unexpected " + "value %02x\n", clockspeed_reg); case 0x01: return 100000; case 0x02: @@ -51,7 +54,7 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) } } -static void sc520_freq_set_cpu_state (unsigned int state) +static void sc520_freq_set_cpu_state(unsigned int state) { struct cpufreq_freqs freqs; @@ -76,18 +79,19 @@ static void sc520_freq_set_cpu_state (unsigned int state) cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); }; -static int sc520_freq_verify (struct cpufreq_policy *policy) +static int sc520_freq_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]); } -static int sc520_freq_target (struct cpufreq_policy *policy, +static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; - if (cpufreq_frequency_table_target(policy, sc520_freq_table, target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, sc520_freq_table, + target_freq, relation, &newstate)) return -EINVAL; sc520_freq_set_cpu_state(newstate); @@ -116,7 +120,7 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy) result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table); if (result) - return (result); + return result; cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu); @@ -131,7 +135,7 @@ static int sc520_freq_cpu_exit(struct cpufreq_policy *policy) } -static struct freq_attr* sc520_freq_attr[] = { +static struct freq_attr *sc520_freq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -155,13 +159,13 @@ static int __init sc520_freq_init(void) int err; /* Test if we have the right hardware */ - if(c->x86_vendor != X86_VENDOR_AMD || - c->x86 != 4 || c->x86_model != 9) { + if (c->x86_vendor != X86_VENDOR_AMD || + c->x86 != 4 || c->x86_model != 9) { dprintk("no Elan SC520 processor found!\n"); return -ENODEV; } cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); - if(!cpuctl) { + if (!cpuctl) { printk(KERN_ERR "sc520_freq: error: failed to remap memory\n"); return 
-ENOMEM; } -- cgit v1.2.3 From bbfebd66554b934b270c4c49442f4fe5e62df0e5 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 23:55:22 -0500 Subject: [CPUFREQ] checkpatch cleanups for speedstep related drivers. Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 60 +++++----- arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 70 ++++++------ arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | 129 ++++++++++++--------- arch/x86/kernel/cpu/cpufreq/speedstep-lib.h | 18 +-- arch/x86/kernel/cpu/cpufreq/speedstep-smi.c | 166 +++++++++++++++++----------- 5 files changed, 258 insertions(+), 185 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index b585e04cbc9..46a2a7a5314 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -27,15 +27,16 @@ #include #include #include +#include #include #include -#include #include "speedstep-lib.h" #define PFX "p4-clockmod: " -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "p4-clockmod", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "p4-clockmod", msg) /* * Duty Cycle (3bits), note DC_DISABLE is not specified in @@ -58,7 +59,8 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) { u32 l, h; - if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV)) + if (!cpu_online(cpu) || + (newstate > DC_DISABLE) || (newstate == DC_RESV)) return -EINVAL; rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); @@ -66,7 +68,8 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) if (l & 0x01) dprintk("CPU#%d currently thermal throttled\n", cpu); - if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT)) + if (has_N44_O17_errata[cpu] && + (newstate == DC_25PT || newstate == DC_DFLT)) newstate = DC_38PT; rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); @@ -112,7 +115,8 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int i; - if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], + target_freq, relation, &newstate)) return -EINVAL; freqs.old = cpufreq_p4_get(policy->cpu); @@ -127,7 +131,8 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } - /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software + /* run on each logical CPU, + * see section 13.15.3 of IA32 Intel Architecture Software * Developer's Manual, Volume 3 */ for_each_cpu(i, policy->cpus) @@ -153,28 +158,30 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) { if (c->x86 == 0x06) { if (cpu_has(c, X86_FEATURE_EST)) - printk(KERN_WARNING PFX "Warning: EST-capable CPU detected. " - "The acpi-cpufreq module offers voltage scaling" - " in addition of frequency scaling. You should use " - "that instead of p4-clockmod, if possible.\n"); + printk(KERN_WARNING PFX "Warning: EST-capable CPU " + "detected. The acpi-cpufreq module offers " + "voltage scaling in addition of frequency " + "scaling. 
You should use that instead of " + "p4-clockmod, if possible.\n"); switch (c->x86_model) { case 0x0E: /* Core */ case 0x0F: /* Core Duo */ case 0x16: /* Celeron Core */ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; - return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE); + return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE); case 0x0D: /* Pentium M (Dothan) */ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; /* fall through */ case 0x09: /* Pentium M (Banias) */ - return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM); + return speedstep_get_frequency(SPEEDSTEP_CPU_PM); } } if (c->x86 != 0xF) { if (!cpu_has(c, X86_FEATURE_EST)) - printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. " - "Please send an e-mail to \n"); + printk(KERN_WARNING PFX "Unknown CPU. " + "Please send an e-mail to " + "\n"); return 0; } @@ -182,16 +189,16 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) * throttling is active or not. */ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; - if (speedstep_detect_processor() == SPEEDSTEP_PROCESSOR_P4M) { + if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) { printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. " "The speedstep-ich or acpi cpufreq modules offer " "voltage scaling in addition of frequency scaling. " "You should use either one instead of p4-clockmod, " "if possible.\n"); - return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4M); + return speedstep_get_frequency(SPEEDSTEP_CPU_P4M); } - return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4D); + return speedstep_get_frequency(SPEEDSTEP_CPU_P4D); } @@ -223,8 +230,8 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) return -EINVAL; /* table init */ - for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { - if ((i<2) && (has_N44_O17_errata[policy->cpu])) + for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { + if ((i < 2) && (has_N44_O17_errata[policy->cpu])) p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; else p4clockmod_table[i].frequency = (stock_freq * i)/8; @@ -258,12 +265,12 @@ static unsigned int cpufreq_p4_get(unsigned int cpu) l = DC_DISABLE; if (l != DC_DISABLE) - return (stock_freq * l / 8); + return stock_freq * l / 8; return stock_freq; } -static struct freq_attr* p4clockmod_attr[] = { +static struct freq_attr *p4clockmod_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -299,9 +306,10 @@ static int __init cpufreq_p4_init(void) ret = cpufreq_register_driver(&p4clockmod_driver); if (!ret) - printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n"); + printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock " + "Modulation available\n"); - return (ret); + return ret; } @@ -311,9 +319,9 @@ static void __exit cpufreq_p4_exit(void) } -MODULE_AUTHOR ("Zwane Mwaikambo "); -MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Zwane Mwaikambo "); +MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); +MODULE_LICENSE("GPL"); late_initcall(cpufreq_p4_init); module_exit(cpufreq_p4_exit); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index dedc1e98f16..8bbb11adb31 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -39,7 +39,7 @@ static struct pci_dev *speedstep_chipset_dev; /* speedstep_processor */ -static unsigned int speedstep_processor = 0; +static unsigned int 
speedstep_processor; static u32 pmbase; @@ -54,7 +54,8 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { }; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-ich", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "speedstep-ich", msg) /** @@ -62,7 +63,7 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { * * Returns: -ENODEV if no register could be found */ -static int speedstep_find_register (void) +static int speedstep_find_register(void) { if (!speedstep_chipset_dev) return -ENODEV; @@ -90,7 +91,7 @@ static int speedstep_find_register (void) * * Tries to change the SpeedStep state. */ -static void speedstep_set_state (unsigned int state) +static void speedstep_set_state(unsigned int state) { u8 pm2_blk; u8 value; @@ -133,11 +134,11 @@ static void speedstep_set_state (unsigned int state) dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); - if (state == (value & 0x1)) { - dprintk("change to %u MHz succeeded\n", (speedstep_get_processor_frequency(speedstep_processor) / 1000)); - } else { - printk (KERN_ERR "cpufreq: change failed - I/O error\n"); - } + if (state == (value & 0x1)) + dprintk("change to %u MHz succeeded\n", + speedstep_get_frequency(speedstep_processor) / 1000); + else + printk(KERN_ERR "cpufreq: change failed - I/O error\n"); return; } @@ -149,7 +150,7 @@ static void speedstep_set_state (unsigned int state) * Tries to activate the SpeedStep status and control registers. * Returns -EINVAL on an unsupported chipset, and zero on success. */ -static int speedstep_activate (void) +static int speedstep_activate(void) { u16 value = 0; @@ -175,20 +176,18 @@ static int speedstep_activate (void) * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected * chipset, or zero on failure. 
*/ -static unsigned int speedstep_detect_chipset (void) +static unsigned int speedstep_detect_chipset(void) { speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, - PCI_ANY_ID, - PCI_ANY_ID, + PCI_ANY_ID, PCI_ANY_ID, NULL); if (speedstep_chipset_dev) return 4; /* 4-M */ speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, - PCI_ANY_ID, - PCI_ANY_ID, + PCI_ANY_ID, PCI_ANY_ID, NULL); if (speedstep_chipset_dev) return 3; /* 3-M */ @@ -196,8 +195,7 @@ static unsigned int speedstep_detect_chipset (void) speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, - PCI_ANY_ID, - PCI_ANY_ID, + PCI_ANY_ID, PCI_ANY_ID, NULL); if (speedstep_chipset_dev) { /* speedstep.c causes lockups on Dell Inspirons 8000 and @@ -208,8 +206,7 @@ static unsigned int speedstep_detect_chipset (void) hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_MC, - PCI_ANY_ID, - PCI_ANY_ID, + PCI_ANY_ID, PCI_ANY_ID, NULL); if (!hostbridge) @@ -236,7 +233,7 @@ static unsigned int _speedstep_get(const struct cpumask *cpus) cpus_allowed = current->cpus_allowed; set_cpus_allowed_ptr(current, cpus); - speed = speedstep_get_processor_frequency(speedstep_processor); + speed = speedstep_get_frequency(speedstep_processor); set_cpus_allowed_ptr(current, &cpus_allowed); dprintk("detected %u kHz as current frequency\n", speed); return speed; @@ -251,11 +248,12 @@ static unsigned int speedstep_get(unsigned int cpu) * speedstep_target - set a new CPUFreq policy * @policy: new policy * @target_freq: the target frequency - * @relation: how that frequency relates to achieved frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * @relation: how that frequency relates to achieved frequency + * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) * * Sets a new CPUFreq policy. */ -static int speedstep_target (struct cpufreq_policy *policy, +static int speedstep_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { @@ -264,7 +262,8 @@ static int speedstep_target (struct cpufreq_policy *policy, cpumask_t cpus_allowed; int i; - if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], + target_freq, relation, &newstate)) return -EINVAL; freqs.old = _speedstep_get(policy->cpus); @@ -308,7 +307,7 @@ static int speedstep_target (struct cpufreq_policy *policy, * Limit must be within speedstep_low_freq and speedstep_high_freq, with * at least one border included. */ -static int speedstep_verify (struct cpufreq_policy *policy) +static int speedstep_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); } @@ -344,7 +343,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) return -EIO; dprintk("currently at %s speed setting - %i MHz\n", - (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", + (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) + ? 
"low" : "high", (speed / 1000)); /* cpuinfo and default policy values */ @@ -352,9 +352,9 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); if (result) - return (result); + return result; - cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); + cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); return 0; } @@ -366,7 +366,7 @@ static int speedstep_cpu_exit(struct cpufreq_policy *policy) return 0; } -static struct freq_attr* speedstep_attr[] = { +static struct freq_attr *speedstep_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -396,13 +396,15 @@ static int __init speedstep_init(void) /* detect processor */ speedstep_processor = speedstep_detect_processor(); if (!speedstep_processor) { - dprintk("Intel(R) SpeedStep(TM) capable processor not found\n"); + dprintk("Intel(R) SpeedStep(TM) capable processor " + "not found\n"); return -ENODEV; } /* detect chipset */ if (!speedstep_detect_chipset()) { - dprintk("Intel(R) SpeedStep(TM) for this chipset not (yet) available.\n"); + dprintk("Intel(R) SpeedStep(TM) for this chipset not " + "(yet) available.\n"); return -ENODEV; } @@ -431,9 +433,11 @@ static void __exit speedstep_exit(void) } -MODULE_AUTHOR ("Dave Jones , Dominik Brodowski "); -MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors on chipsets with ICH-M southbridges."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Dave Jones , " + "Dominik Brodowski "); +MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets " + "with ICH-M southbridges."); +MODULE_LICENSE("GPL"); module_init(speedstep_init); module_exit(speedstep_exit); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index cdac7d62369..55c696daa05 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c @@ -18,10 +18,13 @@ #include #include "speedstep-lib.h" -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-lib", msg) +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "speedstep-lib", msg) + +#define PFX "speedstep-lib: " #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK -static int relaxed_check = 0; +static int relaxed_check; #else #define relaxed_check 0 #endif @@ -30,14 +33,14 @@ static int relaxed_check = 0; * GET PROCESSOR CORE SPEED IN KHZ * *********************************************************************/ -static unsigned int pentium3_get_frequency (unsigned int processor) +static unsigned int pentium3_get_frequency(unsigned int processor) { - /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ + /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ struct { unsigned int ratio; /* Frequency Multiplier (x10) */ u8 bitmap; /* power on configuration bits [27, 25:22] (in MSR 0x2a) */ - } msr_decode_mult [] = { + } msr_decode_mult[] = { { 30, 0x01 }, { 35, 0x05 }, { 40, 0x02 }, @@ -52,7 +55,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor) { 85, 0x26 }, { 90, 0x20 }, { 100, 0x2b }, - { 0, 0xff } /* error or unknown value */ + { 0, 0xff } /* error or unknown value */ }; /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ @@ -60,7 +63,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor) unsigned int value; /* Front Side Bus speed in MHz */ u8 bitmap; /* power on configuration bits [18: 19] (in MSR 0x2a) */ - } msr_decode_fsb [] = { + } msr_decode_fsb[] = { { 66, 0x0 }, { 100, 0x2 }, { 133, 0x1 }, @@ -85,7 +88,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor) } /* decode the multiplier */ - if (processor == SPEEDSTEP_PROCESSOR_PIII_C_EARLY) { + if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { dprintk("workaround for early PIIIs\n"); msr_lo &= 0x03c00000; } else @@ -97,9 +100,10 @@ static unsigned int pentium3_get_frequency (unsigned int processor) j++; } - dprintk("speed is %u\n", (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); + dprintk("speed is %u\n", + (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); - return (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100); + return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; } @@ -112,20 +116,23 @@ static unsigned int pentiumM_get_frequency(void) /* see table B-2 of 24547212.pdf */ if (msr_lo & 0x00040000) { - printk(KERN_DEBUG "speedstep-lib: PM - invalid FSB: 0x%x 0x%x\n", msr_lo, msr_tmp); + printk(KERN_DEBUG PFX "PM - invalid FSB: 0x%x 0x%x\n", + msr_lo, msr_tmp); return 0; } msr_tmp = (msr_lo >> 22) & 0x1f; - dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * 100 * 1000)); + dprintk("bits 22-26 are 0x%x, speed is %u\n", + msr_tmp, (msr_tmp * 100 * 1000)); - return (msr_tmp * 100 * 1000); + return msr_tmp * 100 * 1000; } static unsigned int pentium_core_get_frequency(void) { u32 fsb = 0; u32 msr_lo, msr_tmp; + int ret; rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp); /* see table B-2 of 25366920.pdf */ @@ -153,12 +160,15 @@ static unsigned int pentium_core_get_frequency(void) } rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", + msr_lo, msr_tmp); msr_tmp = (msr_lo >> 22) & 0x1f; - dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb)); + dprintk("bits 22-26 are 0x%x, speed is %u\n", + msr_tmp, (msr_tmp * fsb)); - return (msr_tmp * fsb); + ret = (msr_tmp * fsb); + return ret; } @@ -167,6 +177,7 @@ static unsigned int pentium4_get_frequency(void) struct cpuinfo_x86 *c = 
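The Pentium M and Pentium Core paths above both reduce to the same "bits 22..26" extraction once the FSB is known. A two-line self-check of that shift-and-mask (the multiplier value is arbitrary):

	#include <assert.h>

	static unsigned int msr_mult(unsigned int msr_lo)
	{
		return (msr_lo >> 22) & 0x1f;	/* bits 22..26 */
	}

	int main(void)
	{
		assert(msr_mult(12u << 22) == 12);		/* ratio 12 */
		assert(msr_mult((12u << 22) | 1u) == 12);	/* no leakage */
		return 0;
	}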
&boot_cpu_data; u32 msr_lo, msr_hi, mult; unsigned int fsb = 0; + unsigned int ret; rdmsr(0x2c, msr_lo, msr_hi); @@ -195,44 +206,47 @@ static unsigned int pentium4_get_frequency(void) } if (!fsb) - printk(KERN_DEBUG "speedstep-lib: couldn't detect FSB speed. Please send an e-mail to \n"); + printk(KERN_DEBUG PFX "couldn't detect FSB speed. " + "Please send an e-mail to \n"); /* Multiplier. */ mult = msr_lo >> 24; - dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", fsb, mult, (fsb * mult)); + dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", + fsb, mult, (fsb * mult)); - return (fsb * mult); + ret = (fsb * mult); + return ret; } -unsigned int speedstep_get_processor_frequency(unsigned int processor) +unsigned int speedstep_get_frequency(unsigned int processor) { switch (processor) { - case SPEEDSTEP_PROCESSOR_PCORE: + case SPEEDSTEP_CPU_PCORE: return pentium_core_get_frequency(); - case SPEEDSTEP_PROCESSOR_PM: + case SPEEDSTEP_CPU_PM: return pentiumM_get_frequency(); - case SPEEDSTEP_PROCESSOR_P4D: - case SPEEDSTEP_PROCESSOR_P4M: + case SPEEDSTEP_CPU_P4D: + case SPEEDSTEP_CPU_P4M: return pentium4_get_frequency(); - case SPEEDSTEP_PROCESSOR_PIII_T: - case SPEEDSTEP_PROCESSOR_PIII_C: - case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: + case SPEEDSTEP_CPU_PIII_T: + case SPEEDSTEP_CPU_PIII_C: + case SPEEDSTEP_CPU_PIII_C_EARLY: return pentium3_get_frequency(processor); default: return 0; }; return 0; } -EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency); +EXPORT_SYMBOL_GPL(speedstep_get_frequency); /********************************************************************* * DETECT SPEEDSTEP-CAPABLE PROCESSOR * *********************************************************************/ -unsigned int speedstep_detect_processor (void) +unsigned int speedstep_detect_processor(void) { struct cpuinfo_x86 *c = &cpu_data(0); u32 ebx, msr_lo, msr_hi; @@ -261,7 +275,7 @@ unsigned int speedstep_detect_processor (void) * sample has ebx = 0x0f, production has 0x0e. */ if ((ebx == 0x0e) || (ebx == 0x0f)) - return SPEEDSTEP_PROCESSOR_P4M; + return SPEEDSTEP_CPU_P4M; break; case 7: /* @@ -272,7 +286,7 @@ unsigned int speedstep_detect_processor (void) * samples are only of B-stepping... */ if (ebx == 0x0e) - return SPEEDSTEP_PROCESSOR_P4M; + return SPEEDSTEP_CPU_P4M; break; case 9: /* @@ -288,10 +302,13 @@ unsigned int speedstep_detect_processor (void) * M-P4-Ms may have either ebx=0xe or 0xf [see above] * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf] * also, M-P4M HTs have ebx=0x8, too - * For now, they are distinguished by the model_id string + * For now, they are distinguished by the model_id + * string */ - if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL)) - return SPEEDSTEP_PROCESSOR_P4M; + if ((ebx == 0x0e) || + (strstr(c->x86_model_id, + "Mobile Intel(R) Pentium(R) 4") != NULL)) + return SPEEDSTEP_CPU_P4M; break; default: break; @@ -301,7 +318,8 @@ unsigned int speedstep_detect_processor (void) switch (c->x86_model) { case 0x0B: /* Intel PIII [Tualatin] */ - /* cpuid_ebx(1) is 0x04 for desktop PIII, 0x06 for mobile PIII-M */ + /* cpuid_ebx(1) is 0x04 for desktop PIII, + * 0x06 for mobile PIII-M */ ebx = cpuid_ebx(0x00000001); dprintk("ebx is %x\n", ebx); @@ -313,14 +331,15 @@ unsigned int speedstep_detect_processor (void) /* So far all PIII-M processors support SpeedStep. 
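As the dprintk above shows, the P4 path keeps the FSB in kHz, so the returned speed is simply fsb * mult. A worked example, assuming a 100 MHz bus and a made-up multiplier:

	#include <stdio.h>

	int main(void)
	{
		unsigned int fsb = 100 * 1000;	/* 100 MHz bus, kept in kHz */
		unsigned int mult = 17;		/* invented ratio from MSR 0x2c */

		printf("%u kHz (%u MHz)\n", fsb * mult, fsb * mult / 1000);
		return 0;
	}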
See * Intel's 24540640.pdf of June 2003 */ - return SPEEDSTEP_PROCESSOR_PIII_T; + return SPEEDSTEP_CPU_PIII_T; case 0x08: /* Intel PIII [Coppermine] */ /* all mobile PIII Coppermines have FSB 100 MHz * ==> sort out a few desktop PIIIs. */ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); - dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi); + dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", + msr_lo, msr_hi); msr_lo &= 0x00c0000; if (msr_lo != 0x0080000) return 0; @@ -332,13 +351,15 @@ unsigned int speedstep_detect_processor (void) * bit 56 or 57 is set */ rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); - dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi); - if ((msr_hi & (1<<18)) && (relaxed_check ? 1 : (msr_hi & (3<<24)))) { + dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", + msr_lo, msr_hi); + if ((msr_hi & (1<<18)) && + (relaxed_check ? 1 : (msr_hi & (3<<24)))) { if (c->x86_mask == 0x01) { dprintk("early PIII version\n"); - return SPEEDSTEP_PROCESSOR_PIII_C_EARLY; + return SPEEDSTEP_CPU_PIII_C_EARLY; } else - return SPEEDSTEP_PROCESSOR_PIII_C; + return SPEEDSTEP_CPU_PIII_C; } default: @@ -369,7 +390,7 @@ unsigned int speedstep_get_freqs(unsigned int processor, dprintk("trying to determine both speeds\n"); /* get current speed */ - prev_speed = speedstep_get_processor_frequency(processor); + prev_speed = speedstep_get_frequency(processor); if (!prev_speed) return -EIO; @@ -379,7 +400,7 @@ unsigned int speedstep_get_freqs(unsigned int processor, /* switch to low state */ set_state(SPEEDSTEP_LOW); - *low_speed = speedstep_get_processor_frequency(processor); + *low_speed = speedstep_get_frequency(processor); if (!*low_speed) { ret = -EIO; goto out; @@ -398,7 +419,7 @@ unsigned int speedstep_get_freqs(unsigned int processor, if (transition_latency) do_gettimeofday(&tv2); - *high_speed = speedstep_get_processor_frequency(processor); + *high_speed = speedstep_get_frequency(processor); if (!*high_speed) { ret = -EIO; goto out; @@ -426,9 +447,12 @@ unsigned int speedstep_get_freqs(unsigned int processor, /* check if the latency measurement is too high or too low * and set it to a safe value (500uSec) in that case */ - if (*transition_latency > 10000000 || *transition_latency < 50000) { - printk (KERN_WARNING "speedstep: frequency transition measured seems out of " - "range (%u nSec), falling back to a safe one of %u nSec.\n", + if (*transition_latency > 10000000 || + *transition_latency < 50000) { + printk(KERN_WARNING PFX "frequency transition " + "measured seems out of range (%u " + "nSec), falling back to a safe one of" + "%u nSec.\n", *transition_latency, 500000); *transition_latency = 500000; } @@ -436,15 +460,16 @@ unsigned int speedstep_get_freqs(unsigned int processor, out: local_irq_restore(flags); - return (ret); + return ret; } EXPORT_SYMBOL_GPL(speedstep_get_freqs); #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK module_param(relaxed_check, int, 0444); -MODULE_PARM_DESC(relaxed_check, "Don't do all checks for speedstep capability."); +MODULE_PARM_DESC(relaxed_check, + "Don't do all checks for speedstep capability."); #endif -MODULE_AUTHOR ("Dominik Brodowski "); -MODULE_DESCRIPTION ("Library for Intel SpeedStep 1 or 2 cpufreq drivers."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Dominik Brodowski "); +MODULE_DESCRIPTION("Library for Intel SpeedStep 1 or 2 cpufreq drivers."); +MODULE_LICENSE("GPL"); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h index 
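speedstep_get_freqs() above guards against bogus latency measurements before trusting them. The clamp in isolation, with the same bounds and the same 500 usec fallback as the driver:

	#include <stdio.h>

	static unsigned int sane_latency(unsigned int measured_ns)
	{
		if (measured_ns > 10000000 || measured_ns < 50000)
			return 500000;	/* safe default, as in the driver */
		return measured_ns;
	}

	int main(void)
	{
		printf("%u\n", sane_latency(12));	/* -> 500000 */
		printf("%u\n", sane_latency(80000));	/* -> 80000 */
		return 0;
	}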
b11bcc608ca..2b6c04e5a30 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h @@ -12,17 +12,17 @@ /* processors */ -#define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */ -#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */ -#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */ -#define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M */ +#define SPEEDSTEP_CPU_PIII_C_EARLY 0x00000001 /* Coppermine core */ +#define SPEEDSTEP_CPU_PIII_C 0x00000002 /* Coppermine core */ +#define SPEEDSTEP_CPU_PIII_T 0x00000003 /* Tualatin core */ +#define SPEEDSTEP_CPU_P4M 0x00000004 /* P4-M */ /* the following processors are not speedstep-capable and are not auto-detected * in speedstep_detect_processor(). However, their speed can be detected using - * the speedstep_get_processor_frequency() call. */ -#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */ -#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */ -#define SPEEDSTEP_PROCESSOR_PCORE 0xFFFFFF05 /* Core */ + * the speedstep_get_frequency() call. */ +#define SPEEDSTEP_CPU_PM 0xFFFFFF03 /* Pentium M */ +#define SPEEDSTEP_CPU_P4D 0xFFFFFF04 /* desktop P4 */ +#define SPEEDSTEP_CPU_PCORE 0xFFFFFF05 /* Core */ /* speedstep states -- only two of them */ @@ -34,7 +34,7 @@ extern unsigned int speedstep_detect_processor (void); /* detect the current speed (in khz) of the processor */ -extern unsigned int speedstep_get_processor_frequency(unsigned int processor); +extern unsigned int speedstep_get_frequency(unsigned int processor); /* detect the low and high speeds of the processor. The callback diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c index 8a85c93bd62..befea088e4f 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c @@ -19,8 +19,8 @@ #include #include #include +#include #include -#include #include "speedstep-lib.h" @@ -30,12 +30,12 @@ * If user gives it, these are used. * */ -static int smi_port = 0; -static int smi_cmd = 0; -static unsigned int smi_sig = 0; +static int smi_port; +static int smi_cmd; +static unsigned int smi_sig; /* info about the processor */ -static unsigned int speedstep_processor = 0; +static unsigned int speedstep_processor; /* * There are only two frequency states for each processor. Values @@ -56,12 +56,13 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { * of DMA activity going on? */ #define SMI_TRIES 5 -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-smi", msg) +#define dprintk(msg...) 
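Several hunks here and in speedstep-smi.c drop "= 0" initialisers from statics. That is purely cosmetic: statics are zeroed before any code runs, which is why checkpatch flags the explicit initialiser as noise:

	static int relaxed_check;	/* zeroed at load time anyway */
	static int smi_port = 0;	/* equivalent, but flagged by checkpatch */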
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "speedstep-smi", msg) /** * speedstep_smi_ownership */ -static int speedstep_smi_ownership (void) +static int speedstep_smi_ownership(void) { u32 command, result, magic, dummy; u32 function = GET_SPEEDSTEP_OWNER; @@ -70,16 +71,18 @@ static int speedstep_smi_ownership (void) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); magic = virt_to_phys(magic_data); - dprintk("trying to obtain ownership with command %x at port %x\n", command, smi_port); + dprintk("trying to obtain ownership with command %x at port %x\n", + command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp\n" - : "=D" (result), "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy), - "=S" (dummy) + : "=D" (result), + "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy), + "=S" (dummy) : "a" (command), "b" (function), "c" (0), "d" (smi_port), - "D" (0), "S" (magic) + "D" (0), "S" (magic) : "memory" ); @@ -97,10 +100,10 @@ static int speedstep_smi_ownership (void) * even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing * shows that the latter occurs if !(ist_info.event & 0xFFFF). */ -static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high) +static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) { u32 command, result = 0, edi, high_mhz, low_mhz, dummy; - u32 state=0; + u32 state = 0; u32 function = GET_SPEEDSTEP_FREQS; if (!(ist_info.event & 0xFFFF)) { @@ -110,17 +113,25 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to determine frequencies with command %x at port %x\n", command, smi_port); + dprintk("trying to determine frequencies with command %x at port %x\n", + command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp" - : "=a" (result), "=b" (high_mhz), "=c" (low_mhz), "=d" (state), "=D" (edi), "=S" (dummy) - : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0), "D" (0) + : "=a" (result), + "=b" (high_mhz), + "=c" (low_mhz), + "=d" (state), "=D" (edi), "=S" (dummy) + : "a" (command), + "b" (function), + "c" (state), + "d" (smi_port), "S" (0), "D" (0) ); - dprintk("result %x, low_freq %u, high_freq %u\n", result, low_mhz, high_mhz); + dprintk("result %x, low_freq %u, high_freq %u\n", + result, low_mhz, high_mhz); /* abort if results are obviously incorrect... 
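The SMI helpers above build their command word the same way each time, packing the upper signature bytes with a one-byte command, and then sanity-check the BIOS answer (the test just below). Both steps outside the driver; the signature value here is arbitrary:

	#include <stdio.h>

	static unsigned int smi_command(unsigned int sig, unsigned int cmd)
	{
		return (sig & 0xffffff00) | (cmd & 0xff);
	}

	static int freqs_plausible(unsigned int low_mhz, unsigned int high_mhz)
	{
		return (high_mhz + low_mhz) >= 600;	/* driver aborts below */
	}

	int main(void)
	{
		printf("command %#x\n", smi_command(0x12345678, 0x82));
		printf("plausible: %d\n", freqs_plausible(733, 1200));
		return 0;
	}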
*/ if ((high_mhz + low_mhz) < 600) @@ -137,26 +148,30 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high) * @state: processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * */ -static int speedstep_get_state (void) +static int speedstep_get_state(void) { - u32 function=GET_SPEEDSTEP_STATE; + u32 function = GET_SPEEDSTEP_STATE; u32 result, state, edi, command, dummy; command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to determine current setting with command %x at port %x\n", command, smi_port); + dprintk("trying to determine current setting with command %x " + "at port %x\n", command, smi_port); __asm__ __volatile__( "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp\n" - : "=a" (result), "=b" (state), "=D" (edi), "=c" (dummy), "=d" (dummy), "=S" (dummy) - : "a" (command), "b" (function), "c" (0), "d" (smi_port), "S" (0), "D" (0) + : "=a" (result), + "=b" (state), "=D" (edi), + "=c" (dummy), "=d" (dummy), "=S" (dummy) + : "a" (command), "b" (function), "c" (0), + "d" (smi_port), "S" (0), "D" (0) ); dprintk("state is %x, result is %x\n", state, result); - return (state & 1); + return state & 1; } @@ -165,11 +180,11 @@ static int speedstep_get_state (void) * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * */ -static void speedstep_set_state (unsigned int state) +static void speedstep_set_state(unsigned int state) { unsigned int result = 0, command, new_state, dummy; unsigned long flags; - unsigned int function=SET_SPEEDSTEP_STATE; + unsigned int function = SET_SPEEDSTEP_STATE; unsigned int retry = 0; if (state > 0x1) @@ -180,11 +195,14 @@ static void speedstep_set_state (unsigned int state) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to set frequency to state %u with command %x at port %x\n", state, command, smi_port); + dprintk("trying to set frequency to state %u " + "with command %x at port %x\n", + state, command, smi_port); do { if (retry) { - dprintk("retry %u, previous result %u, waiting...\n", retry, result); + dprintk("retry %u, previous result %u, waiting...\n", + retry, result); mdelay(retry * 50); } retry++; @@ -192,20 +210,26 @@ static void speedstep_set_state (unsigned int state) "push %%ebp\n" "out %%al, (%%dx)\n" "pop %%ebp" - : "=b" (new_state), "=D" (result), "=c" (dummy), "=a" (dummy), - "=d" (dummy), "=S" (dummy) - : "a" (command), "b" (function), "c" (state), "d" (smi_port), "S" (0), "D" (0) + : "=b" (new_state), "=D" (result), + "=c" (dummy), "=a" (dummy), + "=d" (dummy), "=S" (dummy) + : "a" (command), "b" (function), "c" (state), + "d" (smi_port), "S" (0), "D" (0) ); } while ((new_state != state) && (retry <= SMI_TRIES)); /* enable IRQs */ local_irq_restore(flags); - if (new_state == state) { - dprintk("change to %u MHz succeeded after %u tries with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result); - } else { - printk(KERN_ERR "cpufreq: change to state %u failed with new_state %u and result %u\n", state, new_state, result); - } + if (new_state == state) + dprintk("change to %u MHz succeeded after %u tries " + "with result %u\n", + (speedstep_freqs[new_state].frequency / 1000), + retry, result); + else + printk(KERN_ERR "cpufreq: change to state %u " + "failed with new_state %u and result %u\n", + state, new_state, result); return; } @@ -219,13 +243,14 @@ static void speedstep_set_state (unsigned int state) * * Sets a new CPUFreq policy/freq. 
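speedstep_set_state() above retries a failed transition with a growing delay, up to SMI_TRIES attempts. The control flow on its own, with try_smi() standing in for the real out-instruction sequence (the stub fails twice on purpose):

	#include <stdio.h>

	#define SMI_TRIES 5

	static int try_smi(unsigned int want, unsigned int *got)
	{
		static int fails = 2;	/* pretend the first two calls fail */

		*got = fails-- > 0 ? !want : want;
		return 0;
	}

	int main(void)
	{
		unsigned int state = 1, new_state;
		unsigned int retry = 0;

		do {
			if (retry)
				printf("retry %u, waiting %u ms\n",
				       retry, retry * 50);
			retry++;
			try_smi(state, &new_state);
		} while (new_state != state && retry <= SMI_TRIES);

		printf("%s after %u tries\n",
		       new_state == state ? "succeeded" : "failed", retry);
		return 0;
	}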
*/ -static int speedstep_target (struct cpufreq_policy *policy, +static int speedstep_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; struct cpufreq_freqs freqs; - if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) + if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], + target_freq, relation, &newstate)) return -EINVAL; freqs.old = speedstep_freqs[speedstep_get_state()].frequency; @@ -250,7 +275,7 @@ static int speedstep_target (struct cpufreq_policy *policy, * Limit must be within speedstep_low_freq and speedstep_high_freq, with * at least one border included. */ -static int speedstep_verify (struct cpufreq_policy *policy) +static int speedstep_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); } @@ -259,7 +284,8 @@ static int speedstep_verify (struct cpufreq_policy *policy) static int speedstep_cpu_init(struct cpufreq_policy *policy) { int result; - unsigned int speed,state; + unsigned int speed, state; + unsigned int *low, *high; /* capability check */ if (policy->cpu != 0) @@ -272,19 +298,23 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) } /* detect low and high frequency */ - result = speedstep_smi_get_freqs(&speedstep_freqs[SPEEDSTEP_LOW].frequency, - &speedstep_freqs[SPEEDSTEP_HIGH].frequency); + low = &speedstep_freqs[SPEEDSTEP_LOW].frequency; + high = &speedstep_freqs[SPEEDSTEP_HIGH].frequency; + + result = speedstep_smi_get_freqs(low, high); if (result) { - /* fall back to speedstep_lib.c dection mechanism: try both states out */ - dprintk("could not detect low and high frequencies by SMI call.\n"); + /* fall back to speedstep_lib.c dection mechanism: + * try both states out */ + dprintk("could not detect low and high frequencies " + "by SMI call.\n"); result = speedstep_get_freqs(speedstep_processor, - &speedstep_freqs[SPEEDSTEP_LOW].frequency, - &speedstep_freqs[SPEEDSTEP_HIGH].frequency, + low, high, NULL, &speedstep_set_state); if (result) { - dprintk("could not detect two different speeds -- aborting.\n"); + dprintk("could not detect two different speeds" + " -- aborting.\n"); return result; } else dprintk("workaround worked.\n"); @@ -295,7 +325,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) speed = speedstep_freqs[state].frequency; dprintk("currently at %s speed setting - %i MHz\n", - (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", + (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) + ? 
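speedstep_cpu_init() above asks the SMI interface for both frequencies first and only falls back to empirically toggling states when that fails. The probe order as a sketch; smi_query() and probe_both_states() are invented stand-ins:

	#include <stdio.h>

	static int smi_query(unsigned int *low, unsigned int *high)
	{
		(void)low; (void)high;
		return -1;		/* pretend the BIOS call failed */
	}

	static int probe_both_states(unsigned int *low, unsigned int *high)
	{
		*low = 733000;		/* made-up kHz values */
		*high = 1200000;
		return 0;
	}

	static int detect_freqs(unsigned int *low, unsigned int *high)
	{
		if (!smi_query(low, high))
			return 0;	/* BIOS answered directly */
		/* fall back: switch through both states and measure */
		return probe_both_states(low, high);
	}

	int main(void)
	{
		unsigned int low, high;

		if (!detect_freqs(&low, &high))
			printf("low %u kHz, high %u kHz\n", low, high);
		return 0;
	}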
"low" : "high", (speed / 1000)); /* cpuinfo and default policy values */ @@ -304,7 +335,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); if (result) - return (result); + return result; cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); @@ -321,7 +352,7 @@ static unsigned int speedstep_get(unsigned int cpu) { if (cpu) return -ENODEV; - return speedstep_get_processor_frequency(speedstep_processor); + return speedstep_get_frequency(speedstep_processor); } @@ -335,7 +366,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) return result; } -static struct freq_attr* speedstep_attr[] = { +static struct freq_attr *speedstep_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -364,21 +395,23 @@ static int __init speedstep_init(void) speedstep_processor = speedstep_detect_processor(); switch (speedstep_processor) { - case SPEEDSTEP_PROCESSOR_PIII_T: - case SPEEDSTEP_PROCESSOR_PIII_C: - case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: + case SPEEDSTEP_CPU_PIII_T: + case SPEEDSTEP_CPU_PIII_C: + case SPEEDSTEP_CPU_PIII_C_EARLY: break; default: speedstep_processor = 0; } if (!speedstep_processor) { - dprintk ("No supported Intel CPU detected.\n"); + dprintk("No supported Intel CPU detected.\n"); return -ENODEV; } - dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n", - ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level); + dprintk("signature:0x%.8lx, command:0x%.8lx, " + "event:0x%.8lx, perf_level:0x%.8lx.\n", + ist_info.signature, ist_info.command, + ist_info.event, ist_info.perf_level); /* Error if no IST-SMI BIOS or no PARM sig= 'ISGE' aka 'Intel Speedstep Gate E' */ @@ -416,17 +449,20 @@ static void __exit speedstep_exit(void) cpufreq_unregister_driver(&speedstep_driver); } -module_param(smi_port, int, 0444); -module_param(smi_cmd, int, 0444); -module_param(smi_sig, uint, 0444); +module_param(smi_port, int, 0444); +module_param(smi_cmd, int, 0444); +module_param(smi_sig, uint, 0444); -MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value -- Intel's default setting is 0xb2"); -MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value -- Intel's default setting is 0x82"); -MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the SMI interface."); +MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value " + "-- Intel's default setting is 0xb2"); +MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value " + "-- Intel's default setting is 0x82"); +MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the " + "SMI interface."); -MODULE_AUTHOR ("Hiroshi Miura"); -MODULE_DESCRIPTION ("Speedstep driver for IST applet SMI interface."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Hiroshi Miura"); +MODULE_DESCRIPTION("Speedstep driver for IST applet SMI interface."); +MODULE_LICENSE("GPL"); module_init(speedstep_init); module_exit(speedstep_exit); -- cgit v1.2.3 From b9e7638a301b1245d4675087a05fa90fb4fa1845 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sun, 18 Jan 2009 00:32:26 -0500 Subject: [CPUFREQ] checkpatch cleanups for powernow-k7 The asm/timer.h warning can be ignored, it's needed for recalibrate_cpu_khz() Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 237 +++++++++++++++++------------- 1 file changed, 137 insertions(+), 100 deletions(-) (limited to 'arch/x86') diff --git 
a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index 9cd73d15743..3c28ccd4974 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -6,10 +6,12 @@ * Licensed under the terms of the GNU GPL License version 2. * Based upon datasheets & sample CPUs kindly provided by AMD. * - * Errata 5: Processor may fail to execute a FID/VID change in presence of interrupt. - * - We cli/sti on stepping A0 CPUs around the FID/VID transition. - * Errata 15: Processors with half frequency multipliers may hang upon wakeup from disconnect. - * - We disable half multipliers if ACPI is used on A0 stepping CPUs. + * Errata 5: + * CPU may fail to execute a FID/VID change in presence of interrupt. + * - We cli/sti on stepping A0 CPUs around the FID/VID transition. + * Errata 15: + * CPU with half frequency multipliers may hang upon wakeup from disconnect. + * - We disable half multipliers if ACPI is used on A0 stepping CPUs. */ #include @@ -20,11 +22,11 @@ #include #include #include +#include +#include +#include /* Needed for recalibrate_cpu_khz() */ #include -#include -#include -#include #include #ifdef CONFIG_X86_POWERNOW_K7_ACPI @@ -58,9 +60,9 @@ struct pst_s { union powernow_acpi_control_t { struct { unsigned long fid:5, - vid:5, - sgtc:20, - res1:2; + vid:5, + sgtc:20, + res1:2; } bits; unsigned long val; }; @@ -101,7 +103,8 @@ static unsigned int fsb; static unsigned int latency; static char have_a0; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k7", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "powernow-k7", msg) static int check_fsb(unsigned int fsbspeed) { @@ -109,7 +112,7 @@ static int check_fsb(unsigned int fsbspeed) unsigned int f = fsb / 1000; delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed; - return (delta < 5); + return delta < 5; } static int check_powernow(void) @@ -117,24 +120,26 @@ static int check_powernow(void) struct cpuinfo_x86 *c = &cpu_data(0); unsigned int maxei, eax, ebx, ecx, edx; - if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) { + if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 6)) { #ifdef MODULE - printk (KERN_INFO PFX "This module only works with AMD K7 CPUs\n"); + printk(KERN_INFO PFX "This module only works with " + "AMD K7 CPUs\n"); #endif return 0; } /* Get maximum capabilities */ - maxei = cpuid_eax (0x80000000); + maxei = cpuid_eax(0x80000000); if (maxei < 0x80000007) { /* Any powernow info ? */ #ifdef MODULE - printk (KERN_INFO PFX "No powernow capabilities detected\n"); + printk(KERN_INFO PFX "No powernow capabilities detected\n"); #endif return 0; } if ((c->x86_model == 6) && (c->x86_mask == 0)) { - printk (KERN_INFO PFX "K7 660[A0] core detected, enabling errata workarounds\n"); + printk(KERN_INFO PFX "K7 660[A0] core detected, " + "enabling errata workarounds\n"); have_a0 = 1; } @@ -144,37 +149,42 @@ static int check_powernow(void) if (!(edx & (1 << 1 | 1 << 2))) return 0; - printk (KERN_INFO PFX "PowerNOW! Technology present. Can scale: "); + printk(KERN_INFO PFX "PowerNOW! Technology present. 
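powernow-k7's powernow_acpi_control_t, re-indented above, overlays named fields on one raw ACPI control dword. The same trick as a userspace demo; bitfields of type unsigned long are a GCC extension, which the driver relies on:

	#include <stdio.h>

	union acpi_control {
		struct {
			unsigned long fid:5, vid:5, sgtc:20, res1:2;
		} bits;			/* GCC extension, as in the driver */
		unsigned long val;
	};

	int main(void)
	{
		union acpi_control pc;

		pc.val = (10UL << 10) | (4UL << 5) | 3UL;
		printf("fid %lu vid %lu sgtc %lu\n",
		       (unsigned long)pc.bits.fid,
		       (unsigned long)pc.bits.vid,
		       (unsigned long)pc.bits.sgtc);	/* 3, 4, 10 */
		return 0;
	}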
Can scale: "); if (edx & 1 << 1) { - printk ("frequency"); - can_scale_bus=1; + printk("frequency"); + can_scale_bus = 1; } if ((edx & (1 << 1 | 1 << 2)) == 0x6) - printk (" and "); + printk(" and "); if (edx & 1 << 2) { - printk ("voltage"); - can_scale_vid=1; + printk("voltage"); + can_scale_vid = 1; } - printk (".\n"); + printk(".\n"); return 1; } +static void invalidate_entry(unsigned int entry) +{ + powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID; +} -static int get_ranges (unsigned char *pst) +static int get_ranges(unsigned char *pst) { unsigned int j; unsigned int speed; u8 fid, vid; - powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL); + powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * + (number_scales + 1)), GFP_KERNEL); if (!powernow_table) return -ENOMEM; - for (j=0 ; j < number_scales; j++) { + for (j = 0 ; j < number_scales; j++) { fid = *pst++; powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10; @@ -182,10 +192,10 @@ static int get_ranges (unsigned char *pst) speed = powernow_table[j].frequency; - if ((fid_codes[fid] % 10)==5) { + if ((fid_codes[fid] % 10) == 5) { #ifdef CONFIG_X86_POWERNOW_K7_ACPI if (have_a0 == 1) - powernow_table[j].frequency = CPUFREQ_ENTRY_INVALID; + invalidate_entry(j); #endif } @@ -197,7 +207,7 @@ static int get_ranges (unsigned char *pst) vid = *pst++; powernow_table[j].index |= (vid << 8); /* upper 8 bits */ - dprintk (" FID: 0x%x (%d.%dx [%dMHz]) " + dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed/1000, vid, mobile_vid_table[vid]/1000, @@ -214,13 +224,13 @@ static void change_FID(int fid) { union msr_fidvidctl fidvidctl; - rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.FID != fid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.FID = fid; fidvidctl.bits.VIDC = 0; fidvidctl.bits.FIDC = 1; - wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); } } @@ -229,18 +239,18 @@ static void change_VID(int vid) { union msr_fidvidctl fidvidctl; - rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.VID != vid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.VID = vid; fidvidctl.bits.FIDC = 0; fidvidctl.bits.VIDC = 1; - wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val); + wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); } } -static void change_speed (unsigned int index) +static void change_speed(unsigned int index) { u8 fid, vid; struct cpufreq_freqs freqs; @@ -257,7 +267,7 @@ static void change_speed (unsigned int index) freqs.cpu = 0; - rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; freqs.old = fsb * fid_codes[cfid] / 10; @@ -321,12 +331,14 @@ static int powernow_acpi_init(void) goto err1; } - if (acpi_processor_perf->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (acpi_processor_perf->control_register.space_id != + ACPI_ADR_SPACE_FIXED_HARDWARE) { retval = -ENODEV; goto err2; } - if (acpi_processor_perf->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) { + if (acpi_processor_perf->status_register.space_id != + ACPI_ADR_SPACE_FIXED_HARDWARE) { retval = -ENODEV; goto err2; } @@ -338,7 +350,8 @@ static int powernow_acpi_init(void) goto err2; } - powernow_table = kzalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL); + powernow_table = 
kzalloc((sizeof(struct cpufreq_frequency_table) * + (number_scales + 1)), GFP_KERNEL); if (!powernow_table) { retval = -ENOMEM; goto err2; @@ -352,7 +365,7 @@ static int powernow_acpi_init(void) unsigned int speed, speed_mhz; pc.val = (unsigned long) state->control; - dprintk ("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", + dprintk("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", i, (u32) state->core_frequency, (u32) state->power, @@ -381,12 +394,12 @@ static int powernow_acpi_init(void) if (speed % 1000 > 0) speed_mhz++; - if ((fid_codes[fid] % 10)==5) { + if ((fid_codes[fid] % 10) == 5) { if (have_a0 == 1) - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + invalidate_entry(i); } - dprintk (" FID: 0x%x (%d.%dx [%dMHz]) " + dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed_mhz, vid, mobile_vid_table[vid]/1000, @@ -422,7 +435,8 @@ err1: err05: kfree(acpi_processor_perf); err0: - printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n"); + printk(KERN_WARNING PFX "ACPI perflib can not be used on " + "this platform\n"); acpi_processor_perf = NULL; return retval; } @@ -435,7 +449,14 @@ static int powernow_acpi_init(void) } #endif -static int powernow_decode_bios (int maxfid, int startvid) +static void print_pst_entry(struct pst_s *pst, unsigned int j) +{ + dprintk("PST:%d (@%p)\n", j, pst); + dprintk(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", + pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); +} + +static int powernow_decode_bios(int maxfid, int startvid) { struct psb_s *psb; struct pst_s *pst; @@ -446,61 +467,67 @@ static int powernow_decode_bios (int maxfid, int startvid) etuple = cpuid_eax(0x80000001); - for (i=0xC0000; i < 0xffff0 ; i+=16) { + for (i = 0xC0000; i < 0xffff0 ; i += 16) { p = phys_to_virt(i); - if (memcmp(p, "AMDK7PNOW!", 10) == 0){ - dprintk ("Found PSB header at %p\n", p); + if (memcmp(p, "AMDK7PNOW!", 10) == 0) { + dprintk("Found PSB header at %p\n", p); psb = (struct psb_s *) p; - dprintk ("Table version: 0x%x\n", psb->tableversion); + dprintk("Table version: 0x%x\n", psb->tableversion); if (psb->tableversion != 0x12) { - printk (KERN_INFO PFX "Sorry, only v1.2 tables supported right now\n"); + printk(KERN_INFO PFX "Sorry, only v1.2 tables" + " supported right now\n"); return -ENODEV; } - dprintk ("Flags: 0x%x\n", psb->flags); - if ((psb->flags & 1)==0) { - dprintk ("Mobile voltage regulator\n"); - } else { - dprintk ("Desktop voltage regulator\n"); - } + dprintk("Flags: 0x%x\n", psb->flags); + if ((psb->flags & 1) == 0) + dprintk("Mobile voltage regulator\n"); + else + dprintk("Desktop voltage regulator\n"); latency = psb->settlingtime; if (latency < 100) { - printk(KERN_INFO PFX "BIOS set settling time to %d microseconds. " - "Should be at least 100. Correcting.\n", latency); + printk(KERN_INFO PFX "BIOS set settling time " + "to %d microseconds. " + "Should be at least 100. " + "Correcting.\n", latency); latency = 100; } - dprintk ("Settling Time: %d microseconds.\n", psb->settlingtime); - dprintk ("Has %d PST tables. (Only dumping ones relevant to this CPU).\n", psb->numpst); + dprintk("Settling Time: %d microseconds.\n", + psb->settlingtime); + dprintk("Has %d PST tables. 
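Both allocation sites in this patch reserve number_scales + 1 slots because cpufreq frequency tables are sentinel-terminated. The shape of that allocation, with calloc() playing the role of kzalloc() and an illustrative sentinel constant:

	#include <stdlib.h>

	struct freq_entry {
		unsigned int index;
		unsigned int frequency;
	};

	#define TABLE_END	(~1u)	/* stands in for CPUFREQ_TABLE_END */

	static struct freq_entry *alloc_table(unsigned int number_scales)
	{
		struct freq_entry *t = calloc(number_scales + 1, sizeof(*t));

		if (t)
			t[number_scales].frequency = TABLE_END;
		return t;
	}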
(Only dumping ones " + "relevant to this CPU).\n", + psb->numpst); - p += sizeof (struct psb_s); + p += sizeof(struct psb_s); pst = (struct pst_s *) p; - for (j=0; jnumpst; j++) { + for (j = 0; j < psb->numpst; j++) { pst = (struct pst_s *) p; number_scales = pst->numpstates; - if ((etuple == pst->cpuid) && check_fsb(pst->fsbspeed) && - (maxfid==pst->maxfid) && (startvid==pst->startvid)) - { - dprintk ("PST:%d (@%p)\n", j, pst); - dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", - pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); - - ret = get_ranges ((char *) pst + sizeof (struct pst_s)); + if ((etuple == pst->cpuid) && + check_fsb(pst->fsbspeed) && + (maxfid == pst->maxfid) && + (startvid == pst->startvid)) { + print_pst_entry(pst, j); + p = (char *)pst + sizeof(struct pst_s); + ret = get_ranges(p); return ret; } else { unsigned int k; - p = (char *) pst + sizeof (struct pst_s); - for (k=0; kident); - printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n"); - printk(KERN_WARNING "cpufreq scaling has been disabled as a result of this.\n"); + printk(KERN_WARNING PFX + "%s laptop with broken PST tables in BIOS detected.\n", + d->ident); + printk(KERN_WARNING PFX + "You need to downgrade to 3A21 (09/09/2002), or try a newer " + "BIOS than 3A71 (01/20/2003)\n"); + printk(KERN_WARNING PFX + "cpufreq scaling has been disabled as a result of this.\n"); return 0; } @@ -598,7 +631,7 @@ static struct dmi_system_id __initdata powernow_dmi_table[] = { { } }; -static int __init powernow_cpu_init (struct cpufreq_policy *policy) +static int __init powernow_cpu_init(struct cpufreq_policy *policy) { union msr_fidvidstatus fidvidstatus; int result; @@ -606,7 +639,7 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy) if (policy->cpu != 0) return -ENODEV; - rdmsrl (MSR_K7_FID_VID_STATUS, fidvidstatus.val); + rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); recalibrate_cpu_khz(); @@ -618,19 +651,21 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy) dprintk("FSB: %3dMHz\n", fsb/1000); if (dmi_check_system(powernow_dmi_table) || acpi_force) { - printk (KERN_INFO PFX "PSB/PST known to be broken. Trying ACPI instead\n"); + printk(KERN_INFO PFX "PSB/PST known to be broken. " + "Trying ACPI instead\n"); result = powernow_acpi_init(); } else { - result = powernow_decode_bios(fidvidstatus.bits.MFID, fidvidstatus.bits.SVID); + result = powernow_decode_bios(fidvidstatus.bits.MFID, + fidvidstatus.bits.SVID); if (result) { - printk (KERN_INFO PFX "Trying ACPI perflib\n"); + printk(KERN_INFO PFX "Trying ACPI perflib\n"); maximum_speed = 0; minimum_speed = -1; latency = 0; result = powernow_acpi_init(); if (result) { - printk (KERN_INFO PFX "ACPI and legacy methods failed\n"); - printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.html\n"); + printk(KERN_INFO PFX + "ACPI and legacy methods failed\n"); } } else { /* SGTC use the bus clock as timer */ @@ -642,10 +677,11 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy) if (result) return result; - printk (KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n", + printk(KERN_INFO PFX "Minimum speed %d MHz. 
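powernow_decode_bios() above walks the BIOS ROM window at 16-byte steps looking for the "AMDK7PNOW!" magic before parsing the PSB. The same scan over an ordinary buffer:

	#include <stdio.h>
	#include <string.h>

	static const char *find_psb(const char *base, size_t len)
	{
		size_t i;

		for (i = 0; i + 16 <= len; i += 16)
			if (memcmp(base + i, "AMDK7PNOW!", 10) == 0)
				return base + i;
		return NULL;
	}

	int main(void)
	{
		char rom[64] = { 0 };

		memcpy(rom + 32, "AMDK7PNOW!", 10);
		printf("found at offset %ld\n",
		       (long)(find_psb(rom, sizeof(rom)) - rom));
		return 0;
	}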
Maximum speed %d MHz.\n", minimum_speed/1000, maximum_speed/1000); - policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency); + policy->cpuinfo.transition_latency = + cpufreq_scale(2000000UL, fsb, latency); policy->cur = powernow_get(0); @@ -654,7 +690,8 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy) return cpufreq_frequency_table_cpuinfo(policy, powernow_table); } -static int powernow_cpu_exit (struct cpufreq_policy *policy) { +static int powernow_cpu_exit(struct cpufreq_policy *policy) +{ cpufreq_frequency_table_put_attr(policy->cpu); #ifdef CONFIG_X86_POWERNOW_K7_ACPI @@ -669,7 +706,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) { return 0; } -static struct freq_attr* powernow_table_attr[] = { +static struct freq_attr *powernow_table_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -685,15 +722,15 @@ static struct cpufreq_driver powernow_driver = { .attr = powernow_table_attr, }; -static int __init powernow_init (void) +static int __init powernow_init(void) { - if (check_powernow()==0) + if (check_powernow() == 0) return -ENODEV; return cpufreq_register_driver(&powernow_driver); } -static void __exit powernow_exit (void) +static void __exit powernow_exit(void) { cpufreq_unregister_driver(&powernow_driver); } @@ -701,9 +738,9 @@ static void __exit powernow_exit (void) module_param(acpi_force, int, 0444); MODULE_PARM_DESC(acpi_force, "Force ACPI to be used."); -MODULE_AUTHOR ("Dave Jones "); -MODULE_DESCRIPTION ("Powernow driver for AMD K7 processors."); -MODULE_LICENSE ("GPL"); +MODULE_AUTHOR("Dave Jones "); +MODULE_DESCRIPTION("Powernow driver for AMD K7 processors."); +MODULE_LICENSE("GPL"); late_initcall(powernow_init); module_exit(powernow_exit); -- cgit v1.2.3 From 0e64a0c982c06a6b8f5e2a7f29eb108fdf257b2f Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 4 Feb 2009 14:37:50 -0500 Subject: [CPUFREQ] checkpatch cleanups for powernow-k8 This driver has so many long function names, and deep nested if's The remaining warnings will need some code restructuring to clean up. 
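Much of the diff below is mechanical re-wrapping of long printk strings. Adjacent string literals are concatenated at translation time, so the split form emits exactly the same bytes as the single-line version:

	#include <stdio.h>

	int main(void)
	{
		/* the compiler glues adjacent literals together... */
		printf("Hardware error - pending bit very stuck - "
		       "no further pstate changes possible\n");
		/* ...so this is byte-for-byte the one-line string */
		return 0;
	}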
Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 351 +++++++++++++++++++----------- 1 file changed, 226 insertions(+), 125 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 6428aa17b40..4dd7e3bdee2 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -33,10 +33,10 @@ #include #include #include /* for current / set_cpus_allowed() */ +#include +#include #include -#include -#include #ifdef CONFIG_X86_POWERNOW_K8_ACPI #include @@ -71,7 +71,8 @@ static u32 find_khz_freq_from_fid(u32 fid) return 1000 * find_freq_from_fid(fid); } -static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 pstate) +static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, + u32 pstate) { return data[pstate].frequency; } @@ -186,7 +187,9 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) return 1; } - lo = fid | (data->currvid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID; + lo = fid; + lo |= (data->currvid << MSR_C_LO_VID_SHIFT); + lo |= MSR_C_LO_INIT_FID_VID; dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n", fid, lo, data->plllock * PLL_LOCK_CONVERSION); @@ -194,7 +197,9 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) do { wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION); if (i++ > 100) { - printk(KERN_ERR PFX "Hardware error - pending bit very stuck - no further pstate changes possible\n"); + printk(KERN_ERR PFX + "Hardware error - pending bit very stuck - " + "no further pstate changes possible\n"); return 1; } } while (query_current_values_with_pending_wait(data)); @@ -202,14 +207,16 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) count_off_irt(data); if (savevid != data->currvid) { - printk(KERN_ERR PFX "vid change on fid trans, old 0x%x, new 0x%x\n", - savevid, data->currvid); + printk(KERN_ERR PFX + "vid change on fid trans, old 0x%x, new 0x%x\n", + savevid, data->currvid); return 1; } if (fid != data->currfid) { - printk(KERN_ERR PFX "fid trans failed, fid 0x%x, curr 0x%x\n", fid, - data->currfid); + printk(KERN_ERR PFX + "fid trans failed, fid 0x%x, curr 0x%x\n", fid, + data->currfid); return 1; } @@ -228,7 +235,9 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) return 1; } - lo = data->currfid | (vid << MSR_C_LO_VID_SHIFT) | MSR_C_LO_INIT_FID_VID; + lo = data->currfid; + lo |= (vid << MSR_C_LO_VID_SHIFT); + lo |= MSR_C_LO_INIT_FID_VID; dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n", vid, lo, STOP_GRANT_5NS); @@ -236,20 +245,24 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) do { wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); if (i++ > 100) { - printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n"); + printk(KERN_ERR PFX "internal error - pending bit " + "very stuck - no further pstate " + "changes possible\n"); return 1; } } while (query_current_values_with_pending_wait(data)); if (savefid != data->currfid) { - printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n", + printk(KERN_ERR PFX "fid changed on vid trans, old " + "0x%x new 0x%x\n", savefid, data->currfid); return 1; } if (vid != data->currvid) { - printk(KERN_ERR PFX "vid trans failed, vid 0x%x, curr 0x%x\n", vid, - data->currvid); + printk(KERN_ERR PFX "vid trans failed, vid 0x%x, " + "curr 0x%x\n", + vid, data->currvid); return 1; } @@ -261,7 +274,8 @@ static int 
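write_new_fid() above (and write_new_vid() just below) now builds the FIDVID control word in three visible steps instead of one long expression. The composition on its own; the shift and bit values here are placeholders, not the real MSR layout:

	#define VID_SHIFT	8		/* illustrative */
	#define INIT_FID_VID	(1u << 16)	/* illustrative */

	static unsigned int fidvid_lo(unsigned int fid, unsigned int vid)
	{
		unsigned int lo = fid;

		lo |= vid << VID_SHIFT;
		lo |= INIT_FID_VID;	/* tell hardware to latch both */
		return lo;
	}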
write_new_vid(struct powernow_k8_data *data, u32 vid) * Decreasing vid codes represent increasing voltages: * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off. */ -static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid, u32 step) +static int decrease_vid_code_by_step(struct powernow_k8_data *data, + u32 reqvid, u32 step) { if ((data->currvid - reqvid) > step) reqvid = data->currvid - step; @@ -283,7 +297,8 @@ static int transition_pstate(struct powernow_k8_data *data, u32 pstate) } /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */ -static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 reqvid) +static int transition_fid_vid(struct powernow_k8_data *data, + u32 reqfid, u32 reqvid) { if (core_voltage_pre_transition(data, reqvid)) return 1; @@ -298,7 +313,8 @@ static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 req return 1; if ((reqfid != data->currfid) || (reqvid != data->currvid)) { - printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n", + printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, " + "curr 0x%x 0x%x\n", smp_processor_id(), reqfid, reqvid, data->currfid, data->currvid); return 1; @@ -311,13 +327,15 @@ static int transition_fid_vid(struct powernow_k8_data *data, u32 reqfid, u32 req } /* Phase 1 - core voltage transition ... setup voltage */ -static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid) +static int core_voltage_pre_transition(struct powernow_k8_data *data, + u32 reqvid) { u32 rvosteps = data->rvo; u32 savefid = data->currfid; u32 maxvid, lo; - dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n", + dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " + "reqvid 0x%x, rvo 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqvid, data->rvo); @@ -340,7 +358,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid } else { dprintk("ph1: changing vid for rvo, req 0x%x\n", data->currvid - 1); - if (decrease_vid_code_by_step(data, data->currvid - 1, 1)) + if (decrease_vid_code_by_step(data, data->currvid-1, 1)) return 1; rvosteps--; } @@ -350,7 +368,8 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid return 1; if (savefid != data->currfid) { - printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", data->currfid); + printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", + data->currfid); return 1; } @@ -363,20 +382,24 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid /* Phase 2 - core frequency transition */ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) { - u32 vcoreqfid, vcocurrfid, vcofiddiff, fid_interval, savevid = data->currvid; + u32 vcoreqfid, vcocurrfid, vcofiddiff; + u32 fid_interval, savevid = data->currvid; - if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) { - printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n", - reqfid, data->currfid); + if ((reqfid < HI_FID_TABLE_BOTTOM) && + (data->currfid < HI_FID_TABLE_BOTTOM)) { + printk(KERN_ERR PFX "ph2: illegal lo-lo transition " + "0x%x 0x%x\n", reqfid, data->currfid); return 1; } if (data->currfid == reqfid) { - printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", data->currfid); + printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", + data->currfid); return 0; } - dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n", + dprintk("ph2 (cpu%d): starting, 
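decrease_vid_code_by_step() above bounds how far the voltage moves per MSR write; lower vid codes mean higher voltage. The clamp in isolation, assuming reqvid <= currvid as in phase 1:

	static unsigned int next_vid(unsigned int currvid, unsigned int reqvid,
				     unsigned int step)
	{
		if ((currvid - reqvid) > step)
			return currvid - step;	/* long move: one step only */
		return reqvid;			/* close enough: go straight */
	}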
currfid 0x%x, currvid 0x%x, " + "reqfid 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqfid); @@ -390,14 +413,14 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) if (reqfid > data->currfid) { if (data->currfid > LO_FID_TABLE_TOP) { - if (write_new_fid(data, data->currfid + fid_interval)) { + if (write_new_fid(data, + data->currfid + fid_interval)) return 1; - } } else { if (write_new_fid - (data, 2 + convert_fid_to_vco_fid(data->currfid))) { + (data, + 2 + convert_fid_to_vco_fid(data->currfid))) return 1; - } } } else { if (write_new_fid(data, data->currfid - fid_interval)) @@ -417,7 +440,8 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) if (data->currfid != reqfid) { printk(KERN_ERR PFX - "ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n", + "ph2: mismatch, failed fid transition, " + "curr 0x%x, req 0x%x\n", data->currfid, reqfid); return 1; } @@ -435,7 +459,8 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) } /* Phase 3 - core voltage transition flow ... jump to the final vid. */ -static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid) +static int core_voltage_post_transition(struct powernow_k8_data *data, + u32 reqvid) { u32 savefid = data->currfid; u32 savereqvid = reqvid; @@ -457,7 +482,8 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi if (data->currvid != reqvid) { printk(KERN_ERR PFX - "ph3: failed vid transition\n, req 0x%x, curr 0x%x", + "ph3: failed vid transition\n, " + "req 0x%x, curr 0x%x", reqvid, data->currvid); return 1; } @@ -508,7 +534,8 @@ static int check_supported_cpu(unsigned int cpu) if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { - printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax); + printk(KERN_INFO PFX + "Processor cpuid %x not supported\n", eax); goto out; } @@ -520,8 +547,10 @@ static int check_supported_cpu(unsigned int cpu) } cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); - if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) { - printk(KERN_INFO PFX "Power state transitions not supported\n"); + if ((edx & P_STATE_TRANSITION_CAPABLE) + != P_STATE_TRANSITION_CAPABLE) { + printk(KERN_INFO PFX + "Power state transitions not supported\n"); goto out; } } else { /* must be a HW Pstate capable processor */ @@ -539,7 +568,8 @@ out: return rc; } -static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) +static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, + u8 maxvid) { unsigned int j; u8 lastfid = 0xff; @@ -550,12 +580,14 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 j, pst[j].vid); return -EINVAL; } - if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */ + if (pst[j].vid < data->rvo) { + /* vid + rvo >= 0 */ printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate" " %d\n", j); return -ENODEV; } - if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */ + if (pst[j].vid < maxvid + data->rvo) { + /* vid + rvo >= maxvid */ printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate" " %d\n", j); return -ENODEV; @@ -579,23 +611,31 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 return -EINVAL; } if (lastfid > LO_FID_TABLE_TOP) - printk(KERN_INFO FW_BUG PFX "first fid not from lo freq table\n"); + printk(KERN_INFO 
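check_supported_cpu() below keys off CPUID leaf 1's family fields (the CPUID_XFAM tests). For reference, the architectural rule for combining base and extended family from a raw eax value:

	static unsigned int x86_family(unsigned int eax)
	{
		unsigned int base = (eax >> 8) & 0xf;

		/* extended family only applies when base family is 0xf */
		return base == 0xf ? base + ((eax >> 20) & 0xff) : base;
	}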
FW_BUG PFX + "first fid not from lo freq table\n"); return 0; } +static void invalidate_entry(struct powernow_k8_data *data, unsigned int entry) +{ + data->powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID; +} + static void print_basics(struct powernow_k8_data *data) { int j; for (j = 0; j < data->numps; j++) { - if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) { + if (data->powernow_table[j].frequency != + CPUFREQ_ENTRY_INVALID) { if (cpu_family == CPU_HW_PSTATE) { - printk(KERN_INFO PFX " %d : pstate %d (%d MHz)\n", - j, + printk(KERN_INFO PFX + " %d : pstate %d (%d MHz)\n", j, data->powernow_table[j].index, data->powernow_table[j].frequency/1000); } else { - printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x\n", + printk(KERN_INFO PFX + " %d : fid 0x%x (%d MHz), vid 0x%x\n", j, data->powernow_table[j].index & 0xff, data->powernow_table[j].frequency/1000, @@ -604,20 +644,25 @@ static void print_basics(struct powernow_k8_data *data) } } if (data->batps) - printk(KERN_INFO PFX "Only %d pstates on battery\n", data->batps); + printk(KERN_INFO PFX "Only %d pstates on battery\n", + data->batps); } -static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid) +static int fill_powernow_table(struct powernow_k8_data *data, + struct pst_s *pst, u8 maxvid) { struct cpufreq_frequency_table *powernow_table; unsigned int j; - if (data->batps) { /* use ACPI support to get full speed on mains power */ - printk(KERN_WARNING PFX "Only %d pstates usable (use ACPI driver for full range\n", data->batps); + if (data->batps) { + /* use ACPI support to get full speed on mains power */ + printk(KERN_WARNING PFX + "Only %d pstates usable (use ACPI driver for full " + "range\n", data->batps); data->numps = data->batps; } - for ( j=1; jnumps; j++ ) { + for (j = 1; j < data->numps; j++) { if (pst[j-1].fid >= pst[j].fid) { printk(KERN_ERR PFX "PST out of sequence\n"); return -EINVAL; @@ -640,9 +685,11 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, } for (j = 0; j < data->numps; j++) { + int freq; powernow_table[j].index = pst[j].fid; /* lower 8 bits */ powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */ - powernow_table[j].frequency = find_khz_freq_from_fid(pst[j].fid); + freq = find_khz_freq_from_fid(pst[j].fid); + powernow_table[j].frequency = freq; } powernow_table[data->numps].frequency = CPUFREQ_TABLE_END; powernow_table[data->numps].index = 0; @@ -658,7 +705,8 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, print_basics(data); for (j = 0; j < data->numps; j++) - if ((pst[j].fid==data->currfid) && (pst[j].vid==data->currvid)) + if ((pst[j].fid == data->currfid) && + (pst[j].vid == data->currvid)) return 0; dprintk("currfid/vid do not match PST, ignoring\n"); @@ -698,7 +746,8 @@ static int find_psb_table(struct powernow_k8_data *data) } data->vstable = psb->vstable; - dprintk("voltage stabilization time: %d(*20us)\n", data->vstable); + dprintk("voltage stabilization time: %d(*20us)\n", + data->vstable); dprintk("flags2: 0x%x\n", psb->flags2); data->rvo = psb->flags2 & 3; @@ -713,11 +762,12 @@ static int find_psb_table(struct powernow_k8_data *data) dprintk("numpst: 0x%x\n", psb->num_tables); cpst = psb->num_tables; - if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0) ){ + if ((psb->cpuid == 0x00000fc0) || + (psb->cpuid == 0x00000fe0)) { thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); - if ((thiscpuid == 0x00000fc0) || (thiscpuid == 0x00000fe0) ) { + if 
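fill_powernow_table() above rejects firmware tables whose fids are not strictly increasing (the "PST out of sequence" error). The invariant as a standalone predicate:

	static int fids_monotonic(const unsigned char *fid, int n)
	{
		int j;

		for (j = 1; j < n; j++)
			if (fid[j - 1] >= fid[j])
				return 0;	/* "PST out of sequence" */
		return 1;
	}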
((thiscpuid == 0x00000fc0) || + (thiscpuid == 0x00000fe0)) cpst = 1; - } } if (cpst != 1) { printk(KERN_ERR FW_BUG PFX "numpst must be 1\n"); @@ -732,7 +782,8 @@ static int find_psb_table(struct powernow_k8_data *data) data->numps = psb->numps; dprintk("numpstates: 0x%x\n", data->numps); - return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid); + return fill_powernow_table(data, + (struct pst_s *)(psb+1), maxvid); } /* * If you see this message, complain to BIOS manufacturer. If @@ -750,23 +801,27 @@ static int find_psb_table(struct powernow_k8_data *data) } #ifdef CONFIG_X86_POWERNOW_K8_ACPI -static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) +static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, + unsigned int index) { + acpi_integer control; + if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) return; - data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; - data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; - data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; - data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; - data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); - data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; -} + control = data->acpi_data.states[index].control; data->irt = (control + >> IRT_SHIFT) & IRT_MASK; data->rvo = (control >> + RVO_SHIFT) & RVO_MASK; data->exttype = (control + >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; + data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK; data->vidmvs = 1 + << ((control >> MVS_SHIFT) & MVS_MASK); data->vstable = + (control >> VST_SHIFT) & VST_MASK; } static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { struct cpufreq_frequency_table *powernow_table; int ret_val = -ENODEV; + acpi_integer space_id; if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { dprintk("register performance failed: bad ACPI data\n"); @@ -779,11 +834,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) goto err_out; } - if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || - (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { + space_id = data->acpi_data.control_register.space_id; + if ((space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || + (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { dprintk("Invalid control/status registers (%x - %x)\n", data->acpi_data.control_register.space_id, - data->acpi_data.status_register.space_id); + space_id); goto err_out; } @@ -802,7 +858,8 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) if (ret_val) goto err_out_mem; - powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; + powernow_table[data->acpi_data.state_count].frequency = + CPUFREQ_TABLE_END; powernow_table[data->acpi_data.state_count].index = 0; data->powernow_table = powernow_table; @@ -830,13 +887,15 @@ err_out_mem: err_out: acpi_processor_unregister_performance(&data->acpi_data, data->cpu); - /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ + /* data->acpi_data.state_count informs us at ->exit() + * whether ACPI was used */ data->acpi_data.state_count = 0; return ret_val; } -static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) +static int 
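The checkpatch re-wrap of powernow_k8_acpi_pst_values() above fused several assignments onto shared lines, which is hard to follow in this form. Unwound to one field per line (identifiers are the driver's own, from powernow-k8.h and the ACPI headers), the function it describes is:

	static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
						unsigned int index)
	{
		acpi_integer control;

		if (!data->acpi_data.state_count ||
		    (cpu_family == CPU_HW_PSTATE))
			return;

		control = data->acpi_data.states[index].control;
		data->irt = (control >> IRT_SHIFT) & IRT_MASK;
		data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
		data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
		data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
		data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
		data->vstable = (control >> VST_SHIFT) & VST_MASK;
	}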
fill_powernow_table_pstate(struct powernow_k8_data *data, + struct cpufreq_frequency_table *powernow_table) { int i; u32 hi = 0, lo = 0; @@ -848,84 +907,101 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf index = data->acpi_data.states[i].control & HW_PSTATE_MASK; if (index > data->max_hw_pstate) { - printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); - printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + printk(KERN_ERR PFX "invalid pstate %d - " + "bad value %d.\n", i, index); + printk(KERN_ERR PFX "Please report to BIOS " + "manufacturer\n"); + invalidate_entry(data, i); continue; } rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); if (!(hi & HW_PSTATE_VALID_MASK)) { dprintk("invalid pstate %d, ignoring\n", index); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + invalidate_entry(data, i); continue; } powernow_table[i].index = index; - powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; + powernow_table[i].frequency = + data->acpi_data.states[i].core_frequency * 1000; } return 0; } -static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) +static int fill_powernow_table_fidvid(struct powernow_k8_data *data, + struct cpufreq_frequency_table *powernow_table) { int i; int cntlofreq = 0; + for (i = 0; i < data->acpi_data.state_count; i++) { u32 fid; u32 vid; + u32 freq, index; + acpi_integer status, control; if (data->exttype) { - fid = data->acpi_data.states[i].status & EXT_FID_MASK; - vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; + status = data->acpi_data.states[i].status; + fid = status & EXT_FID_MASK; + vid = (status >> VID_SHIFT) & EXT_VID_MASK; } else { - fid = data->acpi_data.states[i].control & FID_MASK; - vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; + control = data->acpi_data.states[i].control; + fid = control & FID_MASK; + vid = (control >> VID_SHIFT) & VID_MASK; } dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); - powernow_table[i].index = fid; /* lower 8 bits */ - powernow_table[i].index |= (vid << 8); /* upper 8 bits */ - powernow_table[i].frequency = find_khz_freq_from_fid(fid); + index = fid | (vid<<8); + powernow_table[i].index = index; + + freq = find_khz_freq_from_fid(fid); + powernow_table[i].frequency = freq; /* verify frequency is OK */ - if ((powernow_table[i].frequency > (MAX_FREQ * 1000)) || - (powernow_table[i].frequency < (MIN_FREQ * 1000))) { - dprintk("invalid freq %u kHz, ignoring\n", powernow_table[i].frequency); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { + dprintk("invalid freq %u kHz, ignoring\n", freq); + invalidate_entry(data, i); continue; } - /* verify voltage is OK - BIOSs are using "off" to indicate invalid */ + /* verify voltage is OK - + * BIOSs are using "off" to indicate invalid */ if (vid == VID_OFF) { dprintk("invalid vid %u, ignoring\n", vid); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + invalidate_entry(data, i); continue; } /* verify only 1 entry from the lo frequency table */ if (fid < HI_FID_TABLE_BOTTOM) { if (cntlofreq) { - /* if both entries are the same, ignore this one ... 
*/ - if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) || - (powernow_table[i].index != powernow_table[cntlofreq].index)) { - printk(KERN_ERR PFX "Too many lo freq table entries\n"); + /* if both entries are the same, + * ignore this one ... */ + if ((freq != powernow_table[cntlofreq].frequency) || + (index != powernow_table[cntlofreq].index)) { + printk(KERN_ERR PFX + "Too many lo freq table " + "entries\n"); return 1; } - dprintk("double low frequency table entry, ignoring it.\n"); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + dprintk("double low frequency table entry, " + "ignoring it.\n"); + invalidate_entry(data, i); continue; } else cntlofreq = i; } - if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { - printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", - powernow_table[i].frequency, - (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); - powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; + if (freq != (data->acpi_data.states[i].core_frequency * 1000)) { + printk(KERN_INFO PFX "invalid freq entries " + "%u kHz vs. %u kHz\n", freq, + (unsigned int) + (data->acpi_data.states[i].core_frequency + * 1000)); + invalidate_entry(data, i); continue; } } @@ -935,7 +1011,8 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { if (data->acpi_data.state_count) - acpi_processor_unregister_performance(&data->acpi_data, data->cpu); + acpi_processor_unregister_performance(&data->acpi_data, + data->cpu); free_cpumask_var(data->acpi_data.shared_cpu_map); } @@ -954,14 +1031,25 @@ static int get_transition_latency(struct powernow_k8_data *data) } #else -static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } -static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } -static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } +static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) +{ + return -ENODEV; +} +static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) +{ + return; +} +static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, + unsigned int index) +{ + return; +} static int get_transition_latency(struct powernow_k8_data *data) { return 0; } #endif /* CONFIG_X86_POWERNOW_K8_ACPI */ /* Take a frequency, and issue the fid/vid transition command */ -static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned int index) +static int transition_frequency_fidvid(struct powernow_k8_data *data, + unsigned int index) { u32 fid = 0; u32 vid = 0; @@ -989,7 +1077,8 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i return 0; } - if ((fid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) { + if ((fid < HI_FID_TABLE_BOTTOM) && + (data->currfid < HI_FID_TABLE_BOTTOM)) { printk(KERN_ERR PFX "ignoring illegal change in lo freq table-%x to 0x%x\n", data->currfid, fid); @@ -1017,7 +1106,8 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i } /* Take a frequency, and issue the hardware pstate transition command */ -static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index) +static int transition_frequency_pstate(struct powernow_k8_data *data, + unsigned int index) { u32 pstate = 0; int res, i; @@ -1029,7 +1119,8 @@ static int 
transition_frequency_pstate(struct powernow_k8_data *data, unsigned i pstate = index & HW_PSTATE_MASK; if (pstate > data->max_hw_pstate) return 0; - freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); + freqs.old = find_khz_freq_from_pstate(data->powernow_table, + data->currpstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); for_each_cpu_mask_nr(i, *(data->available_cores)) { @@ -1048,7 +1139,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i } /* Driver entry point to switch to the target frequency */ -static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) +static int powernowk8_target(struct cpufreq_policy *pol, + unsigned targfreq, unsigned relation) { cpumask_t oldmask; struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); @@ -1087,14 +1179,18 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi dprintk("targ: curr fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); - if ((checkvid != data->currvid) || (checkfid != data->currfid)) { + if ((checkvid != data->currvid) || + (checkfid != data->currfid)) { printk(KERN_INFO PFX - "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n", - checkfid, data->currfid, checkvid, data->currvid); + "error - out of sync, fix 0x%x 0x%x, " + "vid 0x%x 0x%x\n", + checkfid, data->currfid, + checkvid, data->currvid); } } - if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) + if (cpufreq_frequency_table_target(pol, data->powernow_table, + targfreq, relation, &newstate)) goto err_out; mutex_lock(&fidvid_mutex); @@ -1114,7 +1210,8 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi mutex_unlock(&fidvid_mutex); if (cpu_family == CPU_HW_PSTATE) - pol->cur = find_khz_freq_from_pstate(data->powernow_table, newstate); + pol->cur = find_khz_freq_from_pstate(data->powernow_table, + newstate); else pol->cur = find_khz_freq_from_fid(data->currfid); ret = 0; @@ -1164,10 +1261,11 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) */ if (num_online_cpus() != 1) { #ifndef CONFIG_ACPI_PROCESSOR - printk(KERN_ERR PFX "ACPI Processor support is required " - "for SMP systems but is absent. Please load the " - "ACPI Processor module before starting this " - "driver.\n"); + printk(KERN_ERR PFX + "ACPI Processor support is required for " + "SMP systems but is absent. 
Please load the " + "ACPI Processor module before starting this " + "driver.\n"); #else printk(KERN_ERR FW_BUG PFX "Your BIOS does not provide" " ACPI _PSS objects in a way that Linux " @@ -1228,7 +1326,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) data->available_cores = pol->cpus; if (cpu_family == CPU_HW_PSTATE) - pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate); + pol->cur = find_khz_freq_from_pstate(data->powernow_table, + data->currpstate); else pol->cur = find_khz_freq_from_fid(data->currfid); dprintk("policy current frequency %d kHz\n", pol->cur); @@ -1245,7 +1344,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); if (cpu_family == CPU_HW_PSTATE) - dprintk("cpu_init done, current pstate 0x%x\n", data->currpstate); + dprintk("cpu_init done, current pstate 0x%x\n", + data->currpstate); else dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); @@ -1262,7 +1362,7 @@ err_out: return -ENODEV; } -static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol) +static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) { struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); @@ -1279,7 +1379,7 @@ static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol) return 0; } -static unsigned int powernowk8_get (unsigned int cpu) +static unsigned int powernowk8_get(unsigned int cpu) { struct powernow_k8_data *data; cpumask_t oldmask = current->cpus_allowed; @@ -1315,7 +1415,7 @@ out: return khz; } -static struct freq_attr* powernow_k8_attr[] = { +static struct freq_attr *powernow_k8_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; @@ -1360,7 +1460,8 @@ static void __exit powernowk8_exit(void) cpufreq_unregister_driver(&cpufreq_amd64_driver); } -MODULE_AUTHOR("Paul Devriendt and Mark Langsdorf "); +MODULE_AUTHOR("Paul Devriendt and " + "Mark Langsdorf "); MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 57f4fa699195b761cbea90db5e38b4bc15610c7c Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Wed, 4 Feb 2009 01:17:45 +0100 Subject: [CPUFREQ] powernow-k8: Always compile powernow-k8 driver with ACPI support powernow-k8 driver should always try to get cpufreq info from ACPI. Otherwise it will not be able to detect the transition latency correctly which results in ondemand governor taking a wrong sampling rate which will then result in sever performance loss. Let the user not shoot himself in the foot and always compile in ACPI support for powernow-k8. This also fixes a wrong message if ACPI_PROCESSOR is compiled as a module and #ifndef CONFIG_ACPI_PROCESSOR path is chosen. Signed-off-by: Thomas Renninger Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/Kconfig | 19 ++----------------- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 28 ---------------------------- arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 5 +---- 3 files changed, 3 insertions(+), 49 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig index 65792c2cc46..52c83987547 100644 --- a/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/arch/x86/kernel/cpu/cpufreq/Kconfig @@ -87,30 +87,15 @@ config X86_POWERNOW_K7_ACPI config X86_POWERNOW_K8 tristate "AMD Opteron/Athlon64 PowerNow!" 
select CPU_FREQ_TABLE + depends on ACPI && ACPI_PROCESSOR help - This adds the CPUFreq driver for mobile AMD Opteron/Athlon64 processors. + This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors. To compile this driver as a module, choose M here: the module will be called powernow-k8. For details, take a look at . - If in doubt, say N. - -config X86_POWERNOW_K8_ACPI - bool - prompt "ACPI Support" if X86_32 - depends on ACPI && X86_POWERNOW_K8 && ACPI_PROCESSOR - depends on !(X86_POWERNOW_K8 = y && ACPI_PROCESSOR = m) - default y - help - This provides access to the K8s Processor Performance States via ACPI. - This driver is probably required for CPUFreq to work with multi-socket and - SMP systems. It is not required on at least some single-socket yet - multi-core systems, even if SMP is enabled. - - It is safe to say Y here. - config X86_GX_SUSPMOD tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" depends on X86_32 && PCI diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 4dd7e3bdee2..acc06b03194 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -38,11 +38,9 @@ #include -#ifdef CONFIG_X86_POWERNOW_K8_ACPI #include #include #include -#endif #define PFX "powernow-k8: " #define VERSION "version 2.20.00" @@ -800,7 +798,6 @@ static int find_psb_table(struct powernow_k8_data *data) return -ENODEV; } -#ifdef CONFIG_X86_POWERNOW_K8_ACPI static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { @@ -1030,23 +1027,6 @@ static int get_transition_latency(struct powernow_k8_data *data) return 1000 * max_latency; } -#else -static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) -{ - return -ENODEV; -} -static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) -{ - return; -} -static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, - unsigned int index) -{ - return; -} -static int get_transition_latency(struct powernow_k8_data *data) { return 0; } -#endif /* CONFIG_X86_POWERNOW_K8_ACPI */ - /* Take a frequency, and issue the fid/vid transition command */ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned int index) @@ -1260,19 +1240,11 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) * an UP version, and is deprecated by AMD. */ if (num_online_cpus() != 1) { -#ifndef CONFIG_ACPI_PROCESSOR - printk(KERN_ERR PFX - "ACPI Processor support is required for " - "SMP systems but is absent. Please load the " - "ACPI Processor module before starting this " - "driver.\n"); -#else printk(KERN_ERR FW_BUG PFX "Your BIOS does not provide" " ACPI _PSS objects in a way that Linux " "understands. Please report this to the Linux " "ACPI maintainers and complain to your BIOS " "vendor.\n"); -#endif kfree(data); return -ENODEV; } diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index 8ecc75b6c7c..6c6698feade 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h @@ -45,11 +45,10 @@ struct powernow_k8_data { * frequency is in kHz */ struct cpufreq_frequency_table *powernow_table; -#ifdef CONFIG_X86_POWERNOW_K8_ACPI /* the acpi table needs to be kept. 
it's only available if ACPI was * used to determine valid frequency/vid/fid states */ struct acpi_processor_performance acpi_data; -#endif + /* we need to keep track of associated cores, but let cpufreq * handle hotplug events - so just point at cpufreq pol->cpus * structure */ @@ -222,10 +221,8 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid); static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index); -#ifdef CONFIG_X86_POWERNOW_K8_ACPI static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); -#endif #ifdef CONFIG_SMP static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) -- cgit v1.2.3 From 79cc56af9fdbeaa91f50289b932d0959b41f9467 Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Wed, 4 Feb 2009 11:56:11 +0100 Subject: [CPUFREQ] powernow-k8: Only print error message once, not per core. This is the typical message you get if you plug in a CPU which is newer than your BIOS. It's annoying seeing this message for each core. Signed-off-by: Thomas Renninger Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index acc06b03194..c44853fc827 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -794,7 +794,7 @@ static int find_psb_table(struct powernow_k8_data *data) * BIOS and Kernel Developer's Guide, which is available on * www.amd.com */ - printk(KERN_ERR PFX "BIOS error - no PSB or ACPI _PSS objects\n"); + printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n"); return -ENODEV; } @@ -1218,6 +1218,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) struct powernow_k8_data *data; cpumask_t oldmask; int rc; + static int print_once; if (!cpu_online(pol->cpu)) return -ENODEV; @@ -1240,11 +1241,19 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) * an UP version, and is deprecated by AMD. */ if (num_online_cpus() != 1) { - printk(KERN_ERR FW_BUG PFX "Your BIOS does not provide" - " ACPI _PSS objects in a way that Linux " - "understands. Please report this to the Linux " - "ACPI maintainers and complain to your BIOS " - "vendor.\n"); + /* + * Replace this one with print_once as soon as such a + * thing gets introduced + */ + if (!print_once) { + WARN_ONCE(1, KERN_ERR FW_BUG PFX "Your BIOS " + "does not provide ACPI _PSS objects " + "in a way that Linux understands. 
" + "Please report this to the Linux ACPI" + " maintainers and complain to your " + "BIOS vendor.\n"); + print_once++; + } kfree(data); return -ENODEV; } -- cgit v1.2.3 From 3a58df35a64a1e0ac32c30ea629a513dec2fe711 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 17 Jan 2009 22:36:14 -0500 Subject: [CPUFREQ] checkpatch cleanups for acpi-cpufreq Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 36 +++++++++++++++--------------- 1 file changed, 18 insertions(+), 18 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 4b1c319d30c..3babe1f1e91 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -1,5 +1,5 @@ /* - * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $) + * acpi-cpufreq.c - ACPI Processor P-States Driver * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh @@ -36,16 +36,18 @@ #include #include +#include +#include +#include + #include -#include #include #include #include -#include -#include -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "acpi-cpufreq", msg) MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); @@ -95,7 +97,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) perf = data->acpi_data; - for (i=0; istate_count; i++) { + for (i = 0; i < perf->state_count; i++) { if (value == perf->states[i].status) return data->freq_table[i].frequency; } @@ -110,7 +112,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) msr &= INTEL_MSR_RANGE; perf = data->acpi_data; - for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { if (msr == perf->states[data->freq_table[i].index].status) return data->freq_table[i].frequency; } @@ -138,15 +140,13 @@ struct io_addr { u8 bit_width; }; -typedef union { - struct msr_addr msr; - struct io_addr io; -} drv_addr_union; - struct drv_cmd { unsigned int type; const struct cpumask *mask; - drv_addr_union addr; + union { + struct msr_addr msr; + struct io_addr io; + } addr; u32 val; }; @@ -369,7 +369,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, unsigned int cur_freq; unsigned int i; - for (i=0; i<100; i++) { + for (i = 0; i < 100; i++) { cur_freq = extract_freq(get_cur_val(mask), data); if (cur_freq == freq) return 1; @@ -494,7 +494,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) unsigned long freq; unsigned long freqn = perf->states[0].core_frequency * 1000; - for (i=0; i<(perf->state_count-1); i++) { + for (i = 0; i < (perf->state_count-1); i++) { freq = freqn; freqn = perf->states[i+1].core_frequency * 1000; if ((2 * cpu_khz) > (freqn + freq)) { @@ -673,7 +673,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) /* detect transition latency */ policy->cpuinfo.transition_latency = 0; - for (i=0; istate_count; i++) { + for (i = 0; i < perf->state_count; i++) { if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) policy->cpuinfo.transition_latency = @@ -682,8 +682,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) data->max_freq = perf->states[0].core_frequency * 1000; /* table init */ - for (i=0; istate_count; 
i++) { - if (i>0 && perf->states[i].core_frequency >= + for (i = 0; i < perf->state_count; i++) { + if (i > 0 && perf->states[i].core_frequency >= data->freq_table[valid_states-1].frequency / 1000) continue; -- cgit v1.2.3 From 91420220d278584693d11a800b78fdc20e8fe10e Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 4 Feb 2009 15:28:54 -0500 Subject: [CPUFREQ] Use swap() in longhaul.c Remove hand-coded implementation of swap() Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/longhaul.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index dc03622a83a..f1c51aea064 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c @@ -511,13 +511,10 @@ static int __init longhaul_get_ranges(void) } } if (min_i != j) { - unsigned int temp; - temp = longhaul_table[j].frequency; - longhaul_table[j].frequency = longhaul_table[min_i].frequency; - longhaul_table[min_i].frequency = temp; - temp = longhaul_table[j].index; - longhaul_table[j].index = longhaul_table[min_i].index; - longhaul_table[min_i].index = temp; + swap(longhaul_table[j].frequency, + longhaul_table[min_i].frequency); + swap(longhaul_table[j].index, + longhaul_table[min_i].index); } } -- cgit v1.2.3 From de3ed81d746d7cfc22a0c645244ed9fa71e4a833 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Wed, 18 Feb 2009 18:28:22 +0000 Subject: [CPUFREQ] Change link order of x86 cpufreq modules Change the link order of the cpufreq modules to ensure that they're probed in the preferred order when statically linked in. Signed-off-by: Matthew Garrett Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile index 560f7760dae..509296df294 100644 --- a/arch/x86/kernel/cpu/cpufreq/Makefile +++ b/arch/x86/kernel/cpu/cpufreq/Makefile @@ -1,6 +1,11 @@ +# Link order matters. K8 is preferred to ACPI because of firmware bugs in early +# K8 systems. ACPI is preferred to all other hardware-specific drivers. +# speedstep-* is preferred over p4-clockmod. + +obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o +obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o -obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o obj-$(CONFIG_X86_LONGHAUL) += longhaul.o obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o @@ -10,7 +15,6 @@ obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o -obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o -- cgit v1.2.3 From 0cb8bc256093e716d2a0a4a721f36c625a3f7634 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 18 Feb 2009 14:11:00 -0500 Subject: [CPUFREQ] powernow-k8: Use a common exit path. a0abd520fd69295f4a3735e29a9448a32e101d47 introduced a slew of extra kfree/return -ENODEV pairs. This replaces them all with gotos. 
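For readers unfamiliar with the convention, the change below adopts the kernel's staged-unwind error handling: every failure jumps to the label that performs exactly the cleanup accumulated up to that point, so the kfree() appears once instead of being repeated in every error branch. A minimal sketch of the idiom (step_one(), step_two() and undo_step_one() are hypothetical placeholders, not this driver's functions):

	static int example_init(void)
	{
		struct example *data;

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		if (step_one(data))
			goto err_out;		/* only the allocation to undo */

		if (step_two(data))
			goto err_out_undo;	/* step_one() must be undone too */

		return 0;

	err_out_undo:
		undo_step_one(data);
	err_out:
		kfree(data);
		return -ENODEV;
	}
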
Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index c44853fc827..a15ac94e0b9 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1254,21 +1254,18 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) "BIOS vendor.\n"); print_once++; } - kfree(data); - return -ENODEV; + goto err_out; } if (pol->cpu != 0) { printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " "CPU other than CPU0. Complain to your BIOS " "vendor.\n"); - kfree(data); - return -ENODEV; + goto err_out; } rc = find_psb_table(data); - if (rc) { - kfree(data); - return -ENODEV; - } + if (rc) + goto err_out; + /* Take a crude guess here. * That guess was in microseconds, so multiply with 1000 */ pol->cpuinfo.transition_latency = ( @@ -1283,16 +1280,16 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) if (smp_processor_id() != pol->cpu) { printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); - goto err_out; + goto err_out_unmask; } if (pending_bit_stuck()) { printk(KERN_ERR PFX "failing init, change pending bit set\n"); - goto err_out; + goto err_out_unmask; } if (query_current_values_with_pending_wait(data)) - goto err_out; + goto err_out_unmask; if (cpu_family == CPU_OPTERON) fidvid_msr_init(); @@ -1335,10 +1332,11 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) return 0; -err_out: +err_out_unmask: set_cpus_allowed_ptr(current, &oldmask); powernow_k8_cpu_exit_acpi(data); +err_out: kfree(data); return -ENODEV; } -- cgit v1.2.3 From 199785eac892a1fa1b71cc22bec58e8b156d9311 Mon Sep 17 00:00:00 2001 From: Matthias-Christian Ott Date: Fri, 20 Feb 2009 20:52:17 -0500 Subject: [CPUFREQ] p4-clockmod reports wrong frequency. 
http://bugzilla.kernel.org/show_bug.cgi?id=10968 [ Updated for current tree, and fixed compile failure when p4-clockmod was built modular -- davej] From: Matthias-Christian Ott Signed-off-by: Dominik Brodowski Signed-off-by: Dave Jones --- arch/x86/include/asm/timer.h | 2 +- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 7 ++++++ arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | 34 +++++++++++++++++------------ arch/x86/kernel/tsc.c | 3 --- 4 files changed, 28 insertions(+), 18 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 2bb6a835c45..4f5c2472485 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -11,8 +11,8 @@ unsigned long native_calibrate_tsc(void); #ifdef CONFIG_X86_32 extern int timer_ack; +#endif extern int recalibrate_cpu_khz(void); -#endif /* CONFIG_X86_32 */ extern int no_timer_check; diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 46a2a7a5314..1778402305e 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -31,6 +31,7 @@ #include #include +#include #include "speedstep-lib.h" @@ -224,6 +225,12 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) dprintk("has errata -- disabling low frequencies\n"); } + if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && + c->x86_model < 2) { + /* switch to maximum frequency and measure result */ + cpufreq_p4_setdc(policy->cpu, DC_DISABLE); + recalibrate_cpu_khz(); + } /* get max frequency */ stock_freq = cpufreq_p4_get_frequency(c); if (!stock_freq) diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index 55c696daa05..2e3c6862657 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c @@ -16,6 +16,7 @@ #include #include +#include #include "speedstep-lib.h" #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ @@ -178,6 +179,15 @@ static unsigned int pentium4_get_frequency(void) u32 msr_lo, msr_hi, mult; unsigned int fsb = 0; unsigned int ret; + u8 fsb_code; + + /* Pentium 4 Model 0 and 1 do not have the Core Clock Frequency + * to System Bus Frequency Ratio Field in the Processor Frequency + * Configuration Register of the MSR. Therefore the current + * frequency cannot be calculated and has to be measured. + */ + if (c->x86_model < 2) + return cpu_khz; rdmsr(0x2c, msr_lo, msr_hi); @@ -188,21 +198,17 @@ static unsigned int pentium4_get_frequency(void) * revision #12 in Table B-1: MSRs in the Pentium 4 and * Intel Xeon Processors, on page B-4 and B-5. 
*/ - if (c->x86_model < 2) + fsb_code = (msr_lo >> 16) & 0x7; + switch (fsb_code) { + case 0: fsb = 100 * 1000; - else { - u8 fsb_code = (msr_lo >> 16) & 0x7; - switch (fsb_code) { - case 0: - fsb = 100 * 1000; - break; - case 1: - fsb = 13333 * 10; - break; - case 2: - fsb = 200 * 1000; - break; - } + break; + case 1: + fsb = 13333 * 10; + break; + case 2: + fsb = 200 * 1000; + break; } if (!fsb) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 599e5816863..5ad22f8f5f3 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -523,8 +523,6 @@ unsigned long native_calibrate_tsc(void) return tsc_pit_min; } -#ifdef CONFIG_X86_32 -/* Only called from the Powernow K7 cpu freq driver */ int recalibrate_cpu_khz(void) { #ifndef CONFIG_SMP @@ -546,7 +544,6 @@ int recalibrate_cpu_khz(void) EXPORT_SYMBOL(recalibrate_cpu_khz); -#endif /* CONFIG_X86_32 */ /* Accelerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) -- cgit v1.2.3 From eb3092cee79e4efa5d3e9c81c7e6ca90318cebb8 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Sat, 21 Feb 2009 01:58:47 +0000 Subject: [CPUFREQ] Make cpufreq-nforce2 less obnoxious Not owning an nforce2 is a sign of good taste, not an error. Signed-off-by: Matthew Garrett Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c index 99262906838..733093d6043 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c @@ -424,7 +424,7 @@ static int __init nforce2_init(void) /* detect chipset */ if (nforce2_detect_chipset()) { - printk(KERN_ERR PFX "No nForce2 chipset.\n"); + printk(KERN_INFO PFX "No nForce2 chipset.\n"); return -ENODEV; } -- cgit v1.2.3 From 83ce400928680a6c8123d492684b27857f5a2d95 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 26 Feb 2009 20:16:58 +0100 Subject: x86: set X86_FEATURE_TSC_RELIABLE If the TSC is constant and non-stop, also set it reliable. (We will turn this off in DMI quirks for multi-chassis systems) The performance number on a 16-way Nehalem system running 32 tasks that context-switch between each other is significant: sched_clock_stable=0 sched_clock_stable=1 .................... .................... 22.456925 million/sec 24.306972 million/sec [+8.2%] lmbench's "lat_ctx -s 0 2" goes from 0.63 microseconds to 0.59 microseconds - a 6.7% increase in context-switching performance. Perfstat of 1 million pipe context switches between two tasks: Performance counter stats for './pipe-test-1m': [before] [after] ............ ............ 37621.421089 36436.848378 task clock ticks (msecs) 0 0 CPU migrations (events) 2000274 2000189 context switches (events) 194 193 pagefaults (events) 8433799643 8171016416 CPU cycles (events) -3.21% 8370133368 8180999694 instructions (events) -2.31% 4158565 3895941 cache references (events) -6.74% 44312 46264 cache misses (events) 2349.287976 2279.362465 wall-time (msecs) -3.06% The speedup comes straight from the reduction in the instruction count. sched_clock_cpu() got simpler and the whole workload thus executes faster. 
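For context, c->x86_power is the kernel's cached copy of CPUID leaf 0x80000007 EDX, so the bit 8 test in the hunk below is the architectural invariant-TSC flag. A hedged, stand-alone sketch of the same probe (illustration only: it assumes the extended leaf exists on the CPU, and the kernel itself uses the cached value rather than re-issuing CPUID):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t cpuid_edx(uint32_t leaf)
	{
		uint32_t eax, ebx, ecx, edx;

		__asm__ volatile("cpuid"
				 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
				 : "a"(leaf), "c"(0));
		return edx;
	}

	int main(void)
	{
		/* bit 8 of leaf 0x80000007 EDX: TSC runs at a constant
		 * rate and does not stop in deep C-states */
		printf("invariant TSC: %s\n",
		       (cpuid_edx(0x80000007) >> 8) & 1 ? "yes" : "no");
		return 0;
	}
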
Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 24ff26a38ad..5fff00c70de 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -56,11 +57,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) /* * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate - * with P/T states and does not stop in deep C-states + * with P/T states and does not stop in deep C-states. + * + * It is also reliable across cores and sockets. (but not across + * cabinets - we turn it off in that case explicitly.) */ if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); + set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); + sched_clock_stable = 1; } } -- cgit v1.2.3 From 36e8abf3edcd2d207193ec5741d1a2a645d470a5 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Thu, 5 Mar 2009 00:16:26 -0500 Subject: [CPUFREQ] Prevent p4-clockmod from auto-binding to the ondemand governor. The latency of p4-clockmod sucks so hard that scaling on a regular basis with ondemand is a really bad idea. Signed-off-by: Matthew Garrett Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 1778402305e..352cf9a4916 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -246,7 +246,10 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu); /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 1000000; /* assumed */ + + /* the transition latency is set to be 1 higher than the maximum + * transition latency of the ondemand governor */ + policy->cpuinfo.transition_latency = 10000001; policy->cur = stock_freq; return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]); -- cgit v1.2.3 From 5f812de63ce515265ebd84e932c762136924ab84 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Tue, 6 Jan 2009 00:17:09 +0900 Subject: AMD IOMMU: remove unnecessary ifdef We try to avoid this type of ifdef and we can safely remove this ifdef. Signed-off-by: FUJITA Tomonori Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 5113c080f0c..d7a7c4c063f 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -23,9 +23,7 @@ #include #include #include -#ifdef CONFIG_IOMMU_API #include -#endif #include #include #include -- cgit v1.2.3 From f24ade3a3332811a512ed3b6c6aa69486719b1d8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 10 Mar 2009 19:02:30 +0100 Subject: x86, sched_clock(): mark variables read-mostly Impact: micro-optimization There's a number of variables in the sched_clock() path that are in .data/.bss - but not marked __read_mostly. This creates the danger of accidental false cacheline sharing with some other, write-often variable. So mark them __read_mostly. 
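The hazard in question, sketched: without section placement the linker may co-locate a flag that is read on every sched_clock() call with an unrelated, write-often variable; each write then invalidates the shared cache line under all readers of the flag. A sketch of the effect (variable names illustrative, not the actual tsc.c layout):

	/* both may land on the same 64-byte cache line in .bss: */
	static int hot_flag;			/* read extremely often */
	static unsigned long busy_counter;	/* written constantly */

	/* the annotated equivalent is grouped with other rarely
	 * written data, away from write-often variables: */
	static int hot_flag_rm __read_mostly;
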
Signed-off-by: Ingo Molnar --- arch/x86/kernel/tsc.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 599e5816863..9c8b71531ca 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -17,20 +17,21 @@ #include #include -unsigned int cpu_khz; /* TSC clocks / usec, not used here */ +unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ EXPORT_SYMBOL(cpu_khz); -unsigned int tsc_khz; + +unsigned int __read_mostly tsc_khz; EXPORT_SYMBOL(tsc_khz); /* * TSC can be unstable due to cpufreq or due to unsynced TSCs */ -static int tsc_unstable; +static int __read_mostly tsc_unstable; /* native_sched_clock() is called before tsc_init(), so we must start with the TSC soft disabled to prevent erroneous rdtsc usage on !cpu_has_tsc processors */ -static int tsc_disabled = -1; +static int __read_mostly tsc_disabled = -1; static int tsc_clocksource_reliable; /* -- cgit v1.2.3 From 2118d0c548e8a2205e1a29eb5b89e5f2e9ae2c8b Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 15:13:15 +0100 Subject: dma-debug: x86 architecture bindings Impact: make use of DMA-API debugging code in x86 Signed-off-by: Joerg Roedel --- arch/x86/Kconfig | 1 + arch/x86/include/asm/dma-mapping.h | 45 +++++++++++++++++++++++++++++++++----- arch/x86/kernel/pci-dma.c | 6 +++++ 3 files changed, 46 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index bc2fbadff9f..f2cb677b263 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -40,6 +40,7 @@ config X86 select HAVE_GENERIC_DMA_COHERENT if X86_32 select HAVE_EFFICIENT_UNALIGNED_ACCESS select USER_STACKTRACE_SUPPORT + select HAVE_DMA_API_DEBUG config ARCH_DEFCONFIG string diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 9c78bd40ebe..cea7b74963e 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -56,11 +57,16 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size, enum dma_data_direction dir) { struct dma_map_ops *ops = get_dma_ops(hwdev); + dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); - return ops->map_page(hwdev, virt_to_page(ptr), + addr = ops->map_page(hwdev, virt_to_page(ptr), (unsigned long)ptr & ~PAGE_MASK, size, dir, NULL); + debug_dma_map_page(hwdev, virt_to_page(ptr), + (unsigned long)ptr & ~PAGE_MASK, size, + dir, addr, true); + return addr; } static inline void @@ -72,6 +78,7 @@ dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) ops->unmap_page(dev, addr, size, dir, NULL); + debug_dma_unmap_page(dev, addr, size, dir, true); } static inline int @@ -79,9 +86,13 @@ dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { struct dma_map_ops *ops = get_dma_ops(hwdev); + int ents; BUG_ON(!valid_dma_direction(dir)); - return ops->map_sg(hwdev, sg, nents, dir, NULL); + ents = ops->map_sg(hwdev, sg, nents, dir, NULL); + debug_dma_map_sg(hwdev, sg, nents, ents, dir); + + return ents; } static inline void @@ -91,6 +102,7 @@ dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, struct dma_map_ops *ops = get_dma_ops(hwdev); BUG_ON(!valid_dma_direction(dir)); + debug_dma_unmap_sg(hwdev, sg, nents, dir); if (ops->unmap_sg) ops->unmap_sg(hwdev, sg, nents, dir, NULL); } @@ -104,6 +116,7 @@ 
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_cpu) ops->sync_single_for_cpu(hwdev, dma_handle, size, dir); + debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir); flush_write_buffers(); } @@ -116,6 +129,7 @@ dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_device) ops->sync_single_for_device(hwdev, dma_handle, size, dir); + debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir); flush_write_buffers(); } @@ -130,6 +144,8 @@ dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, if (ops->sync_single_range_for_cpu) ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, dir); + debug_dma_sync_single_range_for_cpu(hwdev, dma_handle, + offset, size, dir); flush_write_buffers(); } @@ -144,6 +160,8 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, if (ops->sync_single_range_for_device) ops->sync_single_range_for_device(hwdev, dma_handle, offset, size, dir); + debug_dma_sync_single_range_for_device(hwdev, dma_handle, + offset, size, dir); flush_write_buffers(); } @@ -156,6 +174,7 @@ dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_cpu) ops->sync_sg_for_cpu(hwdev, sg, nelems, dir); + debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir); flush_write_buffers(); } @@ -168,6 +187,7 @@ dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_device) ops->sync_sg_for_device(hwdev, sg, nelems, dir); + debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir); flush_write_buffers(); } @@ -177,15 +197,24 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, enum dma_data_direction dir) { struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); - return ops->map_page(dev, page, offset, size, dir, NULL); + addr = ops->map_page(dev, page, offset, size, dir, NULL); + debug_dma_map_page(dev, page, offset, size, dir, addr, false); + + return addr; } static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - dma_unmap_single(dev, addr, size, dir); + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, NULL); + debug_dma_unmap_page(dev, addr, size, dir, false); } static inline void @@ -250,8 +279,11 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, if (!ops->alloc_coherent) return NULL; - return ops->alloc_coherent(dev, size, dma_handle, - dma_alloc_coherent_gfp_flags(dev, gfp)); + memory = ops->alloc_coherent(dev, size, dma_handle, + dma_alloc_coherent_gfp_flags(dev, gfp)); + debug_dma_alloc_coherent(dev, size, *dma_handle, memory); + + return memory; } static inline void dma_free_coherent(struct device *dev, size_t size, @@ -264,6 +296,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size, if (dma_release_from_coherent(dev, get_order(size), vaddr)) return; + debug_dma_free_coherent(dev, size, vaddr, bus); if (ops->free_coherent) ops->free_coherent(dev, size, vaddr, bus); } diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index f293a8df682..ebf7d454f21 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -1,4 +1,5 @@ #include +#include #include 
#include #include @@ -44,6 +45,9 @@ struct device x86_dma_fallback_dev = { }; EXPORT_SYMBOL(x86_dma_fallback_dev); +/* Number of entries preallocated for DMA-API debugging */ +#define PREALLOC_DMA_DEBUG_ENTRIES 32768 + int dma_set_mask(struct device *dev, u64 mask) { if (!dev->dma_mask || !dma_supported(dev, mask)) @@ -265,6 +269,8 @@ EXPORT_SYMBOL(dma_supported); static int __init pci_iommu_init(void) { + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); + calgary_iommu_init(); intel_iommu_init(); -- cgit v1.2.3 From 86f319529372953e353dc998bc6a761949614903 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 16 Mar 2009 17:50:28 +0100 Subject: dma-debug/x86: register pci bus for dma-debug leak detection Impact: detect dma memory leaks for pci devices Signed-off-by: Joerg Roedel --- arch/x86/kernel/pci-dma.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index ebf7d454f21..c7c4776ff63 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -271,6 +271,10 @@ static int __init pci_iommu_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); +#ifdef CONFIG_PCI + dma_debug_add_bus(&pci_bus_type); +#endif + calgary_iommu_init(); intel_iommu_init(); -- cgit v1.2.3 From 600914ba524130583fa5acdd00df4aa7aa44b173 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 14 Jan 2009 10:04:25 -0700 Subject: PCI/x86: make early dump handle multi-function devices The early "dump PCI config space" code skips many multi-function devices. This patch fixes that, so it dumps all devices in PCI domain 0. We should not skip the rest of the functions if CLASS_REVISION is 0xffffffff. Often multi-function devices have gaps in the function ID space, e.g., 1c.0 and 1c.2 exist but 1c.1 doesn't. The CLASS_REVISION of the non-existent 1c.1 function will appear to be 0xffffffff. We should only look at the HEADER_TYPE of function zero. Often the "multi-function" is set in function zero, but not in other functions. Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/early.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c index f6adf2c6d75..c1a2cd54172 100644 --- a/arch/x86/pci/early.c +++ b/arch/x86/pci/early.c @@ -96,18 +96,21 @@ void early_dump_pci_devices(void) for (func = 0; func < 8; func++) { u32 class; u8 type; + class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); if (class == 0xffffffff) - break; + continue; early_dump_pci_device(bus, slot, func); - /* No multi-function device? */ - type = read_pci_config_byte(bus, slot, func, + if (func == 0) { + type = read_pci_config_byte(bus, slot, + func, PCI_HEADER_TYPE); - if (!(type & 0x80)) - break; + if (!(type & 0x80)) + break; + } } } } -- cgit v1.2.3 From 7bc9e77dcc6edf6ce0b3e4677b1e7f4a05b95b85 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 14 Jan 2009 10:04:30 -0700 Subject: PCI/x86: format early dump like other PCI output Use %02x:%02x.%d rather than %02x:%02x:%02x so PCI addresses look the same as in other parts of the kernel. 
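A sketch of the resulting layout, byte-wise for brevity (cfg[] is a hypothetical 256-byte copy of the function's config space; the real code below reads it a dword at a time with read_pci_config()):

	printk(KERN_INFO "pci 0000:%02x:%02x.%d config space:",
	       bus, slot, func);
	for (i = 0; i < 256; i++) {
		if (!(i & 0x0f))
			printk("\n %02x:", i);	/* offset column per row */
		printk(" %02x", cfg[i]);
	}
	printk("\n");
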
Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/early.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c index c1a2cd54172..aaf26ae58cd 100644 --- a/arch/x86/pci/early.c +++ b/arch/x86/pci/early.c @@ -69,11 +69,12 @@ void early_dump_pci_device(u8 bus, u8 slot, u8 func) int j; u32 val; - printk(KERN_INFO "PCI: %02x:%02x:%02x", bus, slot, func); + printk(KERN_INFO "pci 0000:%02x:%02x.%d config space:", + bus, slot, func); for (i = 0; i < 256; i += 4) { if (!(i & 0x0f)) - printk("\n%04x:",i); + printk("\n %02x:",i); val = read_pci_config(bus, slot, func, i); for (j = 0; j < 4; j++) { @@ -115,4 +116,3 @@ void early_dump_pci_devices(void) } } } - -- cgit v1.2.3 From 11df1f05514beaf0269484191007dbc8d47e0e6f Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 19 Jan 2009 11:31:00 +1100 Subject: PCI/MSI: Use #ifdefs instead of weak functions Weak functions aren't all they're cracked up to be. They lead to incorrect binaries with some toolchains, they require us to have empty functions we otherwise wouldn't, and the unused code is not elided (as of gcc 4.3.2 anyway). So replace the weak MSI arch hooks with the #define foo foo idiom. We no longer need empty versions of arch_setup/teardown_msi_irq(). This is less source (by 1 line!), and results in smaller binaries too: text data bss dec hex filename 9354300 1693916 678424 11726640 b2ef30 build/powerpc/vmlinux-before 9354052 1693852 678424 11726328 b2edf8 build/powerpc/vmlinux-after Also smaller on x86_64 and arm (iop13xx). Signed-off-by: Michael Ellerman Signed-off-by: Jesse Barnes --- arch/x86/include/asm/pci.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index a977de23cb4..a0301bfeb95 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -86,6 +86,9 @@ static inline void early_quirks(void) { } extern void pci_iommu_alloc(void); +/* MSI arch hook */ +#define arch_setup_msi_irqs arch_setup_msi_irqs + #endif /* __KERNEL__ */ #ifdef CONFIG_X86_32 -- cgit v1.2.3 From 13bf75766966e1bcc71fae536988caec312eef8f Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 24 Feb 2009 10:38:22 -0700 Subject: x86: use dev_printk in quirk message This patch changes a VIA PCI quirk to use dev_info() rather than printk(). Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/kernel/pci-dma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index b2542853314..022833bb9ff 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -290,8 +290,7 @@ fs_initcall(pci_iommu_init); static __devinit void via_no_dac(struct pci_dev *dev) { if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { - printk(KERN_INFO - "PCI: VIA PCI bridge detected. Disabling DAC.\n"); + dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n"); forbid_dac = 1; } } -- cgit v1.2.3 From 1c8d7b0a562da06d3ebe83f01b1ed553205d1ae4 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 17 Mar 2009 08:54:10 -0400 Subject: PCI MSI: Add support for multiple MSI Add the new API pci_enable_msi_block() to allow drivers to request multiple MSI and reimplement pci_enable_msi in terms of pci_enable_msi_block. Ensure that the architecture back ends don't have to know about multiple MSI. 
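A hedged sketch of how a driver might consume the new call, assuming its block-allocation convention: zero on success, a negative errno on hard failure, and a positive value suggesting how many vectors a retry could succeed with (example_request_msi() is a placeholder name, not part of the API):

	static int example_request_msi(struct pci_dev *pdev, int nvec)
	{
		int rc;

		while (nvec > 0) {
			rc = pci_enable_msi_block(pdev, nvec);
			if (rc == 0)
				return nvec;	/* whole block granted */
			if (rc < 0)
				return rc;	/* MSI not usable at all */
			nvec = rc;		/* shrink and retry */
		}
		return -ENOSPC;
	}
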
Signed-off-by: Matthew Wilcox Signed-off-by: Jesse Barnes --- arch/x86/kernel/io_apic.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index bc7ac4da90d..a09549a6321 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3510,6 +3510,10 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) int index = 0; #endif + /* x86 doesn't support multiple MSI yet */ + if (type == PCI_CAP_ID_MSI && nvec > 1) + return 1; + irq_want = nr_irqs_gsi; sub_handle = 0; list_for_each_entry(msidesc, &dev->msi_list, list) { -- cgit v1.2.3 From dfadd9edff498d767008edc6b2a6e86a7a19934d Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 8 Mar 2009 21:35:37 -0700 Subject: PCI/x86: detect host bridge config space size w/o using quirks Many host bridges support a 4k config space, so check them directy instead of using quirks to add them. We only need to do this extra check for host bridges at this point, because only host bridges are known to have extended address space without also having a PCI-X/PCI-E caps. Other devices with this property could be done with quirks (if there are any). As a bonus, we can remove the quirks for AMD host bridges with family 10h and 11h since they're not needed any more. With this patch, we can get correct pci cfg size of new Intel CPUs/IOHs with host bridges. Signed-off-by: Yinghai Lu Acked-by: H. Peter Anvin Reviewed-by: Matthew Wilcox Cc: Signed-off-by: Jesse Barnes --- arch/x86/pci/fixup.c | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 7d388d5cf54..096b0ed0713 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -494,26 +494,6 @@ static void __devinit pci_siemens_interrupt_controller(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015, pci_siemens_interrupt_controller); -/* - * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have - * 4096 bytes configuration space for each function of their processor - * configuration space. - */ -static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev) -{ - dev->cfg_size = pci_cfg_space_size_ext(dev); -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size); - /* * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from * confusing the PCI engine: -- cgit v1.2.3 From 5546d6f56807115a035d140f7364ce5807dbcc87 Mon Sep 17 00:00:00 2001 From: Ed Swierk Date: Thu, 19 Mar 2009 20:57:56 -0700 Subject: x86/PCI: Detect mmconfig on nVidia MCP55 Detect and enable memory-mapped PCI configuration space on the nVidia MCP55 southbridge. 
Tested against 2.6.27.4 on an Arista Networks development board with one MCP55, Coreboot firmware, no ACPI. Signed-off-by: Ed Swierk Signed-off-by: Yinghai Lu Signed-off-by: Jesse Barnes --- arch/x86/pci/mmconfig-shared.c | 64 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 89bf9242c80..d68dc1bb01b 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -154,6 +154,68 @@ static const char __init *pci_mmcfg_amd_fam10h(void) return "AMD Family 10h NB"; } +static bool __initdata mcp55_checked; +static const char __init *pci_mmcfg_nvidia_mcp55(void) +{ + int bus; + int mcp55_mmconf_found = 0; + + static const u32 extcfg_regnum = 0x90; + static const u32 extcfg_regsize = 4; + static const u32 extcfg_enable_mask = 1<<31; + static const u32 extcfg_start_mask = 0xff<<16; + static const int extcfg_start_shift = 16; + static const u32 extcfg_size_mask = 0x3<<28; + static const int extcfg_size_shift = 28; + static const int extcfg_sizebus[] = {0x100, 0x80, 0x40, 0x20}; + static const u32 extcfg_base_mask[] = {0x7ff8, 0x7ffc, 0x7ffe, 0x7fff}; + static const int extcfg_base_lshift = 25; + + /* + * do check if amd fam10h already took over + */ + if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked) + return NULL; + + mcp55_checked = true; + for (bus = 0; bus < 256; bus++) { + u64 base; + u32 l, extcfg; + u16 vendor, device; + int start, size_index, end; + + raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l); + vendor = l & 0xffff; + device = (l >> 16) & 0xffff; + + if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device) + continue; + + raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum, + extcfg_regsize, &extcfg); + + if (!(extcfg & extcfg_enable_mask)) + continue; + + if (extend_mmcfg(1) == -1) + continue; + + size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift; + base = extcfg & extcfg_base_mask[size_index]; + /* base could > 4G */ + base <<= extcfg_base_lshift; + start = (extcfg & extcfg_start_mask) >> extcfg_start_shift; + end = start + extcfg_sizebus[size_index] - 1; + fill_one_mmcfg(base, 0, start, end); + mcp55_mmconf_found++; + } + + if (!mcp55_mmconf_found) + return NULL; + + return "nVidia MCP55"; +} + struct pci_mmcfg_hostbridge_probe { u32 bus; u32 devfn; @@ -171,6 +233,8 @@ static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = { 0x1200, pci_mmcfg_amd_fam10h }, { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD, 0x1200, pci_mmcfg_amd_fam10h }, + { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA, + 0x0369, pci_mmcfg_nvidia_mcp55 }, }; static int __init pci_mmcfg_check_hostbridge(void) -- cgit v1.2.3 From 068258bc15439c11a966e873f931cc8e513dca61 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 19 Mar 2009 20:55:35 -0700 Subject: x86/PCI: host mmconfig detect clean up Fix mmconfig detection to not assume a single mmconfig space in the northbridge, paving the way for AMD fam10h + mcp55 CPUs. On those, the MSR has some range, but the mcp55 pci config will have another one. Also helps the mcp55 + io55 case, where every one will have one range. If it is mcp55, exclude the range that is used by CPU MSR, in other words , if the CPU claims busses 0-255, the range in mcp55 is dropped, because CPU HW will not route those ranges to mcp55 mmconfig to handle it. 
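The reconciliation policy reduces to: sort the discovered regions by starting bus number, then clip each region so it cannot claim buses belonging to the next one. A simplified sketch over a hypothetical mmcfg_range array (mirroring cmp_mmcfg() and pci_mmcfg_check_end_bus_number() in the diff below; sort() is the kernel's lib/sort helper):

	struct mmcfg_range { int start_bus, end_bus; };

	static int cmp_range(const void *a, const void *b)
	{
		const struct mmcfg_range *r1 = a, *r2 = b;

		return r1->start_bus - r2->start_bus;
	}

	static void trim_overlaps(struct mmcfg_range *r, int n)
	{
		int i;

		sort(r, n, sizeof(*r), cmp_range, NULL);

		for (i = 0; i < n; i++) {
			/* open-ended entry: assume it reaches bus 255 */
			if (r[i].end_bus < r[i].start_bus)
				r[i].end_bus = 255;
			/* never overlap the region that follows */
			if (i < n - 1 && r[i].end_bus >= r[i + 1].start_bus)
				r[i].end_bus = r[i + 1].start_bus - 1;
		}
	}
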
Signed-off-by: Yinghai Lu Signed-off-by: Jesse Barnes --- arch/x86/pci/mmconfig-shared.c | 163 +++++++++++++++++++++++++++-------------- arch/x86/pci/mmconfig_64.c | 17 +++-- 2 files changed, 120 insertions(+), 60 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index d68dc1bb01b..905bb526b13 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -24,24 +25,49 @@ /* Indicate if the mmcfg resources have been placed into the resource table. */ static int __initdata pci_mmcfg_resources_inserted; +static __init int extend_mmcfg(int num) +{ + struct acpi_mcfg_allocation *new; + int new_num = pci_mmcfg_config_num + num; + + new = kzalloc(sizeof(pci_mmcfg_config[0]) * new_num, GFP_KERNEL); + if (!new) + return -1; + + if (pci_mmcfg_config) { + memcpy(new, pci_mmcfg_config, + sizeof(pci_mmcfg_config[0]) * new_num); + kfree(pci_mmcfg_config); + } + pci_mmcfg_config = new; + + return 0; +} + +static __init void fill_one_mmcfg(u64 addr, int segment, int start, int end) +{ + int i = pci_mmcfg_config_num; + + pci_mmcfg_config_num++; + pci_mmcfg_config[i].address = addr; + pci_mmcfg_config[i].pci_segment = segment; + pci_mmcfg_config[i].start_bus_number = start; + pci_mmcfg_config[i].end_bus_number = end; +} + static const char __init *pci_mmcfg_e7520(void) { u32 win; raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win); win = win & 0xf000; - if(win == 0x0000 || win == 0xf000) - pci_mmcfg_config_num = 0; - else { - pci_mmcfg_config_num = 1; - pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]), GFP_KERNEL); - if (!pci_mmcfg_config) - return NULL; - pci_mmcfg_config[0].address = win << 16; - pci_mmcfg_config[0].pci_segment = 0; - pci_mmcfg_config[0].start_bus_number = 0; - pci_mmcfg_config[0].end_bus_number = 255; - } + if (win == 0x0000 || win == 0xf000) + return NULL; + + if (extend_mmcfg(1) == -1) + return NULL; + + fill_one_mmcfg(win << 16, 0, 0, 255); return "Intel Corporation E7520 Memory Controller Hub"; } @@ -50,13 +76,11 @@ static const char __init *pci_mmcfg_intel_945(void) { u32 pciexbar, mask = 0, len = 0; - pci_mmcfg_config_num = 1; - raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar); /* Enable bit */ if (!(pciexbar & 1)) - pci_mmcfg_config_num = 0; + return NULL; /* Size bits */ switch ((pciexbar >> 1) & 3) { @@ -73,28 +97,23 @@ static const char __init *pci_mmcfg_intel_945(void) len = 0x04000000U; break; default: - pci_mmcfg_config_num = 0; + return NULL; } /* Errata #2, things break when not aligned on a 256Mb boundary */ /* Can only happen in 64M/128M mode */ if ((pciexbar & mask) & 0x0fffffffU) - pci_mmcfg_config_num = 0; + return NULL; /* Don't hit the APIC registers and their friends */ if ((pciexbar & mask) >= 0xf0000000U) - pci_mmcfg_config_num = 0; - - if (pci_mmcfg_config_num) { - pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]), GFP_KERNEL); - if (!pci_mmcfg_config) - return NULL; - pci_mmcfg_config[0].address = pciexbar & mask; - pci_mmcfg_config[0].pci_segment = 0; - pci_mmcfg_config[0].start_bus_number = 0; - pci_mmcfg_config[0].end_bus_number = (len >> 20) - 1; - } + return NULL; + + if (extend_mmcfg(1) == -1) + return NULL; + + fill_one_mmcfg(pciexbar & mask, 0, 0, (len >> 20) - 1); return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; } @@ -138,18 +157,11 @@ static const char __init *pci_mmcfg_amd_fam10h(void) busnbits = 8; } - pci_mmcfg_config_num = (1 << segnbits); - 
pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]) * - pci_mmcfg_config_num, GFP_KERNEL); - if (!pci_mmcfg_config) + if (extend_mmcfg(1 << segnbits) == -1) return NULL; - for (i = 0; i < (1 << segnbits); i++) { - pci_mmcfg_config[i].address = base + (1<<28) * i; - pci_mmcfg_config[i].pci_segment = i; - pci_mmcfg_config[i].start_bus_number = 0; - pci_mmcfg_config[i].end_bus_number = (1 << busnbits) - 1; - } + for (i = 0; i < (1 << segnbits); i++) + fill_one_mmcfg(base + (1<<28) * i, i, 0, (1 << busnbits) - 1); return "AMD Family 10h NB"; } @@ -237,6 +249,48 @@ static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = { 0x0369, pci_mmcfg_nvidia_mcp55 }, }; +static int __init cmp_mmcfg(const void *x1, const void *x2) +{ + const typeof(pci_mmcfg_config[0]) *m1 = x1; + const typeof(pci_mmcfg_config[0]) *m2 = x2; + int start1, start2; + + start1 = m1->start_bus_number; + start2 = m2->start_bus_number; + + return start1 - start2; +} + +static void __init pci_mmcfg_check_end_bus_number(void) +{ + int i; + typeof(pci_mmcfg_config[0]) *cfg, *cfgx; + + /* sort them at first */ + sort(pci_mmcfg_config, pci_mmcfg_config_num, + sizeof(pci_mmcfg_config[0]), cmp_mmcfg, NULL); + + /* last one*/ + if (pci_mmcfg_config_num > 0) { + i = pci_mmcfg_config_num - 1; + cfg = &pci_mmcfg_config[i]; + if (cfg->end_bus_number < cfg->start_bus_number) + cfg->end_bus_number = 255; + } + + /* don't overlap please */ + for (i = 0; i < pci_mmcfg_config_num - 1; i++) { + cfg = &pci_mmcfg_config[i]; + cfgx = &pci_mmcfg_config[i+1]; + + if (cfg->end_bus_number < cfg->start_bus_number) + cfg->end_bus_number = 255; + + if (cfg->end_bus_number >= cfgx->start_bus_number) + cfg->end_bus_number = cfgx->start_bus_number - 1; + } +} + static int __init pci_mmcfg_check_hostbridge(void) { u32 l; @@ -250,31 +304,33 @@ static int __init pci_mmcfg_check_hostbridge(void) pci_mmcfg_config_num = 0; pci_mmcfg_config = NULL; - name = NULL; - for (i = 0; !name && i < ARRAY_SIZE(pci_mmcfg_probes); i++) { + for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) { bus = pci_mmcfg_probes[i].bus; devfn = pci_mmcfg_probes[i].devfn; raw_pci_ops->read(0, bus, devfn, 0, 4, &l); vendor = l & 0xffff; device = (l >> 16) & 0xffff; + name = NULL; if (pci_mmcfg_probes[i].vendor == vendor && pci_mmcfg_probes[i].device == device) name = pci_mmcfg_probes[i].probe(); - } - if (name) { - printk(KERN_INFO "PCI: Found %s %s MMCONFIG support.\n", - name, pci_mmcfg_config_num ? 
"with" : "without"); + if (name) + printk(KERN_INFO "PCI: Found %s with MMCONFIG support.\n", + name); } - return name != NULL; + /* some end_bus_number is crazy, fix it */ + pci_mmcfg_check_end_bus_number(); + + return pci_mmcfg_config_num != 0; } static void __init pci_mmcfg_insert_resources(void) { -#define PCI_MMCFG_RESOURCE_NAME_LEN 19 +#define PCI_MMCFG_RESOURCE_NAME_LEN 24 int i; struct resource *res; char *names; @@ -292,9 +348,10 @@ static void __init pci_mmcfg_insert_resources(void) struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i]; num_buses = cfg->end_bus_number - cfg->start_bus_number + 1; res->name = names; - snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u", - cfg->pci_segment); - res->start = cfg->address; + snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, + "PCI MMCONFIG %u [%02x-%02x]", cfg->pci_segment, + cfg->start_bus_number, cfg->end_bus_number); + res->start = cfg->address + (cfg->start_bus_number << 20); res->end = res->start + (num_buses << 20) - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; insert_resource(&iomem_resource, res); @@ -418,8 +475,6 @@ static void __init pci_mmcfg_reject_broken(int early) (pci_mmcfg_config[0].address == 0)) return; - cfg = &pci_mmcfg_config[0]; - for (i = 0; i < pci_mmcfg_config_num; i++) { int valid = 0; u64 addr, size; @@ -487,10 +542,10 @@ static void __init __pci_mmcfg_init(int early) known_bridge = 1; } - if (!known_bridge) { + if (!known_bridge) acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg); - pci_mmcfg_reject_broken(early); - } + + pci_mmcfg_reject_broken(early); if ((pci_mmcfg_config_num == 0) || (pci_mmcfg_config == NULL) || diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c index 30007ffc8e1..94349f8b2f9 100644 --- a/arch/x86/pci/mmconfig_64.c +++ b/arch/x86/pci/mmconfig_64.c @@ -112,13 +112,18 @@ static struct pci_raw_ops pci_mmcfg = { static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg) { void __iomem *addr; - u32 size; - - size = (cfg->end_bus_number + 1) << 20; - addr = ioremap_nocache(cfg->address, size); + u64 start, size; + + start = cfg->start_bus_number; + start <<= 20; + start += cfg->address; + size = cfg->end_bus_number + 1 - cfg->start_bus_number; + size <<= 20; + addr = ioremap_nocache(start, size); if (addr) { printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n", - cfg->address, cfg->address + size - 1); + start, start + size - 1); + addr -= cfg->start_bus_number << 20; } return addr; } @@ -157,7 +162,7 @@ void __init pci_mmcfg_arch_free(void) for (i = 0; i < pci_mmcfg_config_num; ++i) { if (pci_mmcfg_virt[i].virt) { - iounmap(pci_mmcfg_virt[i].virt); + iounmap(pci_mmcfg_virt[i].virt + (pci_mmcfg_virt[i].cfg->start_bus_number << 20)); pci_mmcfg_virt[i].virt = NULL; pci_mmcfg_virt[i].cfg = NULL; } -- cgit v1.2.3 From f0b85051d0f0891378a83db22c0786f1d756fbff Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:01 +0100 Subject: KVM: SVM: Clean up VINTR setting The current VINTR intercept setters don't look clean to me. To make the code easier to read and enable the possibilty to trap on a VINTR set, this uses a helper function to set the VINTR intercept. 
v2 uses two distinct functions for setting and clearing the bit Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index a9e769e4e25..33407d95761 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -718,6 +718,16 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) to_svm(vcpu)->vmcb->save.rflags = rflags; } +static void svm_set_vintr(struct vcpu_svm *svm) +{ + svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR; +} + +static void svm_clear_vintr(struct vcpu_svm *svm) +{ + svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); +} + static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) { struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; @@ -1380,7 +1390,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm, { KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler); - svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR); + svm_clear_vintr(svm); svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; /* * If the user space waits to inject interrupts, exit as soon as @@ -1593,7 +1603,7 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu) (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || (vmcb->control.event_inj & SVM_EVTINJ_VALID)) { /* unable to deliver irq, set pending irq */ - vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR); + svm_set_vintr(svm); svm_inject_irq(svm, 0x0); goto out; } @@ -1652,9 +1662,9 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, */ if (!svm->vcpu.arch.interrupt_window_open && (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window)) - control->intercept |= 1ULL << INTERCEPT_VINTR; - else - control->intercept &= ~(1ULL << INTERCEPT_VINTR); + svm_set_vintr(svm); + else + svm_clear_vintr(svm); } static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) -- cgit v1.2.3 From 9962d032bbff0268f22068787831405f8468c8b4 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:02 +0100 Subject: KVM: SVM: Move EFER and MSR constants to generic x86 code MSR_EFER_SVME_MASK, MSR_VM_CR and MSR_VM_HSAVE_PA are set in KVM specific headers. Linux does have nice header files to collect EFER bits and MSR IDs, so IMHO we should put them there. While at it, I also changed the naming scheme to match that of the other defines. 
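With the constants in the generic headers, enabling or disabling SVM support on a host CPU is a plain read-modify-write of EFER; a sketch of the idiom (mirroring svm_hardware_enable() and cpu_svm_disable() in the diff below):

/* Sketch only: toggle EFER.SVME (bit 12) using the generic constants. */
static void host_set_svme(int enable)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	if (enable)
		efer |= EFER_SVME;
	else
		efer &= ~EFER_SVME;
	wrmsrl(MSR_EFER, efer);
}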
(introduced in v6) Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/include/asm/msr-index.h | 7 +++++++ arch/x86/include/asm/svm.h | 4 ---- arch/x86/include/asm/virtext.h | 2 +- arch/x86/kvm/svm.c | 6 +++--- 5 files changed, 12 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 730843d1d2f..2998efe8927 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -22,6 +22,7 @@ #include #include #include +#include #define KVM_MAX_VCPUS 16 #define KVM_MEMORY_SLOTS 32 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 358acc59ae0..46e9646e7a6 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -18,11 +18,13 @@ #define _EFER_LME 8 /* Long mode enable */ #define _EFER_LMA 10 /* Long mode active (read-only) */ #define _EFER_NX 11 /* No execute enable */ +#define _EFER_SVME 12 /* Enable virtualization */ #define EFER_SCE (1<<_EFER_SCE) #define EFER_LME (1<<_EFER_LME) #define EFER_LMA (1<<_EFER_LMA) #define EFER_NX (1<<_EFER_NX) +#define EFER_SVME (1<<_EFER_SVME) /* Intel MSRs. Some also available on other CPUs */ #define MSR_IA32_PERFCTR0 0x000000c1 @@ -360,4 +362,9 @@ #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c +/* AMD-V MSRs */ + +#define MSR_VM_CR 0xc0010114 +#define MSR_VM_HSAVE_PA 0xc0010117 + #endif /* _ASM_X86_MSR_INDEX_H */ diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 1b8afa78e86..82ada75f3eb 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -174,10 +174,6 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_CPUID_FEATURE_SHIFT 2 #define SVM_CPUID_FUNC 0x8000000a -#define MSR_EFER_SVME_MASK (1ULL << 12) -#define MSR_VM_CR 0xc0010114 -#define MSR_VM_HSAVE_PA 0xc0010117ULL - #define SVM_VM_CR_SVM_DISABLE 4 #define SVM_SELECTOR_S_SHIFT 4 diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 59363627523..e0f9aa16358 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -118,7 +118,7 @@ static inline void cpu_svm_disable(void) wrmsrl(MSR_VM_HSAVE_PA, 0); rdmsrl(MSR_EFER, efer); - wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); + wrmsrl(MSR_EFER, efer & ~EFER_SVME); } /** Makes sure SVM is disabled, if it is supported on the CPU diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 33407d95761..e4eb3fd91b9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -198,7 +198,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) if (!npt_enabled && !(efer & EFER_LMA)) efer &= ~EFER_LME; - to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK; + to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME; vcpu->arch.shadow_efer = efer; } @@ -292,7 +292,7 @@ static void svm_hardware_enable(void *garbage) svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); rdmsrl(MSR_EFER, efer); - wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK); + wrmsrl(MSR_EFER, efer | EFER_SVME); wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(svm_data->save_area) << PAGE_SHIFT); @@ -559,7 +559,7 @@ static void init_vmcb(struct vcpu_svm *svm) init_sys_seg(&save->ldtr, SEG_TYPE_LDT); init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); - save->efer = MSR_EFER_SVME_MASK; + save->efer = EFER_SVME; save->dr6 = 0xffff0ff0; save->dr7 = 0x400; save->rflags = 2; -- cgit v1.2.3 From 
c0725420cfdcf6dd9705b164a8c6cba86684080d Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:03 +0100 Subject: KVM: SVM: Add helper functions for nested SVM These are helpers for the nested SVM implementation. - nsvm_printk implements a debug printk variant - nested_svm_do calls a handler that can access gpa-based memory v3 makes use of the new permission checker v6 changes: - streamline nsvm_debug() - remove printk(KERN_ERR) - SVME check before CPL check - give GP error code - use new EFER constant Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 85 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e4eb3fd91b9..87debdcd1b9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -50,6 +50,15 @@ MODULE_LICENSE("GPL"); #define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) +/* Turn on to get debugging output*/ +/* #define NESTED_DEBUG */ + +#ifdef NESTED_DEBUG +#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args) +#else +#define nsvm_printk(fmt, args...) do {} while(0) +#endif + /* enable NPT for AMD64 and X86 with PAE */ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) static bool npt_enabled = true; @@ -1149,6 +1158,82 @@ static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) return 1; } +static int nested_svm_check_permissions(struct vcpu_svm *svm) +{ + if (!(svm->vcpu.arch.shadow_efer & EFER_SVME) + || !is_paging(&svm->vcpu)) { + kvm_queue_exception(&svm->vcpu, UD_VECTOR); + return 1; + } + + if (svm->vmcb->save.cpl) { + kvm_inject_gp(&svm->vcpu, 0); + return 1; + } + + return 0; +} + +static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa) +{ + struct page *page; + + down_read(&current->mm->mmap_sem); + page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT); + up_read(&current->mm->mmap_sem); + + if (is_error_page(page)) { + printk(KERN_INFO "%s: could not find page at 0x%llx\n", + __func__, gpa); + kvm_release_page_clean(page); + kvm_inject_gp(&svm->vcpu, 0); + return NULL; + } + return page; +} + +static int nested_svm_do(struct vcpu_svm *svm, + u64 arg1_gpa, u64 arg2_gpa, void *opaque, + int (*handler)(struct vcpu_svm *svm, + void *arg1, + void *arg2, + void *opaque)) +{ + struct page *arg1_page; + struct page *arg2_page = NULL; + void *arg1; + void *arg2 = NULL; + int retval; + + arg1_page = nested_svm_get_page(svm, arg1_gpa); + if(arg1_page == NULL) + return 1; + + if (arg2_gpa) { + arg2_page = nested_svm_get_page(svm, arg2_gpa); + if(arg2_page == NULL) { + kvm_release_page_clean(arg1_page); + return 1; + } + } + + arg1 = kmap_atomic(arg1_page, KM_USER0); + if (arg2_gpa) + arg2 = kmap_atomic(arg2_page, KM_USER1); + + retval = handler(svm, arg1, arg2, opaque); + + kunmap_atomic(arg1, KM_USER0); + if (arg2_gpa) + kunmap_atomic(arg2, KM_USER1); + + kvm_release_page_dirty(arg1_page); + if (arg2_gpa) + kvm_release_page_dirty(arg2_page); + + return retval; +} + static int invalid_op_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { -- cgit v1.2.3 From 1371d90460189d02bf1bcca19dbfe6bd10dc6031 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:04 +0100 Subject: KVM: SVM: Implement GIF, clgi and stgi This patch implements the GIF flag and the clgi and stgi instructions that set this flag. Only if the flag is set (the default), interrupts can be received by the CPU.
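Concretely, GIF becomes one more term in the interrupt-window predicate, tracked in the hidden hflags vector the next paragraph introduces; condensed from the diff:

/* Condensed sketch: the guest can take an interrupt only when no
 * interrupt shadow is active, RFLAGS.IF is set, and GIF is set. */
static int svm_interrupt_window_open(struct vcpu_svm *svm)
{
	return !(svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
	       (svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
	       (svm->vcpu.arch.hflags & HF_GIF_MASK);
}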
To keep the information about that somewhere, this patch adds a new hidden flags vector that is used to store information that does not go into the vmcb, but is SVM specific. I tried to write some code to make -no-kvm-irqchip work too, but the first level guest won't even boot with that atm, so I ditched it. v2 moves the hflags to x86 generic code v3 makes use of the new permission helper v6 only enables interrupt_window if GIF=1 Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 3 +++ arch/x86/kvm/svm.c | 47 ++++++++++++++++++++++++++++++----- 2 files changed, 45 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 2998efe8927..29e4157732d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -259,6 +259,7 @@ struct kvm_vcpu_arch { unsigned long cr3; unsigned long cr4; unsigned long cr8; + u32 hflags; u64 pdptrs[4]; /* pae */ u64 shadow_efer; u64 apic_base; @@ -738,6 +739,8 @@ enum { TASK_SWITCH_GATE = 3, }; +#define HF_GIF_MASK (1 << 0) + /* * Hardware virtualization extension instructions may fault if a * reboot turns off virtualization while processes are running. diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 87debdcd1b9..79cc06bfe57 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -251,7 +251,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) kvm_rip_write(vcpu, svm->next_rip); svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; - vcpu->arch.interrupt_window_open = 1; + vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK); } static int has_svm(void) @@ -600,6 +600,8 @@ static void init_vmcb(struct vcpu_svm *svm) save->cr4 = 0; } force_new_asid(&svm->vcpu); + + svm->vcpu.arch.hflags = HF_GIF_MASK; } static int svm_vcpu_reset(struct kvm_vcpu *vcpu) @@ -1234,6 +1236,36 @@ static int nested_svm_do(struct vcpu_svm *svm, return retval; } +static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + if (nested_svm_check_permissions(svm)) + return 1; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + + svm->vcpu.arch.hflags |= HF_GIF_MASK; + + return 1; +} + +static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + if (nested_svm_check_permissions(svm)) + return 1; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + + svm->vcpu.arch.hflags &= ~HF_GIF_MASK; + + /* After a CLGI no interrupts should come */ + svm_clear_vintr(svm); + svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; + + return 1; +} + static int invalid_op_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { @@ -1535,8 +1567,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, [SVM_EXIT_VMMCALL] = vmmcall_interception, [SVM_EXIT_VMLOAD] = invalid_op_interception, [SVM_EXIT_VMSAVE] = invalid_op_interception, - [SVM_EXIT_STGI] = invalid_op_interception, - [SVM_EXIT_CLGI] = invalid_op_interception, + [SVM_EXIT_STGI] = stgi_interception, + [SVM_EXIT_CLGI] = clgi_interception, [SVM_EXIT_SKINIT] = invalid_op_interception, [SVM_EXIT_WBINVD] = emulate_on_interception, [SVM_EXIT_MONITOR] = invalid_op_interception, @@ -1684,6 +1716,9 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu) if (!kvm_cpu_has_interrupt(vcpu)) goto out; + if (!(svm->vcpu.arch.hflags & HF_GIF_MASK)) + goto out; + if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || (vmcb->control.event_inj & SVM_EVTINJ_VALID)) { @@ -1710,7 +1745,8 @@ static void kvm_reput_irq(struct vcpu_svm *svm) } svm->vcpu.arch.interrupt_window_open = - !(control->int_state & SVM_INTERRUPT_SHADOW_MASK); + !(control->int_state & SVM_INTERRUPT_SHADOW_MASK) && + (svm->vcpu.arch.hflags & HF_GIF_MASK); } static void svm_do_inject_vector(struct vcpu_svm *svm) @@ -1734,7 +1770,8 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, svm->vcpu.arch.interrupt_window_open = (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) && - (svm->vmcb->save.rflags & X86_EFLAGS_IF)); + (svm->vmcb->save.rflags & X86_EFLAGS_IF) && + (svm->vcpu.arch.hflags & HF_GIF_MASK)); if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary) /* -- cgit v1.2.3 From b286d5d8b0836e76832dafcc5a18b0e8e5a3bc5e Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:05 +0100 Subject: KVM: SVM: Implement hsave Implement the hsave MSR, which gives the VCPU a GPA to save the old guest state in. v2 allows userspace to save/restore hsave v4 dummies out the hsave MSR, so we use a host page v6 remembers the guest's hsave and exports the MSR Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/kvm_svm.h | 2 ++ arch/x86/kvm/svm.c | 13 +++++++++++++ arch/x86/kvm/x86.c | 2 +- 3 files changed, 16 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h index 8e5ee99551f..a0877cac7b9 100644 --- a/arch/x86/kvm/kvm_svm.h +++ b/arch/x86/kvm/kvm_svm.h @@ -41,6 +41,8 @@ struct vcpu_svm { unsigned long host_dr7; u32 *msrpm; + struct vmcb *hsave; + u64 hsave_msr; }; #endif diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 79cc06bfe57..59aaff1c959 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -626,6 +626,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) struct vcpu_svm *svm; struct page *page; struct page *msrpm_pages; + struct page *hsave_page; int err; svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); @@ -651,6 +652,11 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) svm->msrpm = page_address(msrpm_pages); svm_vcpu_init_msrpm(svm->msrpm); + hsave_page = alloc_page(GFP_KERNEL); + if (!hsave_page) + goto uninit; + svm->hsave = page_address(hsave_page); + svm->vmcb = page_address(page); clear_page(svm->vmcb); svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; @@ -680,6 +686,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); + __free_page(virt_to_page(svm->hsave)); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); } @@ -1377,6 +1384,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) case MSR_IA32_LASTINTTOIP: *data = svm->vmcb->save.last_excp_to; break; + case MSR_VM_HSAVE_PA: + *data = svm->hsave_msr; + break; default: return kvm_get_msr_common(vcpu, ecx, data); } @@ -1470,6 +1480,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) */ pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data); + break; + case MSR_VM_HSAVE_PA: + svm->hsave_msr = data; break; default: return kvm_set_msr_common(vcpu, ecx, data); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 758b7a155ae..99165a961f0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -456,7 +456,7 @@ static u32
msrs_to_save[] = { MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, - MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT + MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA }; static unsigned num_msrs_to_save; -- cgit v1.2.3 From 5542675baa7e62ca4d18278c8758b6a4ec410639 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:06 +0100 Subject: KVM: SVM: Add VMLOAD and VMSAVE handlers This implements the VMLOAD and VMSAVE instructions, which usually surround the VMRUN instruction. Both instructions load / restore the same elements, so we only need to implement them once. v2 fixes CPL checking and replaces memcpy by assignments v3 makes use of the new permission checking Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 59aaff1c959..a83c94eb577 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1243,6 +1243,62 @@ static int nested_svm_do(struct vcpu_svm *svm, return retval; } +static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) +{ + to_vmcb->save.fs = from_vmcb->save.fs; + to_vmcb->save.gs = from_vmcb->save.gs; + to_vmcb->save.tr = from_vmcb->save.tr; + to_vmcb->save.ldtr = from_vmcb->save.ldtr; + to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base; + to_vmcb->save.star = from_vmcb->save.star; + to_vmcb->save.lstar = from_vmcb->save.lstar; + to_vmcb->save.cstar = from_vmcb->save.cstar; + to_vmcb->save.sfmask = from_vmcb->save.sfmask; + to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs; + to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp; + to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip; + + return 1; +} + +static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb, + void *arg2, void *opaque) +{ + return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb); +} + +static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb, + void *arg2, void *opaque) +{ + return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb); +} + +static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + if (nested_svm_check_permissions(svm)) + return 1; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + + nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload); + + return 1; +} + +static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + if (nested_svm_check_permissions(svm)) + return 1; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + + nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave); + + return 1; +} + static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { if (nested_svm_check_permissions(svm)) @@ -1578,8 +1634,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, [SVM_EXIT_SHUTDOWN] = shutdown_interception, [SVM_EXIT_VMRUN] = invalid_op_interception, [SVM_EXIT_VMMCALL] = vmmcall_interception, - [SVM_EXIT_VMLOAD] = invalid_op_interception, - [SVM_EXIT_VMSAVE] = invalid_op_interception, + [SVM_EXIT_VMLOAD] = vmload_interception, + [SVM_EXIT_VMSAVE] = vmsave_interception, [SVM_EXIT_STGI] = stgi_interception, [SVM_EXIT_CLGI] = clgi_interception, [SVM_EXIT_SKINIT] =
invalid_op_interception, -- cgit v1.2.3 From 3d6368ef580a4dff012960834bba4e28d3c1430c Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:07 +0100 Subject: KVM: SVM: Add VMRUN handler This patch implements VMRUN. VMRUN enters a virtual CPU and runs that in the same context as the normal guest CPU would run. So basically it is implemented the same way a normal CPU would do it. We also prepare all intercepts that get OR'ed with the original intercepts, as we do not allow a level 2 guest to be intercepted less than the first level guest. v2 implements the following improvements: - fixes the CPL check - does not allocate iopm when not used - remembers the host's IF in the HIF bit in the hflags v3: - make use of the new permission checking - add support for V_INTR_MASKING_MASK v4: - use host page backed hsave v5: - remove IOPM merging code v6: - save cr4 so PAE l1 guests work v7: - return 0 on vmrun so we check the MSRs too - fix MSR check to use the correct variable Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/kvm_svm.h | 8 ++ arch/x86/kvm/svm.c | 157 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 165 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 29e4157732d..53779309514 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -740,6 +740,8 @@ enum { }; #define HF_GIF_MASK (1 << 0) +#define HF_HIF_MASK (1 << 1) +#define HF_VINTR_MASK (1 << 2) /* * Hardware virtualization extension instructions may fault if a diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h index a0877cac7b9..91673413d8f 100644 --- a/arch/x86/kvm/kvm_svm.h +++ b/arch/x86/kvm/kvm_svm.h @@ -43,6 +43,14 @@ struct vcpu_svm { u32 *msrpm; struct vmcb *hsave; u64 hsave_msr; + + u64 nested_vmcb; + + /* These are the merged vectors */ + u32 *nested_msrpm; + + /* gpa pointers to the real vectors */ + u64 nested_vmcb_msrpm; }; #endif diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index a83c94eb577..fad187cbfab 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -77,6 +77,11 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) return container_of(vcpu, struct vcpu_svm, vcpu); } +static inline bool is_nested(struct vcpu_svm *svm) +{ + return svm->nested_vmcb; +} + static unsigned long iopm_base; struct kvm_ldttss_desc { @@ -601,6 +606,7 @@ static void init_vmcb(struct vcpu_svm *svm) } force_new_asid(&svm->vcpu); + svm->nested_vmcb = 0; svm->vcpu.arch.hflags = HF_GIF_MASK; } @@ -627,6 +633,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) struct page *page; struct page *msrpm_pages; struct page *hsave_page; + struct page *nested_msrpm_pages; int err; svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); @@ -649,6 +656,11 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!msrpm_pages) goto uninit; + + nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); + if (!nested_msrpm_pages) + goto uninit; + svm->msrpm = page_address(msrpm_pages); svm_vcpu_init_msrpm(svm->msrpm); @@ -657,6 +669,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) goto uninit; svm->hsave = page_address(hsave_page); + svm->nested_msrpm = page_address(nested_msrpm_pages); + svm->vmcb = page_address(page); clear_page(svm->vmcb); svm->vmcb_pa =
page_to_pfn(page) << PAGE_SHIFT; @@ -687,6 +701,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); __free_page(virt_to_page(svm->hsave)); + __free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); } @@ -1243,6 +1258,123 @@ static int nested_svm_do(struct vcpu_svm *svm, return retval; } + +static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1, + void *arg2, void *opaque) +{ + int i; + u32 *nested_msrpm = (u32*)arg1; + for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++) + svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i]; + svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm); + + return 0; +} + +static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1, + void *arg2, void *opaque) +{ + struct vmcb *nested_vmcb = (struct vmcb *)arg1; + struct vmcb *hsave = svm->hsave; + + /* nested_vmcb is our indicator if nested SVM is activated */ + svm->nested_vmcb = svm->vmcb->save.rax; + + /* Clear internal status */ + svm->vcpu.arch.exception.pending = false; + + /* Save the old vmcb, so we don't need to pick what we save, but + can restore everything when a VMEXIT occurs */ + memcpy(hsave, svm->vmcb, sizeof(struct vmcb)); + /* We need to remember the original CR3 in the SPT case */ + if (!npt_enabled) + hsave->save.cr3 = svm->vcpu.arch.cr3; + hsave->save.cr4 = svm->vcpu.arch.cr4; + hsave->save.rip = svm->next_rip; + + if (svm->vmcb->save.rflags & X86_EFLAGS_IF) + svm->vcpu.arch.hflags |= HF_HIF_MASK; + else + svm->vcpu.arch.hflags &= ~HF_HIF_MASK; + + /* Load the nested guest state */ + svm->vmcb->save.es = nested_vmcb->save.es; + svm->vmcb->save.cs = nested_vmcb->save.cs; + svm->vmcb->save.ss = nested_vmcb->save.ss; + svm->vmcb->save.ds = nested_vmcb->save.ds; + svm->vmcb->save.gdtr = nested_vmcb->save.gdtr; + svm->vmcb->save.idtr = nested_vmcb->save.idtr; + svm->vmcb->save.rflags = nested_vmcb->save.rflags; + svm_set_efer(&svm->vcpu, nested_vmcb->save.efer); + svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0); + svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4); + if (npt_enabled) { + svm->vmcb->save.cr3 = nested_vmcb->save.cr3; + svm->vcpu.arch.cr3 = nested_vmcb->save.cr3; + } else { + kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); + kvm_mmu_reset_context(&svm->vcpu); + } + svm->vmcb->save.cr2 = nested_vmcb->save.cr2; + kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax); + kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp); + kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip); + /* In case we don't even reach vcpu_run, the fields are not updated */ + svm->vmcb->save.rax = nested_vmcb->save.rax; + svm->vmcb->save.rsp = nested_vmcb->save.rsp; + svm->vmcb->save.rip = nested_vmcb->save.rip; + svm->vmcb->save.dr7 = nested_vmcb->save.dr7; + svm->vmcb->save.dr6 = nested_vmcb->save.dr6; + svm->vmcb->save.cpl = nested_vmcb->save.cpl; + + /* We don't want a nested guest to be more powerful than the guest, + so all intercepts are ORed */ + svm->vmcb->control.intercept_cr_read |= + nested_vmcb->control.intercept_cr_read; + svm->vmcb->control.intercept_cr_write |= + nested_vmcb->control.intercept_cr_write; + svm->vmcb->control.intercept_dr_read |= + nested_vmcb->control.intercept_dr_read; + svm->vmcb->control.intercept_dr_write |= + nested_vmcb->control.intercept_dr_write; + svm->vmcb->control.intercept_exceptions |= + 
nested_vmcb->control.intercept_exceptions; + + svm->vmcb->control.intercept |= nested_vmcb->control.intercept; + + svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa; + + force_new_asid(&svm->vcpu); + svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info; + svm->vmcb->control.exit_int_info_err = nested_vmcb->control.exit_int_info_err; + svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; + if (nested_vmcb->control.int_ctl & V_IRQ_MASK) { + nsvm_printk("nSVM Injecting Interrupt: 0x%x\n", + nested_vmcb->control.int_ctl); + } + if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) + svm->vcpu.arch.hflags |= HF_VINTR_MASK; + else + svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; + + nsvm_printk("nSVM exit_int_info: 0x%x | int_state: 0x%x\n", + nested_vmcb->control.exit_int_info, + nested_vmcb->control.int_state); + + svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; + svm->vmcb->control.int_state = nested_vmcb->control.int_state; + svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; + if (nested_vmcb->control.event_inj & SVM_EVTINJ_VALID) + nsvm_printk("Injecting Event: 0x%x\n", + nested_vmcb->control.event_inj); + svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; + svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; + + svm->vcpu.arch.hflags |= HF_GIF_MASK; + + return 0; +} + static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) { to_vmcb->save.fs = from_vmcb->save.fs; @@ -1299,6 +1431,26 @@ static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) return 1; } +static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + nsvm_printk("VMrun\n"); + if (nested_svm_check_permissions(svm)) + return 1; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; + skip_emulated_instruction(&svm->vcpu); + + if (nested_svm_do(svm, svm->vmcb->save.rax, 0, + NULL, nested_svm_vmrun)) + return 1; + + if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0, + NULL, nested_svm_vmrun_msrpm)) + return 1; + + return 1; +} + static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { if (nested_svm_check_permissions(svm)) @@ -1632,7 +1784,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, [SVM_EXIT_MSR] = msr_interception, [SVM_EXIT_TASK_SWITCH] = task_switch_interception, [SVM_EXIT_SHUTDOWN] = shutdown_interception, - [SVM_EXIT_VMRUN] = invalid_op_interception, + [SVM_EXIT_VMRUN] = vmrun_interception, [SVM_EXIT_VMMCALL] = vmmcall_interception, [SVM_EXIT_VMLOAD] = vmload_interception, [SVM_EXIT_VMSAVE] = vmsave_interception, @@ -1939,7 +2091,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) svm->host_cr2 = kvm_read_cr2(); svm->host_dr6 = read_dr6(); svm->host_dr7 = read_dr7(); - svm->vmcb->save.cr2 = vcpu->arch.cr2; + if (!is_nested(svm)) + svm->vmcb->save.cr2 = vcpu->arch.cr2; /* required for live migration with NPT */ if (npt_enabled) svm->vmcb->save.cr3 = vcpu->arch.cr3; -- cgit v1.2.3 From cf74a78b229d07f77416d2fe1f029f183a8a31df Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:08 +0100 Subject: KVM: SVM: Add VMEXIT handler and intercepts This adds the #VMEXIT intercept, so we return to the level 1 guest when something happens in the level 2 guest that should be handled by the level 1 guest.
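The heart of the handler is a predicate over the level 1 guest's VMCB intercept bits: an exit is reflected to the level 1 guest only if it asked to intercept that event. Condensed (MSR bitmap handling and the CR/DR write ranges are omitted here for brevity):

/* Condensed sketch of "does L1 want this exit?"; the real code walks
 * the same ranges via nested_svm_do(). */
static int l1_intercepts_exit(struct vmcb *l1_vmcb, u32 exit_code)
{
	struct vmcb_control_area *c = &l1_vmcb->control;

	if (exit_code >= SVM_EXIT_READ_CR0 && exit_code <= SVM_EXIT_READ_CR8)
		return c->intercept_cr_read &
		       (1 << (exit_code - SVM_EXIT_READ_CR0));
	if (exit_code >= SVM_EXIT_EXCP_BASE &&
	    exit_code <= SVM_EXIT_EXCP_BASE + 0x1f)
		return c->intercept_exceptions &
		       (1 << (exit_code - SVM_EXIT_EXCP_BASE));
	return (c->intercept & (1ULL << (exit_code - SVM_EXIT_INTR))) != 0;
}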
v2 implements HIF handling and cleans up exception interception v3 adds support for V_INTR_MASKING_MASK v4 uses the host page hsave v5 removes IOPM merging code v6 moves mmu code out of the atomic section Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 293 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 293 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index fad187cbfab..4cb2920b152 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -72,6 +72,13 @@ module_param(npt, int, S_IRUGO); static void kvm_reput_irq(struct vcpu_svm *svm); static void svm_flush_tlb(struct kvm_vcpu *vcpu); +static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override); +static int nested_svm_vmexit(struct vcpu_svm *svm); +static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb, + void *arg2, void *opaque); +static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, + bool has_error_code, u32 error_code); + static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_svm, vcpu); @@ -221,6 +228,11 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, { struct vcpu_svm *svm = to_svm(vcpu); + /* If we are within a nested VM we'd better #VMEXIT and let the + guest handle the exception */ + if (nested_svm_check_exception(svm, nr, has_error_code, error_code)) + return; + svm->vmcb->control.event_inj = nr | SVM_EVTINJ_VALID | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0) @@ -1198,6 +1210,46 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm) return 0; } +static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, + bool has_error_code, u32 error_code) +{ + if (is_nested(svm)) { + svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; + svm->vmcb->control.exit_code_hi = 0; + svm->vmcb->control.exit_info_1 = error_code; + svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; + if (nested_svm_exit_handled(svm, false)) { + nsvm_printk("VMexit -> EXCP 0x%x\n", nr); + + nested_svm_vmexit(svm); + return 1; + } + } + + return 0; +} + +static inline int nested_svm_intr(struct vcpu_svm *svm) +{ + if (is_nested(svm)) { + if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) + return 0; + + if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) + return 0; + + svm->vmcb->control.exit_code = SVM_EXIT_INTR; + + if (nested_svm_exit_handled(svm, false)) { + nsvm_printk("VMexit -> INTR\n"); + nested_svm_vmexit(svm); + return 1; + } + } + + return 0; +} + static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa) { struct page *page; @@ -1258,6 +1310,228 @@ static int nested_svm_do(struct vcpu_svm *svm, return retval; } +static int nested_svm_exit_handled_real(struct vcpu_svm *svm, + void *arg1, + void *arg2, + void *opaque) +{ + struct vmcb *nested_vmcb = (struct vmcb *)arg1; + bool kvm_overrides = *(bool *)opaque; + u32 exit_code = svm->vmcb->control.exit_code; + + if (kvm_overrides) { + switch (exit_code) { + case SVM_EXIT_INTR: + case SVM_EXIT_NMI: + return 0; + /* For now we are always handling NPFs when using them */ + case SVM_EXIT_NPF: + if (npt_enabled) + return 0; + break; + /* When we're shadowing, trap PFs */ + case SVM_EXIT_EXCP_BASE + PF_VECTOR: + if (!npt_enabled) + return 0; + break; + default: + break; + } + } + + switch (exit_code) { + case SVM_EXIT_READ_CR0 ... 
SVM_EXIT_READ_CR8: { + u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0); + if (nested_vmcb->control.intercept_cr_read & cr_bits) + return 1; + break; + } + case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: { + u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0); + if (nested_vmcb->control.intercept_cr_write & cr_bits) + return 1; + break; + } + case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: { + u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0); + if (nested_vmcb->control.intercept_dr_read & dr_bits) + return 1; + break; + } + case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: { + u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0); + if (nested_vmcb->control.intercept_dr_write & dr_bits) + return 1; + break; + } + case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: { + u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE); + if (nested_vmcb->control.intercept_exceptions & excp_bits) + return 1; + break; + } + default: { + u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR); + nsvm_printk("exit code: 0x%x\n", exit_code); + if (nested_vmcb->control.intercept & exit_bits) + return 1; + } + } + + return 0; +} + +static int nested_svm_exit_handled_msr(struct vcpu_svm *svm, + void *arg1, void *arg2, + void *opaque) +{ + struct vmcb *nested_vmcb = (struct vmcb *)arg1; + u8 *msrpm = (u8 *)arg2; + u32 t0, t1; + u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; + u32 param = svm->vmcb->control.exit_info_1 & 1; + + if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT))) + return 0; + + switch(msr) { + case 0 ... 0x1fff: + t0 = (msr * 2) % 8; + t1 = msr / 8; + break; + case 0xc0000000 ... 0xc0001fff: + t0 = (8192 + msr - 0xc0000000) * 2; + t1 = (t0 / 8); + t0 %= 8; + break; + case 0xc0010000 ... 0xc0011fff: + t0 = (16384 + msr - 0xc0010000) * 2; + t1 = (t0 / 8); + t0 %= 8; + break; + default: + return 1; + break; + } + if (msrpm[t1] & ((1 << param) << t0)) + return 1; + + return 0; +} + +static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override) +{ + bool k = kvm_override; + + switch (svm->vmcb->control.exit_code) { + case SVM_EXIT_MSR: + return nested_svm_do(svm, svm->nested_vmcb, + svm->nested_vmcb_msrpm, NULL, + nested_svm_exit_handled_msr); + default: break; + } + + return nested_svm_do(svm, svm->nested_vmcb, 0, &k, + nested_svm_exit_handled_real); +} + +static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1, + void *arg2, void *opaque) +{ + struct vmcb *nested_vmcb = (struct vmcb *)arg1; + struct vmcb *hsave = svm->hsave; + u64 nested_save[] = { nested_vmcb->save.cr0, + nested_vmcb->save.cr3, + nested_vmcb->save.cr4, + nested_vmcb->save.efer, + nested_vmcb->control.intercept_cr_read, + nested_vmcb->control.intercept_cr_write, + nested_vmcb->control.intercept_dr_read, + nested_vmcb->control.intercept_dr_write, + nested_vmcb->control.intercept_exceptions, + nested_vmcb->control.intercept, + nested_vmcb->control.msrpm_base_pa, + nested_vmcb->control.iopm_base_pa, + nested_vmcb->control.tsc_offset }; + + /* Give the current vmcb to the guest */ + memcpy(nested_vmcb, svm->vmcb, sizeof(struct vmcb)); + nested_vmcb->save.cr0 = nested_save[0]; + if (!npt_enabled) + nested_vmcb->save.cr3 = nested_save[1]; + nested_vmcb->save.cr4 = nested_save[2]; + nested_vmcb->save.efer = nested_save[3]; + nested_vmcb->control.intercept_cr_read = nested_save[4]; + nested_vmcb->control.intercept_cr_write = nested_save[5]; + nested_vmcb->control.intercept_dr_read = nested_save[6]; + nested_vmcb->control.intercept_dr_write = nested_save[7]; + nested_vmcb->control.intercept_exceptions = 
nested_save[8]; + nested_vmcb->control.intercept = nested_save[9]; + nested_vmcb->control.msrpm_base_pa = nested_save[10]; + nested_vmcb->control.iopm_base_pa = nested_save[11]; + nested_vmcb->control.tsc_offset = nested_save[12]; + + /* We always set V_INTR_MASKING and remember the old value in hflags */ + if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) + nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; + + if ((nested_vmcb->control.int_ctl & V_IRQ_MASK) && + (nested_vmcb->control.int_vector)) { + nsvm_printk("WARNING: IRQ 0x%x still enabled on #VMEXIT\n", + nested_vmcb->control.int_vector); + } + + /* Restore the original control entries */ + svm->vmcb->control = hsave->control; + + /* Kill any pending exceptions */ + if (svm->vcpu.arch.exception.pending == true) + nsvm_printk("WARNING: Pending Exception\n"); + svm->vcpu.arch.exception.pending = false; + + /* Restore selected save entries */ + svm->vmcb->save.es = hsave->save.es; + svm->vmcb->save.cs = hsave->save.cs; + svm->vmcb->save.ss = hsave->save.ss; + svm->vmcb->save.ds = hsave->save.ds; + svm->vmcb->save.gdtr = hsave->save.gdtr; + svm->vmcb->save.idtr = hsave->save.idtr; + svm->vmcb->save.rflags = hsave->save.rflags; + svm_set_efer(&svm->vcpu, hsave->save.efer); + svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); + svm_set_cr4(&svm->vcpu, hsave->save.cr4); + if (npt_enabled) { + svm->vmcb->save.cr3 = hsave->save.cr3; + svm->vcpu.arch.cr3 = hsave->save.cr3; + } else { + kvm_set_cr3(&svm->vcpu, hsave->save.cr3); + } + kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax); + kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp); + kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip); + svm->vmcb->save.dr7 = 0; + svm->vmcb->save.cpl = 0; + svm->vmcb->control.exit_int_info = 0; + + svm->vcpu.arch.hflags &= ~HF_GIF_MASK; + /* Exit nested SVM mode */ + svm->nested_vmcb = 0; + + return 0; +} + +static int nested_svm_vmexit(struct vcpu_svm *svm) +{ + nsvm_printk("VMexit\n"); + if (nested_svm_do(svm, svm->nested_vmcb, 0, + NULL, nested_svm_vmexit_real)) + return 1; + + kvm_mmu_reset_context(&svm->vcpu); + kvm_mmu_load(&svm->vcpu); + + return 0; +} static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1, void *arg2, void *opaque) @@ -1805,6 +2079,17 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip, (u32)((u64)svm->vmcb->save.rip >> 32), entryexit); + if (is_nested(svm)) { + nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n", + exit_code, svm->vmcb->control.exit_info_1, + svm->vmcb->control.exit_info_2, svm->vmcb->save.rip); + if (nested_svm_exit_handled(svm, true)) { + nested_svm_vmexit(svm); + nsvm_printk("-> #VMEXIT\n"); + return 1; + } + } + if (npt_enabled) { int mmu_reload = 0; if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) { @@ -1892,6 +2177,8 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq) { struct vcpu_svm *svm = to_svm(vcpu); + nested_svm_intr(svm); + svm_inject_irq(svm, irq); } @@ -1937,6 +2224,9 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu) if (!kvm_cpu_has_interrupt(vcpu)) goto out; + if (nested_svm_intr(svm)) + goto out; + if (!(svm->vcpu.arch.hflags & HF_GIF_MASK)) goto out; @@ -1989,6 +2279,9 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, struct vcpu_svm *svm = to_svm(vcpu); struct vmcb_control_area *control = &svm->vmcb->control; + if (nested_svm_intr(svm)) + return; + svm->vcpu.arch.interrupt_window_open = (!(control->int_state & 
SVM_INTERRUPT_SHADOW_MASK) && (svm->vmcb->save.rflags & X86_EFLAGS_IF) && -- cgit v1.2.3 From eb6f302edff9da963e687bf6c15dcf88843b1c3b Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 25 Nov 2008 20:17:09 +0100 Subject: KVM: SVM: Allow read access to MSR_VM_CR KVM tries to read the VM_CR MSR to find out if SVM was disabled by the BIOS. So implement read support for this MSR to make nested SVM work. Signed-off-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 4cb2920b152..df5b4119266 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1869,6 +1869,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) case MSR_VM_HSAVE_PA: *data = svm->hsave_msr; break; + case MSR_VM_CR: + *data = 0; + break; default: return kvm_get_msr_common(vcpu, ecx, data); } -- cgit v1.2.3 From 236de05553a7ef8f6940de8686ae9bf1272cd2cf Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:10 +0100 Subject: KVM: SVM: Allow setting the SVME bit Normally setting the SVME bit in EFER is not allowed, as we did not support SVM. Now that we do, we should also allow enabling SVM mode. v2 comes as last patch, so we don't enable half-ready code v4 introduces a module option to enable SVM v6 warns that nesting is enabled Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index df5b4119266..0fbbde54eca 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -69,6 +69,9 @@ static int npt = 1; module_param(npt, int, S_IRUGO); +static int nested = 0; +module_param(nested, int, S_IRUGO); + static void kvm_reput_irq(struct vcpu_svm *svm); static void svm_flush_tlb(struct kvm_vcpu *vcpu); @@ -443,6 +446,11 @@ static __init int svm_hardware_setup(void) if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); + if (nested) { + printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); + kvm_enable_efer_bits(EFER_SVME); + } + for_each_online_cpu(cpu) { r = svm_cpu_init(cpu); if (r) -- cgit v1.2.3 From d80174745ba3938bc6abb8f95ed670ac0631a182 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 25 Nov 2008 20:17:11 +0100 Subject: KVM: SVM: Only allow setting of EFER_SVME when CPUID SVM is set Userspace has to tell the kernel module somehow that nested SVM should be used. The easiest way that doesn't break anything I could think of is to implement if (cpuid & svm) allow write to efer else deny write to efer Old userspaces mask the SVM capability bit, so they don't break. In order to find out that the SVM capability is set, I had to split kvm_emulate_cpuid into a finding part and an emulating part.
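Sketched, the pseudo-code above turns into a lookup of CPUID leaf 0x80000001 and a test of the SVM bit in ECX before the EFER write is accepted (mirroring set_efer() in the diff below):

/* Sketch: refuse to set EFER.SVME unless the guest's CPUID advertises SVM. */
if (efer & EFER_SVME) {
	struct kvm_cpuid_entry2 *feat;

	feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
		kvm_inject_gp(vcpu, 0);		/* deny the write with #GP */
		return;
	}
}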
(introduced in v6) Acked-by: Joerg Roedel Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 58 +++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 16 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 99165a961f0..b5e9932e0f6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -69,6 +69,8 @@ static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL; static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); +struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, + u32 function, u32 index); struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); @@ -173,6 +175,7 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, u32 error_code) { ++vcpu->stat.pf_guest; + if (vcpu->arch.exception.pending) { if (vcpu->arch.exception.nr == PF_VECTOR) { printk(KERN_DEBUG "kvm: inject_page_fault:" @@ -442,6 +445,11 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_get_cr8); +static inline u32 bit(int bitno) +{ + return 1 << (bitno & 31); +} + /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. @@ -481,6 +489,17 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) return; } + if (efer & EFER_SVME) { + struct kvm_cpuid_entry2 *feat; + + feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); + if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { + printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n"); + kvm_inject_gp(vcpu, 0); + return; + } + } + kvm_x86_ops->set_efer(vcpu, efer); efer &= ~EFER_LMA; @@ -1181,11 +1200,6 @@ out: return r; } -static inline u32 bit(int bitno) -{ - return 1 << (bitno & 31); -} - static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, u32 index) { @@ -1228,7 +1242,8 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, const u32 kvm_supported_word3_x86_features = bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16); const u32 kvm_supported_word6_x86_features = - bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY); + bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) | + bit(X86_FEATURE_SVM); /* all func 2 cpuid_count() should be called on the same cpu */ get_cpu(); @@ -2832,20 +2847,15 @@ static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e, return 1; } -void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) +struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, + u32 function, u32 index) { int i; - u32 function, index; - struct kvm_cpuid_entry2 *e, *best; + struct kvm_cpuid_entry2 *best = NULL; - function = kvm_register_read(vcpu, VCPU_REGS_RAX); - index = kvm_register_read(vcpu, VCPU_REGS_RCX); - kvm_register_write(vcpu, VCPU_REGS_RAX, 0); - kvm_register_write(vcpu, VCPU_REGS_RBX, 0); - kvm_register_write(vcpu, VCPU_REGS_RCX, 0); - kvm_register_write(vcpu, VCPU_REGS_RDX, 0); - best = NULL; for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { + struct kvm_cpuid_entry2 *e; + e = &vcpu->arch.cpuid_entries[i]; if (is_matching_cpuid_entry(e, function, index)) { if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) @@ -2860,6 +2870,22 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) if (!best || e->function > best->function) best = e; } + + return best; +} + +void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) +{ + u32 function, index; + struct kvm_cpuid_entry2 *best; + + function = kvm_register_read(vcpu, VCPU_REGS_RAX); + index = 
kvm_register_read(vcpu, VCPU_REGS_RCX); + kvm_register_write(vcpu, VCPU_REGS_RAX, 0); + kvm_register_write(vcpu, VCPU_REGS_RBX, 0); + kvm_register_write(vcpu, VCPU_REGS_RCX, 0); + kvm_register_write(vcpu, VCPU_REGS_RDX, 0); + best = kvm_find_cpuid_entry(vcpu, function, index); if (best) { kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax); kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx); -- cgit v1.2.3 From 8ab2d2e231062814bd89bba2d6d92563190aa2bb Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 15 Dec 2008 13:52:10 +0100 Subject: KVM: VMX: Support for injecting software exceptions VMX differentiates between processor and software generated exceptions when injecting them into the guest. Extend vmx_queue_exception accordingly (and refactor related constants) so that we can use this service reliably for the new guest debugging framework. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/include/asm/vmx.h | 3 ++- arch/x86/kvm/vmx.c | 35 ++++++++++++++++++++--------------- 2 files changed, 22 insertions(+), 16 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index d0238e6151d..32159f034ef 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -270,8 +270,9 @@ enum vmcs_field { #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ -#define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ +#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ +#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ /* GUEST_INTERRUPTIBILITY_INFO flags. */ #define GUEST_INTR_STATE_STI 0x00000001 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7611af57682..1d974c1eaa7 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -189,21 +189,21 @@ static inline int is_page_fault(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == - (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); + (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); } static inline int is_no_device(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == - (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); + (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); } static inline int is_invalid_opcode(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == - (INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); + (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); } static inline int is_external_interrupt(u32 intr_info) @@ -747,29 +747,33 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error_code, u32 error_code) { struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 intr_info = nr | INTR_INFO_VALID_MASK; - if (has_error_code) + if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); + intr_info |= INTR_INFO_DELIVER_CODE_MASK; + } if (vcpu->arch.rmode.active) { vmx->rmode.irq.pending = true; vmx->rmode.irq.vector = nr; vmx->rmode.irq.rip = kvm_rip_read(vcpu); - if (nr == BP_VECTOR) + if (nr == BP_VECTOR || nr == OF_VECTOR) vmx->rmode.irq.rip++; - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, - nr | INTR_TYPE_SOFT_INTR - | (has_error_code ? 
INTR_INFO_DELIVER_CODE_MASK : 0) - | INTR_INFO_VALID_MASK); + intr_info |= INTR_TYPE_SOFT_INTR; + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); return; } - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, - nr | INTR_TYPE_EXCEPTION - | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0) - | INTR_INFO_VALID_MASK); + if (nr == BP_VECTOR || nr == OF_VECTOR) { + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); + intr_info |= INTR_TYPE_SOFT_EXCEPTION; + } else + intr_info |= INTR_TYPE_HARD_EXCEPTION; + + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); } static bool vmx_exception_injected(struct kvm_vcpu *vcpu) @@ -2650,7 +2654,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == - (INTR_TYPE_EXCEPTION | 1)) { + (INTR_TYPE_HARD_EXCEPTION | 1)) { kvm_run->exit_reason = KVM_EXIT_DEBUG; return 0; } @@ -3238,7 +3242,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx) vmx->vcpu.arch.nmi_injected = false; } kvm_clear_exception_queue(&vmx->vcpu); - if (idtv_info_valid && type == INTR_TYPE_EXCEPTION) { + if (idtv_info_valid && (type == INTR_TYPE_HARD_EXCEPTION || + type == INTR_TYPE_SOFT_EXCEPTION)) { if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { error = vmcs_read32(IDT_VECTORING_ERROR_CODE); kvm_queue_exception_e(&vmx->vcpu, vector, error); -- cgit v1.2.3 From d0bfb940ecabf0b44fb1fd80d8d60594e569e5ec Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 15 Dec 2008 13:52:10 +0100 Subject: KVM: New guest debug interface This rips out the support for KVM_DEBUG_GUEST and introduces a new IOCTL instead: KVM_SET_GUEST_DEBUG. The IOCTL payload consists of a generic part, controlling the "main switch" and the single-step feature. The arch-specific part adds an x86 interface for intercepting both types of debug exceptions separately and re-injecting them when the host was not interested. Moreover, the foundation for guest debugging via debug registers is laid. To signal breakpoint events properly back to userland, an arch-specific data block is now returned along with KVM_EXIT_DEBUG. For x86, the arch block contains the PC, the debug exception, and relevant debug registers to tell debug events properly apart. The availability of this new interface is signaled by KVM_CAP_SET_GUEST_DEBUG. Empty stubs for not yet supported archs are provided. Note that both SVM and VT-x are supported, but only the latter has been tested so far. Based on the experience with all those VT-x corner cases, I would be fairly surprised if SVM works out of the box.
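As a rough userspace sketch of how a debugger stub might drive the new IOCTL (illustrative only, not part of this patch; the generic struct kvm_guest_debug wrapper and the KVM_GUESTDBG_ENABLE flag live in the arch-independent half of <linux/kvm.h>, outside this diff):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Ask KVM to intercept guest #BP (int3) instead of injecting it. */
static int enable_sw_breakpoints(int vcpu_fd)
{
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP,
	};

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}

/* After KVM_RUN, an intercepted breakpoint surfaces as KVM_EXIT_DEBUG
 * together with the new arch-specific exit block. */
static void report_debug_exit(struct kvm_run *run)
{
	if (run->exit_reason == KVM_EXIT_DEBUG)
		printf("debug exit: exception %u at pc 0x%llx\n",
		       run->debug.arch.exception,
		       (unsigned long long)run->debug.arch.pc);
}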
Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm.h | 18 ++++++++ arch/x86/include/asm/kvm_host.h | 9 +--- arch/x86/kvm/svm.c | 50 +++++++++++++++++++++- arch/x86/kvm/vmx.c | 93 ++++++++++++++++------------------------- arch/x86/kvm/x86.c | 14 ++++--- 5 files changed, 111 insertions(+), 73 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 886c9402ec4..32eb96c7ca2 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -212,6 +212,24 @@ struct kvm_pit_channel_state { __s64 count_load_time; }; +struct kvm_debug_exit_arch { + __u32 exception; + __u32 pad; + __u64 pc; + __u64 dr6; + __u64 dr7; +}; + +#define KVM_GUESTDBG_USE_SW_BP 0x00010000 +#define KVM_GUESTDBG_USE_HW_BP 0x00020000 +#define KVM_GUESTDBG_INJECT_DB 0x00040000 +#define KVM_GUESTDBG_INJECT_BP 0x00080000 + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { + __u64 debugreg[8]; +}; + struct kvm_pit_state { struct kvm_pit_channel_state channels[3]; }; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 53779309514..c430cd580ee 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -135,12 +135,6 @@ enum { #define KVM_NR_MEM_OBJS 40 -struct kvm_guest_debug { - int enabled; - unsigned long bp[4]; - int singlestep; -}; - /* * We don't want allocation failures within the mmu code, so we preallocate * enough memory for a single page fault in a cache. @@ -448,8 +442,7 @@ struct kvm_x86_ops { void (*vcpu_put)(struct kvm_vcpu *vcpu); int (*set_guest_debug)(struct kvm_vcpu *vcpu, - struct kvm_debug_guest *dbg); - void (*guest_debug_pre)(struct kvm_vcpu *vcpu); + struct kvm_guest_debug *dbg); int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 0fbbde54eca..88d9062f454 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -968,9 +968,32 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, } -static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) +static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - return -EOPNOTSUPP; + int old_debug = vcpu->guest_debug; + struct vcpu_svm *svm = to_svm(vcpu); + + vcpu->guest_debug = dbg->control; + + svm->vmcb->control.intercept_exceptions &= + ~((1 << DB_VECTOR) | (1 << BP_VECTOR)); + if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { + if (vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) + svm->vmcb->control.intercept_exceptions |= + 1 << DB_VECTOR; + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) + svm->vmcb->control.intercept_exceptions |= + 1 << BP_VECTOR; + } else + vcpu->guest_debug = 0; + + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF; + else if (old_debug & KVM_GUESTDBG_SINGLESTEP) + svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); + + return 0; } static int svm_get_irq(struct kvm_vcpu *vcpu) @@ -1094,6 +1117,27 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); } +static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + if (!(svm->vcpu.guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { + kvm_queue_exception(&svm->vcpu, DB_VECTOR); + return 
1; + } + kvm_run->exit_reason = KVM_EXIT_DEBUG; + kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; + kvm_run->debug.arch.exception = DB_VECTOR; + return 0; +} + +static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) +{ + kvm_run->exit_reason = KVM_EXIT_DEBUG; + kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; + kvm_run->debug.arch.exception = BP_VECTOR; + return 0; +} + static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { int er; @@ -2050,6 +2094,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, [SVM_EXIT_WRITE_DR3] = emulate_on_interception, [SVM_EXIT_WRITE_DR5] = emulate_on_interception, [SVM_EXIT_WRITE_DR7] = emulate_on_interception, + [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, + [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1d974c1eaa7..f55690ddb3a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -480,8 +480,13 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) eb = (1u << PF_VECTOR) | (1u << UD_VECTOR); if (!vcpu->fpu_active) eb |= 1u << NM_VECTOR; - if (vcpu->guest_debug.enabled) - eb |= 1u << DB_VECTOR; + if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { + if (vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) + eb |= 1u << DB_VECTOR; + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) + eb |= 1u << BP_VECTOR; + } if (vcpu->arch.rmode.active) eb = ~0; if (vm_need_ept()) @@ -1003,40 +1008,23 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) } } -static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) +static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - unsigned long dr7 = 0x400; - int old_singlestep; - - old_singlestep = vcpu->guest_debug.singlestep; - - vcpu->guest_debug.enabled = dbg->enabled; - if (vcpu->guest_debug.enabled) { - int i; - - dr7 |= 0x200; /* exact */ - for (i = 0; i < 4; ++i) { - if (!dbg->breakpoints[i].enabled) - continue; - vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address; - dr7 |= 2 << (i*2); /* global enable */ - dr7 |= 0 << (i*4+16); /* execution breakpoint */ - } - - vcpu->guest_debug.singlestep = dbg->singlestep; - } else - vcpu->guest_debug.singlestep = 0; + int old_debug = vcpu->guest_debug; + unsigned long flags; - if (old_singlestep && !vcpu->guest_debug.singlestep) { - unsigned long flags; + vcpu->guest_debug = dbg->control; + if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) + vcpu->guest_debug = 0; - flags = vmcs_readl(GUEST_RFLAGS); + flags = vmcs_readl(GUEST_RFLAGS); + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; + else if (old_debug & KVM_GUESTDBG_SINGLESTEP) flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); - vmcs_writel(GUEST_RFLAGS, flags); - } + vmcs_writel(GUEST_RFLAGS, flags); update_exception_bitmap(vcpu); - vmcs_writel(GUEST_DR7, dr7); return 0; } @@ -2540,24 +2528,6 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) return 0; } -static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu) -{ - struct kvm_guest_debug *dbg = &vcpu->guest_debug; - - set_debugreg(dbg->bp[0], 0); - set_debugreg(dbg->bp[1], 1); - set_debugreg(dbg->bp[2], 2); - set_debugreg(dbg->bp[3], 3); - - if (dbg->singlestep) { - unsigned long flags; - - flags = vmcs_readl(GUEST_RFLAGS); 
- flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; - vmcs_writel(GUEST_RFLAGS, flags); - } -} - static int handle_rmode_exception(struct kvm_vcpu *vcpu, int vec, u32 err_code) { @@ -2574,9 +2544,17 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, * the required debugging infrastructure rework. */ switch (vec) { - case DE_VECTOR: case DB_VECTOR: + if (vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) + return 0; + kvm_queue_exception(vcpu, vec); + return 1; case BP_VECTOR: + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) + return 0; + /* fall through */ + case DE_VECTOR: case OF_VECTOR: case BR_VECTOR: case UD_VECTOR: @@ -2593,7 +2571,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 intr_info, error_code; + u32 intr_info, ex_no, error_code; unsigned long cr2, rip; u32 vect_info; enum emulation_result er; @@ -2653,14 +2631,16 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return 1; } - if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == - (INTR_TYPE_HARD_EXCEPTION | 1)) { + ex_no = intr_info & INTR_INFO_VECTOR_MASK; + if (ex_no == DB_VECTOR || ex_no == BP_VECTOR) { kvm_run->exit_reason = KVM_EXIT_DEBUG; - return 0; + kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; + kvm_run->debug.arch.exception = ex_no; + } else { + kvm_run->exit_reason = KVM_EXIT_EXCEPTION; + kvm_run->ex.exception = ex_no; + kvm_run->ex.error_code = error_code; } - kvm_run->exit_reason = KVM_EXIT_EXCEPTION; - kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK; - kvm_run->ex.error_code = error_code; return 0; } @@ -3600,7 +3580,6 @@ static struct kvm_x86_ops vmx_x86_ops = { .vcpu_put = vmx_vcpu_put, .set_guest_debug = set_guest_debug, - .guest_debug_pre = kvm_guest_debug_pre, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b5e9932e0f6..e990d164b56 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3005,9 +3005,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) goto out; } - if (vcpu->guest_debug.enabled) - kvm_x86_ops->guest_debug_pre(vcpu); - vcpu->guest_mode = 1; /* * Make sure that guest_mode assignment won't happen after @@ -3218,7 +3215,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) /* * Don't leak debug flags in case they were set for guest debugging */ - if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep) + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF); vcpu_put(vcpu); @@ -3837,8 +3834,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, return 0; } -int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, - struct kvm_debug_guest *dbg) +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) { int r; @@ -3846,6 +3843,11 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, r = kvm_x86_ops->set_guest_debug(vcpu, dbg); + if (dbg->control & KVM_GUESTDBG_INJECT_DB) + kvm_queue_exception(vcpu, DB_VECTOR); + else if (dbg->control & KVM_GUESTDBG_INJECT_BP) + kvm_queue_exception(vcpu, BP_VECTOR); + vcpu_put(vcpu); return r; -- cgit v1.2.3 From 55934c0bd3bb232a9cf902820dd63ad18ed65e49 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 15 Dec 2008 13:52:10 +0100 Subject: KVM: VMX: Allow single-stepping 
when uninterruptible When single-stepping over STI and MOV SS, we must clear the corresponding interruptibility bits in the guest state. Otherwise vmentry fails, as it then expects bit 14 (BS) in the pending debug exceptions field to be set, but that's not correct for the guest debugging case. Note that clearing those bits is safe, as we check for interruptibility based on the original state and do not inject interrupts or NMIs if guest interruptibility was blocked. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f55690ddb3a..c776868ffe4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2478,6 +2478,11 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, { vmx_update_window_states(vcpu); + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_STI | + GUEST_INTR_STATE_MOV_SS); + if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { if (vcpu->arch.interrupt.pending) { enable_nmi_window(vcpu); @@ -3244,6 +3249,11 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) vmx_update_window_states(vcpu); + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_STI | + GUEST_INTR_STATE_MOV_SS); + if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { if (vcpu->arch.interrupt.pending) { enable_nmi_window(vcpu); -- cgit v1.2.3 From 42dbaa5a057736bf8b5c22aa42dbe975bf1080e5 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 15 Dec 2008 13:52:10 +0100 Subject: KVM: x86: Virtualize debug registers So far KVM had only basic x86 debug register support, originally introduced to realize guest debugging that way. The guest itself was not able to use those registers. This patch now adds (almost) full support for guest self-debugging via hardware registers. It refactors the code, moving generic parts out of SVM (VMX was already cleaned up by the KVM_SET_GUEST_DEBUG patches), and it ensures that the registers are properly switched between host and guest. This patch also prepares debug register usage by the host. The latter will (once wired up by the following patch) allow for hardware breakpoints/watchpoints in guest code. If this is enabled, the guest will only see faked debug registers without functionality, but with content reflecting the guest's modifications. Tested on Intel only; SVM /should/ work as well, but who knows... Known limitations: Trapping on tss switch won't work - most probably on Intel. Credits also go to Joerg Roedel - I used his once-posted debugging series as a platform for this patch.
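The DR6/DR7 sanitization used here is easy to check in isolation. A minimal standalone sketch, reusing the mask values from the kvm_host.h hunk below (plain userspace C, purely illustrative):

#include <stdio.h>

#define DR6_FIXED_1	0xffff0ff0ul
#define DR6_VOLATILE	0x0000e00ful
#define DR7_BP_EN_MASK	0x000000fful
#define DR7_FIXED_1	0x00000400ul
#define DR7_VOLATILE	0xffff23fful

int main(void)
{
	unsigned long guest_dr6 = 0xdeadbeef, guest_dr7 = 0x00000403;

	/* Take only the volatile bits from the guest and force the
	 * architecturally fixed bits to their mandated values. */
	unsigned long dr6 = (guest_dr6 & DR6_VOLATILE) | DR6_FIXED_1;
	unsigned long dr7 = (guest_dr7 & DR7_VOLATILE) | DR7_FIXED_1;

	/* Debug registers need switching across the world switch only
	 * while some breakpoint enable bit in DR7 is set. */
	int switch_db_regs = (guest_dr7 & DR7_BP_EN_MASK) != 0;

	printf("dr6=%#lx dr7=%#lx switch=%d\n", dr6, dr7, switch_db_regs);
	return 0;
}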
Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 22 ++++++++ arch/x86/include/asm/vmx.h | 2 +- arch/x86/kvm/kvm_svm.h | 6 --- arch/x86/kvm/svm.c | 116 +++++++++++++++------------------------- arch/x86/kvm/vmx.c | 114 +++++++++++++++++++++++++++++++++------ arch/x86/kvm/x86.c | 29 ++++++++++ 6 files changed, 193 insertions(+), 96 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c430cd580ee..0a4dab25a91 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -135,6 +135,19 @@ enum { #define KVM_NR_MEM_OBJS 40 +#define KVM_NR_DB_REGS 4 + +#define DR6_BD (1 << 13) +#define DR6_BS (1 << 14) +#define DR6_FIXED_1 0xffff0ff0 +#define DR6_VOLATILE 0x0000e00f + +#define DR7_BP_EN_MASK 0x000000ff +#define DR7_GE (1 << 9) +#define DR7_GD (1 << 13) +#define DR7_FIXED_1 0x00000400 +#define DR7_VOLATILE 0xffff23ff + /* * We don't want allocation failures within the mmu code, so we preallocate * enough memory for a single page fault in a cache. @@ -334,6 +347,15 @@ struct kvm_vcpu_arch { struct mtrr_state_type mtrr_state; u32 pat; + + int switch_db_regs; + unsigned long host_db[KVM_NR_DB_REGS]; + unsigned long host_dr6; + unsigned long host_dr7; + unsigned long db[KVM_NR_DB_REGS]; + unsigned long dr6; + unsigned long dr7; + unsigned long eff_db[KVM_NR_DB_REGS]; }; struct kvm_mem_alias { diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 32159f034ef..498f944010b 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -312,7 +312,7 @@ enum vmcs_field { #define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */ #define TYPE_MOV_TO_DR (0 << 4) #define TYPE_MOV_FROM_DR (1 << 4) -#define DEBUG_REG_ACCESS_REG 0xf00 /* 11:8, general purpose reg. */ +#define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. 
*/ /* segment AR */ diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h index 91673413d8f..ed66e4c078d 100644 --- a/arch/x86/kvm/kvm_svm.h +++ b/arch/x86/kvm/kvm_svm.h @@ -18,7 +18,6 @@ static const u32 host_save_user_msrs[] = { }; #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) -#define NUM_DB_REGS 4 struct kvm_vcpu; @@ -29,16 +28,11 @@ struct vcpu_svm { struct svm_cpu_data *svm_data; uint64_t asid_generation; - unsigned long db_regs[NUM_DB_REGS]; - u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; u64 host_gs_base; unsigned long host_cr2; - unsigned long host_db_regs[NUM_DB_REGS]; - unsigned long host_dr6; - unsigned long host_dr7; u32 *msrpm; struct vmcb *hsave; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 88d9062f454..815f50e425a 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -38,9 +38,6 @@ MODULE_LICENSE("GPL"); #define IOPM_ALLOC_ORDER 2 #define MSRPM_ALLOC_ORDER 1 -#define DR7_GD_MASK (1 << 13) -#define DR6_BD_MASK (1 << 13) - #define SEG_TYPE_LDT 2 #define SEG_TYPE_BUSY_TSS16 3 @@ -181,32 +178,6 @@ static inline void kvm_write_cr2(unsigned long val) asm volatile ("mov %0, %%cr2" :: "r" (val)); } -static inline unsigned long read_dr6(void) -{ - unsigned long dr6; - - asm volatile ("mov %%dr6, %0" : "=r" (dr6)); - return dr6; -} - -static inline void write_dr6(unsigned long val) -{ - asm volatile ("mov %0, %%dr6" :: "r" (val)); -} - -static inline unsigned long read_dr7(void) -{ - unsigned long dr7; - - asm volatile ("mov %%dr7, %0" : "=r" (dr7)); - return dr7; -} - -static inline void write_dr7(unsigned long val) -{ - asm volatile ("mov %0, %%dr7" :: "r" (val)); -} - static inline void force_new_asid(struct kvm_vcpu *vcpu) { to_svm(vcpu)->asid_generation--; @@ -695,7 +666,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) clear_page(svm->vmcb); svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; svm->asid_generation = 0; - memset(svm->db_regs, 0, sizeof(svm->db_regs)); init_vmcb(svm); fx_init(&svm->vcpu); @@ -1035,7 +1005,29 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data) static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) { - unsigned long val = to_svm(vcpu)->db_regs[dr]; + struct vcpu_svm *svm = to_svm(vcpu); + unsigned long val; + + switch (dr) { + case 0 ... 3: + val = vcpu->arch.db[dr]; + break; + case 6: + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + val = vcpu->arch.dr6; + else + val = svm->vmcb->save.dr6; + break; + case 7: + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + val = vcpu->arch.dr7; + else + val = svm->vmcb->save.dr7; + break; + default: + val = 0; + } + KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); return val; } @@ -1045,33 +1037,40 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value, { struct vcpu_svm *svm = to_svm(vcpu); - *exception = 0; + KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler); - if (svm->vmcb->save.dr7 & DR7_GD_MASK) { - svm->vmcb->save.dr7 &= ~DR7_GD_MASK; - svm->vmcb->save.dr6 |= DR6_BD_MASK; - *exception = DB_VECTOR; - return; - } + *exception = 0; switch (dr) { case 0 ... 3: - svm->db_regs[dr] = value; + vcpu->arch.db[dr] = value; + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) + vcpu->arch.eff_db[dr] = value; return; case 4 ... 
5: - if (vcpu->arch.cr4 & X86_CR4_DE) { + if (vcpu->arch.cr4 & X86_CR4_DE) *exception = UD_VECTOR; + return; + case 6: + if (value & 0xffffffff00000000ULL) { + *exception = GP_VECTOR; return; } - case 7: { - if (value & ~((1ULL << 32) - 1)) { + vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1; + return; + case 7: + if (value & 0xffffffff00000000ULL) { *exception = GP_VECTOR; return; } - svm->vmcb->save.dr7 = value; + vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1; + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { + svm->vmcb->save.dr7 = vcpu->arch.dr7; + vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK); + } return; - } default: + /* FIXME: Possible case? */ printk(KERN_DEBUG "%s: unexpected dr %u\n", __func__, dr); *exception = UD_VECTOR; @@ -2365,22 +2364,6 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) return 0; } -static void save_db_regs(unsigned long *db_regs) -{ - asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0])); - asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1])); - asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2])); - asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3])); -} - -static void load_db_regs(unsigned long *db_regs) -{ - asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0])); - asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1])); - asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2])); - asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3])); -} - static void svm_flush_tlb(struct kvm_vcpu *vcpu) { force_new_asid(vcpu); @@ -2439,20 +2422,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) gs_selector = kvm_read_gs(); ldt_selector = kvm_read_ldt(); svm->host_cr2 = kvm_read_cr2(); - svm->host_dr6 = read_dr6(); - svm->host_dr7 = read_dr7(); if (!is_nested(svm)) svm->vmcb->save.cr2 = vcpu->arch.cr2; /* required for live migration with NPT */ if (npt_enabled) svm->vmcb->save.cr3 = vcpu->arch.cr3; - if (svm->vmcb->save.dr7 & 0xff) { - write_dr7(0); - save_db_regs(svm->host_db_regs); - load_db_regs(svm->db_regs); - } - clgi(); local_irq_enable(); @@ -2528,16 +2503,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) #endif ); - if ((svm->vmcb->save.dr7 & 0xff)) - load_db_regs(svm->host_db_regs); - vcpu->arch.cr2 = svm->vmcb->save.cr2; vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; - write_dr6(svm->host_dr6); - write_dr7(svm->host_dr7); kvm_write_cr2(svm->host_cr2); kvm_load_fs(fs_selector); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c776868ffe4..0989776ee7b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2311,7 +2311,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) kvm_rip_write(vcpu, 0); kvm_register_write(vcpu, VCPU_REGS_RSP, 0); - /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */ vmcs_writel(GUEST_DR7, 0x400); vmcs_writel(GUEST_GDTR_BASE, 0); @@ -2577,7 +2576,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 intr_info, ex_no, error_code; - unsigned long cr2, rip; + unsigned long cr2, rip, dr6; u32 vect_info; enum emulation_result er; @@ -2637,14 +2636,28 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } ex_no = intr_info & INTR_INFO_VECTOR_MASK; - if (ex_no == DB_VECTOR || ex_no == BP_VECTOR) { + switch (ex_no) { + case DB_VECTOR: + dr6 = vmcs_readl(EXIT_QUALIFICATION); + if (!(vcpu->guest_debug & + (KVM_GUESTDBG_SINGLESTEP | 
KVM_GUESTDBG_USE_HW_BP))) { + vcpu->arch.dr6 = dr6 | DR6_FIXED_1; + kvm_queue_exception(vcpu, DB_VECTOR); + return 1; + } + kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; + kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); + /* fall through */ + case BP_VECTOR: kvm_run->exit_reason = KVM_EXIT_DEBUG; kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; kvm_run->debug.arch.exception = ex_no; - } else { + break; + default: kvm_run->exit_reason = KVM_EXIT_EXCEPTION; kvm_run->ex.exception = ex_no; kvm_run->ex.error_code = error_code; + break; } return 0; } @@ -2784,21 +2797,44 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) unsigned long val; int dr, reg; - /* - * FIXME: this code assumes the host is debugging the guest. - * need to deal with guest debugging itself too. - */ + dr = vmcs_readl(GUEST_DR7); + if (dr & DR7_GD) { + /* + * As the vm-exit takes precedence over the debug trap, we + * need to emulate the latter, either for the host or the + * guest debugging itself. + */ + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { + kvm_run->debug.arch.dr6 = vcpu->arch.dr6; + kvm_run->debug.arch.dr7 = dr; + kvm_run->debug.arch.pc = + vmcs_readl(GUEST_CS_BASE) + + vmcs_readl(GUEST_RIP); + kvm_run->debug.arch.exception = DB_VECTOR; + kvm_run->exit_reason = KVM_EXIT_DEBUG; + return 0; + } else { + vcpu->arch.dr7 &= ~DR7_GD; + vcpu->arch.dr6 |= DR6_BD; + vmcs_writel(GUEST_DR7, vcpu->arch.dr7); + kvm_queue_exception(vcpu, DB_VECTOR); + return 1; + } + } + exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - dr = exit_qualification & 7; - reg = (exit_qualification >> 8) & 15; - if (exit_qualification & 16) { - /* mov from dr */ + dr = exit_qualification & DEBUG_REG_ACCESS_NUM; + reg = DEBUG_REG_ACCESS_REG(exit_qualification); + if (exit_qualification & TYPE_MOV_FROM_DR) { switch (dr) { + case 0 ... 3: + val = vcpu->arch.db[dr]; + break; case 6: - val = 0xffff0ff0; + val = vcpu->arch.dr6; break; case 7: - val = 0x400; + val = vcpu->arch.dr7; break; default: val = 0; @@ -2806,7 +2842,38 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_register_write(vcpu, reg, val); KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); } else { - /* mov to dr */ + val = vcpu->arch.regs[reg]; + switch (dr) { + case 0 ... 3: + vcpu->arch.db[dr] = val; + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) + vcpu->arch.eff_db[dr] = val; + break; + case 4 ... 5: + if (vcpu->arch.cr4 & X86_CR4_DE) + kvm_queue_exception(vcpu, UD_VECTOR); + break; + case 6: + if (val & 0xffffffff00000000ULL) { + kvm_queue_exception(vcpu, GP_VECTOR); + break; + } + vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; + break; + case 7: + if (val & 0xffffffff00000000ULL) { + kvm_queue_exception(vcpu, GP_VECTOR); + break; + } + vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { + vmcs_writel(GUEST_DR7, vcpu->arch.dr7); + vcpu->arch.switch_db_regs = + (val & DR7_BP_EN_MASK); + } + break; + } + KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler); } skip_emulated_instruction(vcpu); return 1; @@ -2957,7 +3024,18 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } tss_selector = exit_qualification; - return kvm_task_switch(vcpu, tss_selector, reason); + if (!kvm_task_switch(vcpu, tss_selector, reason)) + return 0; + + /* clear all local breakpoint enable flags */ + vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55); + + /* + * TODO: What about debug traps on tss switch? 
+ * Are we supposed to inject them and update dr6? + */ + + return 1; } static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) @@ -3342,6 +3420,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) */ vmcs_writel(HOST_CR0, read_cr0()); + set_debugreg(vcpu->arch.dr6, 6); + asm( /* Store host registers */ "push %%"R"dx; push %%"R"bp;" @@ -3436,6 +3516,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); vcpu->arch.regs_dirty = 0; + get_debugreg(vcpu->arch.dr6, 6); + vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); if (vmx->rmode.irq.pending) fixup_rmode_irq(vmx); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e990d164b56..300bc4d42ab 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3025,10 +3025,34 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_guest_enter(); + get_debugreg(vcpu->arch.host_dr6, 6); + get_debugreg(vcpu->arch.host_dr7, 7); + if (unlikely(vcpu->arch.switch_db_regs)) { + get_debugreg(vcpu->arch.host_db[0], 0); + get_debugreg(vcpu->arch.host_db[1], 1); + get_debugreg(vcpu->arch.host_db[2], 2); + get_debugreg(vcpu->arch.host_db[3], 3); + + set_debugreg(0, 7); + set_debugreg(vcpu->arch.eff_db[0], 0); + set_debugreg(vcpu->arch.eff_db[1], 1); + set_debugreg(vcpu->arch.eff_db[2], 2); + set_debugreg(vcpu->arch.eff_db[3], 3); + } KVMTRACE_0D(VMENTRY, vcpu, entryexit); kvm_x86_ops->run(vcpu, kvm_run); + if (unlikely(vcpu->arch.switch_db_regs)) { + set_debugreg(0, 7); + set_debugreg(vcpu->arch.host_db[0], 0); + set_debugreg(vcpu->arch.host_db[1], 1); + set_debugreg(vcpu->arch.host_db[2], 2); + set_debugreg(vcpu->arch.host_db[3], 3); + } + set_debugreg(vcpu->arch.host_dr6, 6); + set_debugreg(vcpu->arch.host_dr7, 7); + vcpu->guest_mode = 0; local_irq_enable(); @@ -4035,6 +4059,11 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = false; + vcpu->arch.switch_db_regs = 0; + memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); + vcpu->arch.dr6 = DR6_FIXED_1; + vcpu->arch.dr7 = DR7_FIXED_1; + return kvm_x86_ops->vcpu_reset(vcpu); } -- cgit v1.2.3 From ae675ef01cd86014acf8da5dee87876b71122495 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 15 Dec 2008 13:52:10 +0100 Subject: KVM: x86: Wire-up hardware breakpoints for guest debugging Add the remaining bits to make use of debug registers also for guest debugging, thus enabling the use of hardware breakpoints and watchpoints. 
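A hypothetical userspace fragment arming one execute breakpoint through the extended arch block could look as follows (the DR7 encoding - global-enable bit for slot 0, R/W and LEN fields of zero selecting an instruction breakpoint - comes from the architecture manuals, not from this patch):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_hw_breakpoint(int vcpu_fd, __u64 guest_addr)
{
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
	};

	dbg.arch.debugreg[0] = guest_addr;	/* DR0: linear address */
	dbg.arch.debugreg[7] = 1 << 1;		/* DR7: global-enable slot 0 */

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}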
Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 5 +++++ arch/x86/kvm/vmx.c | 5 +++++ arch/x86/kvm/x86.c | 14 +++++++++++++- 3 files changed, 23 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 815f50e425a..b3a7a314d55 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -958,6 +958,11 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) } else vcpu->guest_debug = 0; + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + svm->vmcb->save.dr7 = dbg->arch.debugreg[7]; + else + svm->vmcb->save.dr7 = vcpu->arch.dr7; + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF; else if (old_debug & KVM_GUESTDBG_SINGLESTEP) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0989776ee7b..cee81c9a665 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1017,6 +1017,11 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) vcpu->guest_debug = 0; + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]); + else + vmcs_writel(GUEST_DR7, vcpu->arch.dr7); + flags = vmcs_readl(GUEST_RFLAGS); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) flags |= X86_EFLAGS_TF | X86_EFLAGS_RF; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 300bc4d42ab..2477e87b2f8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3861,10 +3861,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - int r; + int i, r; vcpu_load(vcpu); + if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) == + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) { + for (i = 0; i < KVM_NR_DB_REGS; ++i) + vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; + vcpu->arch.switch_db_regs = + (dbg->arch.debugreg[7] & DR7_BP_EN_MASK); + } else { + for (i = 0; i < KVM_NR_DB_REGS; i++) + vcpu->arch.eff_db[i] = vcpu->arch.db[i]; + vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK); + } + r = kvm_x86_ops->set_guest_debug(vcpu, dbg); if (dbg->control & KVM_GUESTDBG_INJECT_DB) -- cgit v1.2.3 From a770f6f28b1a9287189f3dc8333eb694d9a2f0ab Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 21 Dec 2008 19:20:09 +0200 Subject: KVM: MMU: Inherit a shadow page's guest level count from vcpu setup Instead of "calculating" it on every shadow page allocation, set it once when switching modes, and copy it when allocating pages. This doesn't buy us much, but sets up the stage for inheriting more information related to the mmu setup. 
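The copy-instead-of-recompute pattern can be shown standalone with a cut-down version of the role union (hypothetical sketch; the real kvm_mmu_page_role carries more fields):

#include <stdio.h>

union page_role {
	unsigned word;
	struct {
		unsigned glevels:4;
		unsigned level:4;
		unsigned metaphysical:1;
		unsigned access:3;
	};
};

int main(void)
{
	/* Set once, when the mmu mode is switched ... */
	union page_role base_role = { .word = 0 };
	base_role.glevels = 4;			/* e.g. 4-level guest paging */

	/* ... and copied, not recomputed, at each page allocation. */
	union page_role role = base_role;
	role.level = 2;

	/* The packed word doubles as the hash lookup key. */
	printf("role.word = %#x\n", role.word);
	return 0;
}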
Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/mmu.c | 17 +++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0a4dab25a91..28f875f28f5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -244,6 +244,7 @@ struct kvm_mmu { hpa_t root_hpa; int root_level; int shadow_root_level; + union kvm_mmu_page_role base_role; u64 *pae_root; }; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2d4477c7147..f15023c11fe 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1204,8 +1204,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp; struct hlist_node *node, *tmp; - role.word = 0; - role.glevels = vcpu->arch.mmu.root_level; + role = vcpu->arch.mmu.base_role; role.level = level; role.metaphysical = metaphysical; role.access = access; @@ -2251,17 +2250,23 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) static int init_kvm_softmmu(struct kvm_vcpu *vcpu) { + int r; + ASSERT(vcpu); ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); if (!is_paging(vcpu)) - return nonpaging_init_context(vcpu); + r = nonpaging_init_context(vcpu); else if (is_long_mode(vcpu)) - return paging64_init_context(vcpu); + r = paging64_init_context(vcpu); else if (is_pae(vcpu)) - return paging32E_init_context(vcpu); + r = paging32E_init_context(vcpu); else - return paging32_init_context(vcpu); + r = paging32_init_context(vcpu); + + vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level; + + return r; } static int init_kvm_mmu(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 2f0b3d60b2c43aef7cd10169c425c052169c622a Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 21 Dec 2008 19:27:36 +0200 Subject: KVM: MMU: Segregate mmu pages created with different cr4.pge settings Don't allow a vcpu with cr4.pge cleared to use a shadow page created with cr4.pge set; this might cause a cr3 switch not to sync ptes that have the global bit set (the global bit has no effect if !cr4.pge). This can only occur on smp with different cr4.pge settings for different vcpus (since a cr4 change will resync the shadow ptes), but there's no cost to being correct here. Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/x86.c | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 28f875f28f5..c2a01d0513f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -183,6 +183,7 @@ union kvm_mmu_page_role { unsigned metaphysical:1; unsigned access:3; unsigned invalid:1; + unsigned cr4_pge:1; }; }; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2477e87b2f8..873602b5edf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -364,6 +364,7 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) } kvm_x86_ops->set_cr4(vcpu, cr4); vcpu->arch.cr4 = cr4; + vcpu->arch.mmu.base_role.cr4_pge = !!(cr4 & X86_CR4_PGE); kvm_mmu_sync_global(vcpu); kvm_mmu_reset_context(vcpu); } -- cgit v1.2.3 From e2078318042569682d0496cfd7bd962a766e82b1 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 21 Dec 2008 19:36:59 +0200 Subject: KVM: MMU: Initialize a shadow page's global attribute from cr4.pge If cr4.pge is cleared, we ought to treat any ptes in the page as non-global. This allows us to remove the check from set_spte(). 
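Together with the previous patch, the effect can be illustrated with the same cut-down role union: two vcpus differing only in CR4.PGE produce different role words and therefore never share a shadow page (standalone sketch, not kernel code):

#include <stdio.h>

#define X86_CR4_PGE (1ul << 7)

union page_role {
	unsigned word;
	struct {
		unsigned glevels:4;
		unsigned level:4;
		unsigned cr4_pge:1;
	};
};

int main(void)
{
	unsigned long cr4_a = X86_CR4_PGE, cr4_b = 0;
	union page_role a = { .word = 0 }, b = { .word = 0 };

	a.cr4_pge = !!(cr4_a & X86_CR4_PGE);
	b.cr4_pge = !!(cr4_b & X86_CR4_PGE);

	/* Different words -> different hash entries in kvm_mmu_get_page(),
	 * and sp->global is seeded from role.cr4_pge at creation time. */
	printf("a=%#x b=%#x shared=%d\n", a.word, b.word, a.word == b.word);
	return 0;
}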
Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f15023c11fe..5f03ec324e3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -797,7 +797,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, ASSERT(is_empty_shadow_page(sp->spt)); bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); sp->multimapped = 0; - sp->global = 1; sp->parent_pte = parent_pte; --vcpu->kvm->arch.n_free_mmu_pages; return sp; @@ -1241,6 +1240,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word); sp->gfn = gfn; sp->role = role; + sp->global = role.cr4_pge; hlist_add_head(&sp->hash_link, bucket); if (!metaphysical) { if (rmap_write_protect(vcpu->kvm, gfn)) @@ -1668,8 +1668,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, u64 mt_mask = shadow_mt_mask; struct kvm_mmu_page *sp = page_header(__pa(shadow_pte)); - if (!(vcpu->arch.cr4 & X86_CR4_PGE)) - global = 0; if (!global && sp->global) { sp->global = 0; if (sp->unsync) { -- cgit v1.2.3 From 2b3d2a206037b1471de6a6dc51427af034cfdb47 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 23 Dec 2008 19:46:01 +0200 Subject: KVM: Fix vmload and friends misinterpreted as lidt The AMD SVM instruction family all overload the 0f 01 /3 opcode, further multiplexing on the three r/m bits. But the code decided that anything that isn't a vmmcall must be an lidt (which shares the 0f 01 /3 opcode, for the case that mod = 3). Fix by aborting emulation if this isn't a vmmcall. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index d174db7a337..54fb09889a8 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1908,11 +1908,16 @@ twobyte_insn: c->dst.type = OP_NONE; break; case 3: /* lidt/vmmcall */ - if (c->modrm_mod == 3 && c->modrm_rm == 1) { - rc = kvm_fix_hypercall(ctxt->vcpu); - if (rc) - goto done; - kvm_emulate_hypercall(ctxt->vcpu); + if (c->modrm_mod == 3) { + switch (c->modrm_rm) { + case 1: + rc = kvm_fix_hypercall(ctxt->vcpu); + if (rc) + goto done; + break; + default: + goto cannot_emulate; + } } else { rc = read_descriptor(ctxt, ops, c->src.ptr, &size, &address, -- cgit v1.2.3 From 2d11123a77e54d5cea262c958e8498f4a08bce3d Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 25 Dec 2008 14:39:47 +0200 Subject: KVM: MMU: Add for_each_shadow_entry(), a simpler alternative to walk_shadow() Using a for_each loop style removes the need to write callbacks and nasty casts. Implement walk_shadow() using for_each_shadow_entry().
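The iterator idiom itself is generic C; a toy version over a linked list shows the same init/okay/next shape used below, wrapped in a for_each macro (illustrative only):

#include <stdio.h>

struct node { int val; struct node *next; };

struct iter { struct node *cur; };

static void it_init(struct iter *it, struct node *head) { it->cur = head; }
static int it_okay(struct iter *it) { return it->cur != NULL; }
static void it_next(struct iter *it) { it->cur = it->cur->next; }

/* Control flow stays at the call site, so no callback struct is
 * needed and 'break'/'continue' work naturally. */
#define for_each_node(_it, _head) \
	for (it_init(&(_it), _head); it_okay(&(_it)); it_next(&(_it)))

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct iter it;

	for_each_node(it, &a) {
		if (it.cur->val == 2)
			continue;	/* awkward to express via a callback */
		printf("%d\n", it.cur->val);
	}
	return 0;
}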
Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 69 ++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 49 insertions(+), 20 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5f03ec324e3..c669f2af1d1 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -150,6 +150,20 @@ struct kvm_shadow_walk { u64 addr, u64 *spte, int level); }; +struct kvm_shadow_walk_iterator { + u64 addr; + hpa_t shadow_addr; + int level; + u64 *sptep; + unsigned index; +}; + +#define for_each_shadow_entry(_vcpu, _addr, _walker) \ + for (shadow_walk_init(&(_walker), _vcpu, _addr); \ + shadow_walk_okay(&(_walker)); \ + shadow_walk_next(&(_walker))) + + struct kvm_unsync_walk { int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk); }; @@ -1254,33 +1268,48 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, return sp; } +static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, + struct kvm_vcpu *vcpu, u64 addr) +{ + iterator->addr = addr; + iterator->shadow_addr = vcpu->arch.mmu.root_hpa; + iterator->level = vcpu->arch.mmu.shadow_root_level; + if (iterator->level == PT32E_ROOT_LEVEL) { + iterator->shadow_addr + = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; + iterator->shadow_addr &= PT64_BASE_ADDR_MASK; + --iterator->level; + if (!iterator->shadow_addr) + iterator->level = 0; + } +} + +static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) +{ + if (iterator->level < PT_PAGE_TABLE_LEVEL) + return false; + iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); + iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; + return true; +} + +static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) +{ + iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK; + --iterator->level; +} + static int walk_shadow(struct kvm_shadow_walk *walker, struct kvm_vcpu *vcpu, u64 addr) { - hpa_t shadow_addr; - int level; + struct kvm_shadow_walk_iterator iterator; int r; - u64 *sptep; - unsigned index; - - shadow_addr = vcpu->arch.mmu.root_hpa; - level = vcpu->arch.mmu.shadow_root_level; - if (level == PT32E_ROOT_LEVEL) { - shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; - shadow_addr &= PT64_BASE_ADDR_MASK; - if (!shadow_addr) - return 1; - --level; - } - while (level >= PT_PAGE_TABLE_LEVEL) { - index = SHADOW_PT_INDEX(addr, level); - sptep = ((u64 *)__va(shadow_addr)) + index; - r = walker->entry(walker, vcpu, addr, sptep, level); + for_each_shadow_entry(vcpu, addr, iterator) { + r = walker->entry(walker, vcpu, addr, + iterator.sptep, iterator.level); if (r) return r; - shadow_addr = *sptep & PT64_BASE_ADDR_MASK; - --level; } return 0; } -- cgit v1.2.3 From 9f652d21c3f887075a33abae85bf53fec64e67b1 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 25 Dec 2008 14:54:25 +0200 Subject: KVM: MMU: Use for_each_shadow_entry() in __direct_map() Eliminating a callback and a useless structure. 
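For contrast, the state-recovery dance that the callback style forces - embed the generic walker in a private struct, then container_of() back out inside the callback - looks like this in a toy standalone form (container_of defined as in the kernel, using only offsetof from <stddef.h>):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct walker {
	int (*entry)(struct walker *w, int level);
};

struct private_walk {
	struct walker walker;	/* embedded generic part ... */
	int pt_write;		/* ... smuggles state to the callback */
};

static int entry(struct walker *w, int level)
{
	struct private_walk *pw = container_of(w, struct private_walk, walker);

	pw->pt_write += level;	/* stand-in for the real per-level work */
	return 0;
}

int main(void)
{
	struct private_walk pw = { .walker = { .entry = entry } };
	int level;

	for (level = 4; level >= 1; --level)
		pw.walker.entry(&pw.walker, level);

	printf("pt_write = %d\n", pw.pt_write);
	return 0;
}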
Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 83 +++++++++++++++++++----------------------------------- 1 file changed, 29 insertions(+), 54 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index c669f2af1d1..a25e1adb5cf 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1846,67 +1846,42 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) { } -struct direct_shadow_walk { - struct kvm_shadow_walk walker; - pfn_t pfn; - int write; - int largepage; - int pt_write; -}; - -static int direct_map_entry(struct kvm_shadow_walk *_walk, - struct kvm_vcpu *vcpu, - u64 addr, u64 *sptep, int level) +static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, + int largepage, gfn_t gfn, pfn_t pfn) { - struct direct_shadow_walk *walk = - container_of(_walk, struct direct_shadow_walk, walker); + struct kvm_shadow_walk_iterator iterator; struct kvm_mmu_page *sp; + int pt_write = 0; gfn_t pseudo_gfn; - gfn_t gfn = addr >> PAGE_SHIFT; - - if (level == PT_PAGE_TABLE_LEVEL - || (walk->largepage && level == PT_DIRECTORY_LEVEL)) { - mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL, - 0, walk->write, 1, &walk->pt_write, - walk->largepage, 0, gfn, walk->pfn, false); - ++vcpu->stat.pf_fixed; - return 1; - } - if (*sptep == shadow_trap_nonpresent_pte) { - pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT; - sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1, - 1, ACC_ALL, sptep); - if (!sp) { - pgprintk("nonpaging_map: ENOMEM\n"); - kvm_release_pfn_clean(walk->pfn); - return -ENOMEM; + for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { + if (iterator.level == PT_PAGE_TABLE_LEVEL + || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) { + mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, + 0, write, 1, &pt_write, + largepage, 0, gfn, pfn, false); + ++vcpu->stat.pf_fixed; + break; } - set_shadow_pte(sptep, - __pa(sp->spt) - | PT_PRESENT_MASK | PT_WRITABLE_MASK - | shadow_user_mask | shadow_x_mask); - } - return 0; -} + if (*iterator.sptep == shadow_trap_nonpresent_pte) { + pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT; + sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, + iterator.level - 1, + 1, ACC_ALL, iterator.sptep); + if (!sp) { + pgprintk("nonpaging_map: ENOMEM\n"); + kvm_release_pfn_clean(pfn); + return -ENOMEM; + } -static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, - int largepage, gfn_t gfn, pfn_t pfn) -{ - int r; - struct direct_shadow_walk walker = { - .walker = { .entry = direct_map_entry, }, - .pfn = pfn, - .largepage = largepage, - .write = write, - .pt_write = 0, - }; - - r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT); - if (r < 0) - return r; - return walker.pt_write; + set_shadow_pte(iterator.sptep, + __pa(sp->spt) + | PT_PRESENT_MASK | PT_WRITABLE_MASK + | shadow_user_mask | shadow_x_mask); + } + } + return pt_write; } static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) -- cgit v1.2.3 From e7a04c99b54ad9acb98a56113ec3163bc1039e13 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 25 Dec 2008 15:10:50 +0200 Subject: KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in fetch() Effectively reverting to the pre walk_shadow() version -- but now with the reusable for_each(). 
Signed-off-by: Avi Kivity --- arch/x86/kvm/paging_tmpl.h | 128 ++++++++++++++++++++------------------------- 1 file changed, 58 insertions(+), 70 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 9fd78b6e17a..69c7e3311b8 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -283,91 +283,79 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page, /* * Fetch a shadow pte for a specific level in the paging hierarchy. */ -static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw, - struct kvm_vcpu *vcpu, u64 addr, - u64 *sptep, int level) +static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, + struct guest_walker *gw, + int user_fault, int write_fault, int largepage, + int *ptwrite, pfn_t pfn) { - struct shadow_walker *sw = - container_of(_sw, struct shadow_walker, walker); - struct guest_walker *gw = sw->guest_walker; unsigned access = gw->pt_access; struct kvm_mmu_page *shadow_page; - u64 spte; + u64 spte, *sptep; int metaphysical; gfn_t table_gfn; int r; + int level; pt_element_t curr_pte; + struct kvm_shadow_walk_iterator iterator; - if (level == PT_PAGE_TABLE_LEVEL - || (sw->largepage && level == PT_DIRECTORY_LEVEL)) { - mmu_set_spte(vcpu, sptep, access, gw->pte_access & access, - sw->user_fault, sw->write_fault, - gw->ptes[gw->level-1] & PT_DIRTY_MASK, - sw->ptwrite, sw->largepage, - gw->ptes[gw->level-1] & PT_GLOBAL_MASK, - gw->gfn, sw->pfn, false); - sw->sptep = sptep; - return 1; - } - - if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) - return 0; - - if (is_large_pte(*sptep)) { - set_shadow_pte(sptep, shadow_trap_nonpresent_pte); - kvm_flush_remote_tlbs(vcpu->kvm); - rmap_remove(vcpu->kvm, sptep); - } + if (!is_present_pte(gw->ptes[gw->level - 1])) + return NULL; - if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) { - metaphysical = 1; - if (!is_dirty_pte(gw->ptes[level - 1])) - access &= ~ACC_WRITE_MASK; - table_gfn = gpte_to_gfn(gw->ptes[level - 1]); - } else { - metaphysical = 0; - table_gfn = gw->table_gfn[level - 2]; - } - shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1, - metaphysical, access, sptep); - if (!metaphysical) { - r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2], - &curr_pte, sizeof(curr_pte)); - if (r || curr_pte != gw->ptes[level - 2]) { - kvm_mmu_put_page(shadow_page, sptep); - kvm_release_pfn_clean(sw->pfn); - sw->sptep = NULL; - return 1; + for_each_shadow_entry(vcpu, addr, iterator) { + level = iterator.level; + sptep = iterator.sptep; + if (level == PT_PAGE_TABLE_LEVEL + || (largepage && level == PT_DIRECTORY_LEVEL)) { + mmu_set_spte(vcpu, sptep, access, + gw->pte_access & access, + user_fault, write_fault, + gw->ptes[gw->level-1] & PT_DIRTY_MASK, + ptwrite, largepage, + gw->ptes[gw->level-1] & PT_GLOBAL_MASK, + gw->gfn, pfn, false); + break; } - } - spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK - | PT_WRITABLE_MASK | PT_USER_MASK; - *sptep = spte; - return 0; -} + if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) + continue; -static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, - struct guest_walker *guest_walker, - int user_fault, int write_fault, int largepage, - int *ptwrite, pfn_t pfn) -{ - struct shadow_walker walker = { - .walker = { .entry = FNAME(shadow_walk_entry), }, - .guest_walker = guest_walker, - .user_fault = user_fault, - .write_fault = write_fault, - .largepage = largepage, - .ptwrite = ptwrite, - .pfn = pfn, - }; + if 
(is_large_pte(*sptep)) { + set_shadow_pte(sptep, shadow_trap_nonpresent_pte); + kvm_flush_remote_tlbs(vcpu->kvm); + rmap_remove(vcpu->kvm, sptep); + } - if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1])) - return NULL; + if (level == PT_DIRECTORY_LEVEL + && gw->level == PT_DIRECTORY_LEVEL) { + metaphysical = 1; + if (!is_dirty_pte(gw->ptes[level - 1])) + access &= ~ACC_WRITE_MASK; + table_gfn = gpte_to_gfn(gw->ptes[level - 1]); + } else { + metaphysical = 0; + table_gfn = gw->table_gfn[level - 2]; + } + shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, + metaphysical, access, sptep); + if (!metaphysical) { + r = kvm_read_guest_atomic(vcpu->kvm, + gw->pte_gpa[level - 2], + &curr_pte, sizeof(curr_pte)); + if (r || curr_pte != gw->ptes[level - 2]) { + kvm_mmu_put_page(shadow_page, sptep); + kvm_release_pfn_clean(pfn); + sptep = NULL; + break; + } + } - walk_shadow(&walker.walker, vcpu, addr); + spte = __pa(shadow_page->spt) + | PT_PRESENT_MASK | PT_ACCESSED_MASK + | PT_WRITABLE_MASK | PT_USER_MASK; + *sptep = spte; + } - return walker.sptep; + return sptep; } /* -- cgit v1.2.3 From a461930bc3cece021f8f89a80dcc1d0691a92b52 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 25 Dec 2008 15:19:00 +0200 Subject: KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in invlpg() Signed-off-by: Avi Kivity --- arch/x86/kvm/paging_tmpl.h | 81 ++++++++++++++++++---------------------------- 1 file changed, 32 insertions(+), 49 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 69c7e3311b8..46b68f941f6 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -25,7 +25,6 @@ #if PTTYPE == 64 #define pt_element_t u64 #define guest_walker guest_walker64 - #define shadow_walker shadow_walker64 #define FNAME(name) paging##64_##name #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK @@ -42,7 +41,6 @@ #elif PTTYPE == 32 #define pt_element_t u32 #define guest_walker guest_walker32 - #define shadow_walker shadow_walker32 #define FNAME(name) paging##32_##name #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK @@ -73,18 +71,6 @@ struct guest_walker { u32 error_code; }; -struct shadow_walker { - struct kvm_shadow_walk walker; - struct guest_walker *guest_walker; - int user_fault; - int write_fault; - int largepage; - int *ptwrite; - pfn_t pfn; - u64 *sptep; - gpa_t pte_gpa; -}; - static gfn_t gpte_to_gfn(pt_element_t gpte) { return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT; @@ -453,54 +439,52 @@ out_unlock: return 0; } -static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw, - struct kvm_vcpu *vcpu, u64 addr, - u64 *sptep, int level) +static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) { - struct shadow_walker *sw = - container_of(_sw, struct shadow_walker, walker); + struct kvm_shadow_walk_iterator iterator; + pt_element_t gpte; + gpa_t pte_gpa = -1; + int level; + u64 *sptep; + + spin_lock(&vcpu->kvm->mmu_lock); - /* FIXME: properly handle invlpg on large guest pages */ - if (level == PT_PAGE_TABLE_LEVEL || - ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) { - struct kvm_mmu_page *sp = page_header(__pa(sptep)); + for_each_shadow_entry(vcpu, gva, iterator) { + level = iterator.level; + sptep = iterator.sptep; - sw->pte_gpa = (sp->gfn << PAGE_SHIFT); - sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); + /* FIXME: properly handle invlpg on large guest pages */ + if (level == 
PT_PAGE_TABLE_LEVEL || + ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) { + struct kvm_mmu_page *sp = page_header(__pa(sptep)); - if (is_shadow_present_pte(*sptep)) { - rmap_remove(vcpu->kvm, sptep); - if (is_large_pte(*sptep)) - --vcpu->kvm->stat.lpages; + pte_gpa = (sp->gfn << PAGE_SHIFT); + pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); + + if (is_shadow_present_pte(*sptep)) { + rmap_remove(vcpu->kvm, sptep); + if (is_large_pte(*sptep)) + --vcpu->kvm->stat.lpages; + } + set_shadow_pte(sptep, shadow_trap_nonpresent_pte); + break; } - set_shadow_pte(sptep, shadow_trap_nonpresent_pte); - return 1; - } - if (!is_shadow_present_pte(*sptep)) - return 1; - return 0; -} -static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) -{ - pt_element_t gpte; - struct shadow_walker walker = { - .walker = { .entry = FNAME(shadow_invlpg_entry), }, - .pte_gpa = -1, - }; + if (!is_shadow_present_pte(*sptep)) + break; + } - spin_lock(&vcpu->kvm->mmu_lock); - walk_shadow(&walker.walker, vcpu, gva); spin_unlock(&vcpu->kvm->mmu_lock); - if (walker.pte_gpa == -1) + + if (pte_gpa == -1) return; - if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte, + if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte, sizeof(pt_element_t))) return; if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) { if (mmu_topup_memory_caches(vcpu)) return; - kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte, + kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte, sizeof(pt_element_t), 0); } } @@ -607,7 +591,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) #undef pt_element_t #undef guest_walker -#undef shadow_walker #undef FNAME #undef PT_BASE_ADDR_MASK #undef PT_INDEX -- cgit v1.2.3 From e8c4a4e8a7cc047dfb3c26b2cbc8599ad3460364 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 25 Dec 2008 15:20:07 +0200 Subject: KVM: MMU: Drop walk_shadow() No longer used. Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index a25e1adb5cf..15850809b55 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -145,11 +145,6 @@ struct kvm_rmap_desc { struct kvm_rmap_desc *more; }; -struct kvm_shadow_walk { - int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu, - u64 addr, u64 *spte, int level); -}; - struct kvm_shadow_walk_iterator { u64 addr; hpa_t shadow_addr; @@ -1299,21 +1294,6 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) --iterator->level; } -static int walk_shadow(struct kvm_shadow_walk *walker, - struct kvm_vcpu *vcpu, u64 addr) -{ - struct kvm_shadow_walk_iterator iterator; - int r; - - for_each_shadow_entry(vcpu, addr, iterator) { - r = walker->entry(walker, vcpu, addr, - iterator.sptep, iterator.level); - if (r) - return r; - } - return 0; -} - static void kvm_mmu_page_unlink_children(struct kvm *kvm, struct kvm_mmu_page *sp) { -- cgit v1.2.3 From 53f658b3c33616a4997ee254311b335e59063289 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Thu, 11 Dec 2008 20:45:05 +0100 Subject: KVM: VMX: initialize TSC offset relative to vm creation time VMX initializes the TSC offset for each vcpu at different times, and also reinitializes it for vcpus other than 0 on APIC SIPI message. This bug causes the TSC's to appear unsynchronized in the guest, even if the host is good. 
Older Linux kernels don't handle the situation very well, so gettimeofday is likely to go backwards in time: http://www.mail-archive.com/kvm@vger.kernel.org/msg02955.html http://sourceforge.net/tracker/index.php?func=detail&aid=2025534&group_id=180599&atid=893831 Fix it by initializing the offset of each vcpu relative to vm creation time, and moving it from vmx_vcpu_reset to vmx_vcpu_setup, out of the APIC MP init path. Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/vmx.c | 19 +++++++++++-------- arch/x86/kvm/x86.c | 2 ++ 3 files changed, 14 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c2a01d0513f..9efc446b5ac 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -398,6 +398,7 @@ struct kvm_arch{ unsigned long irq_sources_bitmap; unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; + u64 vm_init_tsc; }; struct kvm_vm_stat { diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index cee81c9a665..3312047664a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -865,11 +865,8 @@ static u64 guest_read_tsc(void) * writes 'guest_tsc' into guest's timestamp counter "register" * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc */ -static void guest_write_tsc(u64 guest_tsc) +static void guest_write_tsc(u64 guest_tsc, u64 host_tsc) { - u64 host_tsc; - - rdtscll(host_tsc); vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); } @@ -934,6 +931,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_msr_entry *msr; + u64 host_tsc; int ret = 0; switch (msr_index) { @@ -959,7 +957,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_TIME_STAMP_COUNTER: - guest_write_tsc(data); + rdtscll(host_tsc); + guest_write_tsc(data, host_tsc); break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: @@ -2109,7 +2108,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) { u32 host_sysenter_cs, msr_low, msr_high; u32 junk; - u64 host_pat; + u64 host_pat, tsc_this, tsc_base; unsigned long a; struct descriptor_table dt; int i; @@ -2237,6 +2236,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); + tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc; + rdtscll(tsc_this); + if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc) + tsc_base = tsc_this; + + guest_write_tsc(0, tsc_base); return 0; } @@ -2328,8 +2333,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); - guest_write_tsc(0); - /* Special registers */ vmcs_write64(GUEST_IA32_DEBUGCTL, 0); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 873602b5edf..3b2acfd72d7 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4170,6 +4170,8 @@ struct kvm *kvm_arch_create_vm(void) /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); + rdtscll(kvm->arch.vm_init_tsc); + return kvm; } -- cgit v1.2.3 From 77c2002e7c6f019f59a6f3cc5f8b16b41748dbe1 Mon Sep 17 00:00:00 2001 From: Izik Eidus Date: Mon, 29 Dec 2008 01:42:19 +0200 Subject: KVM: introduce kvm_read_guest_virt, kvm_write_guest_virt This commit changes the name of emulator_read_std to 
kvm_read_guest_virt, and adds a new function, kvm_write_guest_virt, that allows writing into a guest virtual address. Signed-off-by: Izik Eidus Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 4 --- arch/x86/kvm/x86.c | 56 ++++++++++++++++++++++++++++++----------- 2 files changed, 42 insertions(+), 18 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9efc446b5ac..b74576aec19 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -609,10 +609,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu); void fx_init(struct kvm_vcpu *vcpu); -int emulator_read_std(unsigned long addr, - void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu); int emulator_write_emulated(unsigned long addr, const void *val, unsigned int bytes, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3b2acfd72d7..67f91764e99 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1976,10 +1976,8 @@ static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, return dev; } -int emulator_read_std(unsigned long addr, - void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu) +int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu) { void *data = val; int r = X86EMUL_CONTINUE; @@ -1987,27 +1985,57 @@ int emulator_read_std(unsigned long addr, while (bytes) { gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); unsigned offset = addr & (PAGE_SIZE-1); - unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset); + unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) { r = X86EMUL_PROPAGATE_FAULT; goto out; } - ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy); + ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); if (ret < 0) { r = X86EMUL_UNHANDLEABLE; goto out; } - bytes -= tocopy; - data += tocopy; - addr += tocopy; + bytes -= toread; + data += toread; + addr += toread; } out: return r; } -EXPORT_SYMBOL_GPL(emulator_read_std); + +int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu) +{ + void *data = val; + int r = X86EMUL_CONTINUE; + + while (bytes) { + gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); + unsigned offset = addr & (PAGE_SIZE-1); + unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); + int ret; + + if (gpa == UNMAPPED_GVA) { + r = X86EMUL_PROPAGATE_FAULT; + goto out; + } + ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); + if (ret < 0) { + r = X86EMUL_UNHANDLEABLE; + goto out; + } + + bytes -= towrite; + data += towrite; + addr += towrite; + } +out: + return r; +} + static int emulator_read_emulated(unsigned long addr, void *val, @@ -2029,8 +2057,8 @@ static int emulator_read_emulated(unsigned long addr, if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto mmio; - if (emulator_read_std(addr, val, bytes, vcpu) - == X86EMUL_CONTINUE) + if (kvm_read_guest_virt(addr, val, bytes, vcpu) + == X86EMUL_CONTINUE) return X86EMUL_CONTINUE; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; @@ -2233,7 +2261,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS); - emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu); + kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu); printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n", context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]); @@ -2241,7 +2269,7 @@ void kvm_report_emulation_failure(struct
kvm_vcpu *vcpu, const char *context) EXPORT_SYMBOL_GPL(kvm_report_emulation_failure); static struct x86_emulate_ops emulate_ops = { - .read_std = emulator_read_std, + .read_std = kvm_read_guest_virt, .read_emulated = emulator_read_emulated, .write_emulated = emulator_write_emulated, .cmpxchg_emulated = emulator_cmpxchg_emulated, -- cgit v1.2.3 From 0f346074403bc109f9569f14b45cb09e83729032 Mon Sep 17 00:00:00 2001 From: Izik Eidus Date: Mon, 29 Dec 2008 01:42:20 +0200 Subject: KVM: remove the vmap usage vmap() on guest pages hides those pages from the Linux mm for an extended (userspace determined) amount of time. Get rid of it. Signed-off-by: Izik Eidus Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 62 ++++++++++++------------------------------------------ 1 file changed, 13 insertions(+), 49 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 67f91764e99..2a4f3a69734 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2371,40 +2371,19 @@ int emulate_instruction(struct kvm_vcpu *vcpu, } EXPORT_SYMBOL_GPL(emulate_instruction); -static void free_pio_guest_pages(struct kvm_vcpu *vcpu) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i) - if (vcpu->arch.pio.guest_pages[i]) { - kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]); - vcpu->arch.pio.guest_pages[i] = NULL; - } -} - static int pio_copy_data(struct kvm_vcpu *vcpu) { void *p = vcpu->arch.pio_data; - void *q; + gva_t q = vcpu->arch.pio.guest_gva; unsigned bytes; - int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1; + int ret; - q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE, - PAGE_KERNEL); - if (!q) { - free_pio_guest_pages(vcpu); - return -ENOMEM; - } - q += vcpu->arch.pio.guest_page_offset; bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count; if (vcpu->arch.pio.in) - memcpy(q, p, bytes); + ret = kvm_write_guest_virt(q, p, bytes, vcpu); else - memcpy(p, q, bytes); - q -= vcpu->arch.pio.guest_page_offset; - vunmap(q); - free_pio_guest_pages(vcpu); - return 0; + ret = kvm_read_guest_virt(q, p, bytes, vcpu); + return ret; } int complete_pio(struct kvm_vcpu *vcpu) @@ -2515,7 +2494,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, vcpu->arch.pio.in = in; vcpu->arch.pio.string = 0; vcpu->arch.pio.down = 0; - vcpu->arch.pio.guest_page_offset = 0; vcpu->arch.pio.rep = 0; if (vcpu->run->io.direction == KVM_EXIT_IO_IN) @@ -2543,9 +2521,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, gva_t address, int rep, unsigned port) { unsigned now, in_page; - int i, ret = 0; - int nr_pages = 1; - struct page *page; + int ret = 0; struct kvm_io_device *pio_dev; vcpu->run->exit_reason = KVM_EXIT_IO; @@ -2557,7 +2533,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, vcpu->arch.pio.in = in; vcpu->arch.pio.string = 1; vcpu->arch.pio.down = down; - vcpu->arch.pio.guest_page_offset = offset_in_page(address); vcpu->arch.pio.rep = rep; if (vcpu->run->io.direction == KVM_EXIT_IO_IN) @@ -2577,15 +2552,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, else in_page = offset_in_page(address) + size; now = min(count, (unsigned long)in_page / size); - if (!now) { - /* - * String I/O straddles page boundary. Pin two guest pages - * so that we satisfy atomicity constraints. Do just one - * transaction to avoid complexity. - */ - nr_pages = 2; + if (!now) now = 1; - } if (down) { /* * String I/O in reverse. Yuck. Kill the guest, fix later. 
@@ -2600,15 +2568,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count) kvm_x86_ops->skip_emulated_instruction(vcpu); - for (i = 0; i < nr_pages; ++i) { - page = gva_to_page(vcpu, address + i * PAGE_SIZE); - vcpu->arch.pio.guest_pages[i] = page; - if (!page) { - kvm_inject_gp(vcpu, 0); - free_pio_guest_pages(vcpu); - return 1; - } - } + vcpu->arch.pio.guest_gva = address; pio_dev = vcpu_find_pio_dev(vcpu, port, vcpu->arch.pio.cur_count, @@ -2616,7 +2576,11 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, if (!vcpu->arch.pio.in) { /* string PIO write */ ret = pio_copy_data(vcpu); - if (ret >= 0 && pio_dev) { + if (ret == X86EMUL_PROPAGATE_FAULT) { + kvm_inject_gp(vcpu, 0); + return 1; + } + if (ret == 0 && pio_dev) { pio_string_write(pio_dev, vcpu); complete_pio(vcpu); if (vcpu->arch.pio.count == 0) -- cgit v1.2.3 From 61a6bd672bda3b9468bf5895c1be085c4e481138 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 29 Dec 2008 17:32:28 +0200 Subject: KVM: Fallback support for MSR_VM_HSAVE_PA Since we advertise MSR_VM_HSAVE_PA, userspace will attempt to read it even on Intel. Implement fake support for this MSR to avoid the warnings. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2a4f3a69734..c3fbe8c55c1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -742,6 +742,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) break; case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: + case MSR_VM_HSAVE_PA: break; case 0x200 ... 0x2ff: return set_msr_mtrr(vcpu, msr, data); @@ -863,6 +864,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: + case MSR_VM_HSAVE_PA: data = 0; break; case MSR_MTRRcap: -- cgit v1.2.3 From 52d939a0bf44081bc9f69b4fbdc9e7f416df27c7 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 30 Dec 2008 15:55:06 -0200 Subject: KVM: PIT: provide an option to disable interrupt reinjection Certain clocks (such as TSC) in older 2.6 guests overaccount for lost ticks, causing severe time drift. Interrupt reinjection magnifies the problem. Provide an option to disable it. 
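[ed: for context, the new control is driven from userspace through a vm ioctl. A minimal usage sketch, assuming a vm file descriptor obtained from KVM_CREATE_VM; the ioctl number and structure come from the patch below:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Disable PIT interrupt reinjection for this VM. */
	struct kvm_reinject_control control = { .pit_reinject = 0 };

	if (ioctl(vm_fd, KVM_REINJECT_CONTROL, &control) < 0)
		perror("KVM_REINJECT_CONTROL");

The designated initializer also zeroes the reserved bytes, which is what leaves room for controlling other timers later.]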
[avi: allow room for expansion in case we want to disable reinjection of other timers] Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm.h | 5 +++++ arch/x86/kvm/i8254.c | 4 ++++ arch/x86/kvm/i8254.h | 1 + arch/x86/kvm/x86.c | 21 +++++++++++++++++++++ 4 files changed, 31 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 32eb96c7ca2..54bcf228152 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -233,4 +233,9 @@ struct kvm_guest_debug_arch { struct kvm_pit_state { struct kvm_pit_channel_state channels[3]; }; + +struct kvm_reinject_control { + __u8 pit_reinject; + __u8 reserved[31]; +}; #endif /* _ASM_X86_KVM_H */ diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 72bd275a9b5..528daadeba4 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -201,6 +201,9 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps) if (!atomic_inc_and_test(&pt->pending)) set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests); + if (!pt->reinject) + atomic_set(&pt->pending, 1); + if (vcpu0 && waitqueue_active(&vcpu0->wq)) wake_up_interruptible(&vcpu0->wq); @@ -580,6 +583,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) pit_state->irq_ack_notifier.gsi = 0; pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq; kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); + pit_state->pit_timer.reinject = true; mutex_unlock(&pit->pit_state.lock); kvm_pit_reset(pit); diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index 4178022b97a..76959c4b500 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -9,6 +9,7 @@ struct kvm_kpit_timer { s64 period; /* unit: ns */ s64 scheduled; atomic_t pending; + bool reinject; }; struct kvm_kpit_channel_state { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c3fbe8c55c1..a1f14611f4b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -993,6 +993,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: + case KVM_CAP_REINJECT_CONTROL: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -1728,6 +1729,15 @@ static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) return r; } +static int kvm_vm_ioctl_reinject(struct kvm *kvm, + struct kvm_reinject_control *control) +{ + if (!kvm->arch.vpit) + return -ENXIO; + kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject; + return 0; +} + /* * Get (and clear) the dirty memory log for a memory slot. */ @@ -1925,6 +1935,17 @@ long kvm_arch_vm_ioctl(struct file *filp, r = 0; break; } + case KVM_REINJECT_CONTROL: { + struct kvm_reinject_control control; + r = -EFAULT; + if (copy_from_user(&control, argp, sizeof(control))) + goto out; + r = kvm_vm_ioctl_reinject(kvm, &control); + if (r) + goto out; + r = 0; + break; + } default: ; } -- cgit v1.2.3 From 1c08364c3565242f1e1bd585bc2ce458967941af Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Jan 2009 12:39:07 +0200 Subject: KVM: Move struct kvm_pio_request into x86 kvm_host.h This is an x86-specific structure and has no business living in common code.
Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b74576aec19..863ea73431a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -227,6 +227,18 @@ struct kvm_pv_mmu_op_buffer { char buf[512] __aligned(sizeof(long)); }; +struct kvm_pio_request { + unsigned long count; + int cur_count; + gva_t guest_gva; + int in; + int port; + int size; + int string; + int down; + int rep; +}; + /* * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level * 32-bit). The kvm_mmu structure abstracts the details of the current mmu -- cgit v1.2.3 From 1872a3f411ffe95c8e92300e0986a3532db55ce9 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Jan 2009 23:26:52 +0200 Subject: KVM: VMX: Fix guest state validity checks The vmx guest state validity checks are full of bugs. Make them conform to the manual. Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3312047664a..be9441056ff 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1784,14 +1784,16 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); cs_rpl = cs.selector & SELECTOR_RPL_MASK; + if (cs.unusable) + return false; if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) return false; if (!cs.s) return false; - if (!(~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK))) { + if (cs.type & AR_TYPE_WRITEABLE_MASK) { if (cs.dpl > cs_rpl) return false; - } else if (cs.type & AR_TYPE_CODE_MASK) { + } else { if (cs.dpl != cs_rpl) return false; } @@ -1810,7 +1812,9 @@ static bool stack_segment_valid(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); ss_rpl = ss.selector & SELECTOR_RPL_MASK; - if ((ss.type != 3) || (ss.type != 7)) + if (ss.unusable) + return true; + if (ss.type != 3 && ss.type != 7) return false; if (!ss.s) return false; @@ -1830,6 +1834,8 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) vmx_get_segment(vcpu, &var, seg); rpl = var.selector & SELECTOR_RPL_MASK; + if (var.unusable) + return true; if (!var.s) return false; if (!var.present) @@ -1851,9 +1857,11 @@ static bool tr_valid(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); + if (tr.unusable) + return false; if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ return false; - if ((tr.type != 3) || (tr.type != 11)) /* TODO: Check if guest is in IA32e mode */ + if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ return false; if (!tr.present) return false; @@ -1867,6 +1875,8 @@ static bool ldtr_valid(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); + if (ldtr.unusable) + return true; if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ return false; if (ldtr.type != 2) -- cgit v1.2.3 From 9fd4a3b7a412f983696b23121413a79d2132fed6 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Jan 2009 23:43:42 +0200 Subject: KVM: VMX: don't clobber segment AR if emulating invalid state The unusable bit is important for determining state validity; don't clobber it.
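[ed: background for this patch and the next one - VMX packs the segment attributes into one 32-bit access-rights value, with bit 16 acting as the "unusable" flag. A rough decoding sketch (illustrative, not the patched kernel code):

	u32 ar = vmcs_read32(sf->ar_bytes);

	unsigned type     = ar & 15;          /* segment type */
	unsigned s        = (ar >> 4) & 1;    /* code/data vs. system */
	unsigned dpl      = (ar >> 5) & 3;    /* descriptor privilege level */
	unsigned present  = (ar >> 7) & 1;
	unsigned unusable = (ar >> 16) & 1;   /* AR_UNUSABLE_MASK */

Zeroing the whole value whenever the unusable flag is set also throws away type, s, dpl and present, which the validity checks above still consult when emulating invalid guest state.]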
Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index be9441056ff..a6598cbaa00 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1649,7 +1649,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu, var->limit = vmcs_read32(sf->limit); var->selector = vmcs_read16(sf->selector); ar = vmcs_read32(sf->ar_bytes); - if (ar & AR_UNUSABLE_MASK) + if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state) ar = 0; var->type = ar & 15; var->s = (ar >> 4) & 1; -- cgit v1.2.3 From 10f32d84c750ccf8c0afb3a4ea9d4059aa3e9ffc Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Jan 2009 00:53:19 +0200 Subject: KVM: VMX: Prevent exit handler from running if emulating due to invalid state If we've just emulated an instruction, we won't have any valid exit reason and associated information. Fix by moving the clearing of the emulation_required flag to the exit handler. This way the exit handler can notice that we've been emulating and abort early. Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a6598cbaa00..a309be6788e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3130,7 +3130,6 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { - struct vcpu_vmx *vmx = to_vmx(vcpu); int err; preempt_enable(); @@ -3155,11 +3154,6 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, local_irq_disable(); preempt_disable(); - - /* Guest state should be valid now except if we need to - * emulate an MMIO */ - if (guest_state_valid(vcpu)) - vmx->emulation_required = 0; } /* @@ -3208,8 +3202,11 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) /* If we need to emulate an MMIO from handle_invalid_guest_state * we just return 0 */ - if (vmx->emulation_required && emulate_invalid_guest_state) + if (vmx->emulation_required && emulate_invalid_guest_state) { + if (guest_state_valid(vcpu)) + vmx->emulation_required = 0; return 0; + } /* Access CR3 don't cause VMExit in paging mode, so we need * to sync with guest real CR3. */ -- cgit v1.2.3 From 350f69dcd169d536307aa4a8c38c480e3a51c0db Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Jan 2009 11:12:40 +0200 Subject: KVM: x86 emulator: Make emulate_pop() a little more generic Allow emulate_pop() to read into arbitrary memory rather than just the source operand. Needed for complicated instructions like far returns. 
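[ed: with the widened signature a caller can pop into any buffer instead of staging through the decode cache's source operand. A usage sketch (names follow the patch below):

	unsigned long value;
	int rc;

	/* Pop the top of the guest stack into 'value'. */
	rc = emulate_pop(ctxt, ops, &value, c->op_bytes);
	if (rc != 0)
		return rc;

The 'ret far' patch later in this series uses exactly this shape twice: once for the return EIP and once for the CS selector.]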
Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 54fb09889a8..94459f313f1 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1136,18 +1136,19 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt) } static int emulate_pop(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops) + struct x86_emulate_ops *ops, + void *dest, int len) { struct decode_cache *c = &ctxt->decode; int rc; rc = ops->read_emulated(register_address(c, ss_base(ctxt), c->regs[VCPU_REGS_RSP]), - &c->src.val, c->src.bytes, ctxt->vcpu); + dest, len, ctxt->vcpu); if (rc != 0) return rc; - register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.bytes); + register_address_increment(c, &c->regs[VCPU_REGS_RSP], len); return rc; } @@ -1157,11 +1158,9 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, struct decode_cache *c = &ctxt->decode; int rc; - c->src.bytes = c->dst.bytes; - rc = emulate_pop(ctxt, ops); + rc = emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes); if (rc != 0) return rc; - c->dst.val = c->src.val; return 0; } @@ -1467,11 +1466,9 @@ special_insn: break; case 0x58 ... 0x5f: /* pop reg */ pop_instruction: - c->src.bytes = c->op_bytes; - rc = emulate_pop(ctxt, ops); + rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes); if (rc != 0) goto done; - c->dst.val = c->src.val; break; case 0x63: /* movsxd */ if (ctxt->mode != X86EMUL_MODE_PROT64) -- cgit v1.2.3 From 8b3079a5c0c031de07c8390aa160a4229088274f Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Jan 2009 12:10:54 +0200 Subject: KVM: VMX: When emulating on invalid vmx state, don't return to userspace unnecessarily If we aren't doing mmio there's no need to exit to userspace (which will just be confused). 
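[ed: reminder of the convention involved here: kvm_handle_exit() returning non-zero resumes the guest in-kernel, while returning zero drops to userspace. The patch below records the emulation result so the exit handler can make that choice; condensed sketch:

	if (vmx->emulation_required && emulate_invalid_guest_state)
		return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO;

i.e. userspace is only involved when emulation stopped at an mmio access.]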
Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a309be6788e..df454de8acf 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -91,6 +91,7 @@ struct vcpu_vmx { } rmode; int vpid; bool emulation_required; + enum emulation_result invalid_state_emulation_result; /* Support for vnmi-less CPUs */ int soft_vnmi_blocked; @@ -3130,7 +3131,8 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { - int err; + struct vcpu_vmx *vmx = to_vmx(vcpu); + enum emulation_result err = EMULATE_DONE; preempt_enable(); local_irq_enable(); @@ -3154,6 +3156,8 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, local_irq_disable(); preempt_disable(); + + vmx->invalid_state_emulation_result = err; } /* @@ -3205,7 +3209,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) if (vmx->emulation_required && emulate_invalid_guest_state) { if (guest_state_valid(vcpu)) vmx->emulation_required = 0; - return 0; + return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO; } /* Access CR3 don't cause VMExit in paging mode, so we need -- cgit v1.2.3 From a77ab5ead5c1fef2c6c5a9b3cf3765e52643a2aa Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Jan 2009 13:27:34 +0200 Subject: KVM: x86 emulator: implement 'ret far' instruction (opcode 0xcb) Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 94459f313f1..ca91749d208 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -178,7 +178,7 @@ static u32 opcode_table[256] = { 0, ImplicitOps | Stack, 0, 0, ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov, /* 0xC8 - 0xCF */ - 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, ImplicitOps | Stack, 0, 0, 0, 0, /* 0xD0 - 0xD7 */ ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, @@ -1278,6 +1278,25 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, return 0; } +static int emulate_ret_far(struct x86_emulate_ctxt *ctxt, + struct x86_emulate_ops *ops) +{ + struct decode_cache *c = &ctxt->decode; + int rc; + unsigned long cs; + + rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes); + if (rc) + return rc; + if (c->op_bytes == 4) + c->eip = (u32)c->eip; + rc = emulate_pop(ctxt, ops, &cs, c->op_bytes); + if (rc) + return rc; + rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS); + return rc; +} + static inline int writeback(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) { @@ -1735,6 +1754,11 @@ special_insn: mov: c->dst.val = c->src.val; break; + case 0xcb: /* ret far */ + rc = emulate_ret_far(ctxt, ops); + if (rc) + goto done; + break; case 0xd0 ... 0xd1: /* Grp2 */ c->src.val = 1; emulate_grp2(ctxt); -- cgit v1.2.3 From 269e05e48502f1cc06802e9fba90f5100dd6bb0d Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 5 Jan 2009 15:21:42 +0200 Subject: KVM: Properly lock PIT creation Otherwise, two threads can create a PIT in parallel and cause a memory leak. 
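[ed: the fix is ordinary one-shot creation under the vm lock; a condensed sketch of the flow the patch establishes:

	mutex_lock(&kvm->lock);
	if (kvm->arch.vpit)
		r = -EEXIST;    /* lost the race: a PIT already exists */
	else if ((kvm->arch.vpit = kvm_create_pit(kvm)) != NULL)
		r = 0;
	else
		r = -ENOMEM;
	mutex_unlock(&kvm->lock);

Since the caller now holds kvm->lock, kvm_create_pit() itself must no longer take it around kvm_request_irq_source_id(), which is the other half of the patch.]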
Signed-off-by: Avi Kivity --- arch/x86/kvm/i8254.c | 2 -- arch/x86/kvm/x86.c | 6 ++++++ 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 528daadeba4..69d1bbff3fd 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -548,9 +548,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) if (!pit) return NULL; - mutex_lock(&kvm->lock); pit->irq_source_id = kvm_request_irq_source_id(kvm); - mutex_unlock(&kvm->lock); if (pit->irq_source_id < 0) { kfree(pit); return NULL; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a1f14611f4b..6fbc3460337 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1837,10 +1837,16 @@ long kvm_arch_vm_ioctl(struct file *filp, goto out; break; case KVM_CREATE_PIT: + mutex_lock(&kvm->lock); + r = -EEXIST; + if (kvm->arch.vpit) + goto create_pit_unlock; r = -ENOMEM; kvm->arch.vpit = kvm_create_pit(kvm); if (kvm->arch.vpit) r = 0; + create_pit_unlock: + mutex_unlock(&kvm->lock); break; case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; -- cgit v1.2.3 From c8a73f186bf62235b6fb5dd52601d641917dd50b Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 5 Jan 2009 16:02:47 +0100 Subject: KVM: SVM: Add microcode patch level dummy VMware ESX checks if the microcode level is correct when using a barcelona CPU, in order to see if it actually can use SVM. Let's tell it we're on the safe side... Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b3a7a314d55..3a60c3f04e6 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1928,6 +1928,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) case MSR_VM_CR: *data = 0; break; + case MSR_IA32_UCODE_REV: + *data = 0x01000065; + break; default: return kvm_get_msr_common(vcpu, ecx, data); } -- cgit v1.2.3 From 4677a3b693e035f186e2875259b9a0bb94c42fbe Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 6 Jan 2009 13:00:27 +0200 Subject: KVM: MMU: Optimize page unshadowing Using kvm_mmu_lookup_page() will result in multiple scans of the hash chains; use hlist_for_each_entry_safe() to achieve a single scan instead. Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 15850809b55..aac0499947d 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1471,11 +1471,20 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) { + unsigned index; + struct hlist_head *bucket; struct kvm_mmu_page *sp; + struct hlist_node *node, *nn; - while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) { - pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word); - kvm_mmu_zap_page(kvm, sp); + index = kvm_page_table_hashfn(gfn); + bucket = &kvm->arch.mmu_page_hash[index]; + hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) { + if (sp->gfn == gfn && !sp->role.metaphysical + && !sp->role.invalid) { + pgprintk("%s: zap %lx %x\n", + __func__, gfn, sp->role.word); + kvm_mmu_zap_page(kvm, sp); + } } } -- cgit v1.2.3 From 5d9b8e30f543a9f21a968a4cda71e8f6d1c66a61 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Jan 2009 18:04:18 +0200 Subject: KVM: Add CONFIG_HAVE_KVM_IRQCHIP Two KVM archs support irqchips and two don't. 
Add a Kconfig item to make selecting between the two models easier. Signed-off-by: Avi Kivity --- arch/x86/kvm/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index b81125f0bde..0a303c3ed11 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -4,6 +4,10 @@ config HAVE_KVM bool +config HAVE_KVM_IRQCHIP + bool + default y + menuconfig VIRTUALIZATION bool "Virtualization" depends on HAVE_KVM || X86 -- cgit v1.2.3 From 4780c65904f0fc4e312ee2da9383eacbe04e61ea Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 4 Jan 2009 18:06:06 +0200 Subject: KVM: Reset PIT irq injection logic when the PIT IRQ is unmasked While the PIT is masked the guest cannot ack the irq, so the reinject logic will never allow the interrupt to be injected. Fix by resetting the reinjection counters on unmask. Unbreaks Xen. Signed-off-by: Avi Kivity --- arch/x86/kvm/i8254.c | 15 +++++++++++++++ arch/x86/kvm/i8254.h | 1 + 2 files changed, 16 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 69d1bbff3fd..c13bb92d315 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -539,6 +539,16 @@ void kvm_pit_reset(struct kvm_pit *pit) pit->pit_state.irq_ack = 1; } +static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask) +{ + struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier); + + if (!mask) { + atomic_set(&pit->pit_state.pit_timer.pending, 0); + pit->pit_state.irq_ack = 1; + } +} + struct kvm_pit *kvm_create_pit(struct kvm *kvm) { struct kvm_pit *pit; @@ -586,6 +596,9 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) kvm_pit_reset(pit); + pit->mask_notifier.func = pit_mask_notifer; + kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); + return pit; } @@ -594,6 +607,8 @@ void kvm_free_pit(struct kvm *kvm) struct hrtimer *timer; if (kvm->arch.vpit) { + kvm_unregister_irq_mask_notifier(kvm, 0, + &kvm->arch.vpit->mask_notifier); mutex_lock(&kvm->arch.vpit->pit_state.lock); timer = &kvm->arch.vpit->pit_state.pit_timer.timer; hrtimer_cancel(timer); diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index 76959c4b500..6acbe4b505d 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -46,6 +46,7 @@ struct kvm_pit { struct kvm *kvm; struct kvm_kpit_state pit_state; int irq_source_id; + struct kvm_irq_mask_notifier mask_notifier; }; #define KVM_PIT_BASE_ADDRESS 0x40 -- cgit v1.2.3 From ff81ff10b4417952919dcc0bd67effa9ceb411c0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 8 Jan 2009 11:05:17 -0800 Subject: KVM: SVM: Fix typo in has_svm() Signed-off-by: Joe Perches Acked-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 3a60c3f04e6..db5021b2b5a 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -250,7 +250,7 @@ static int has_svm(void) const char *msg; if (!cpu_has_svm(&msg)) { - printk(KERN_INFO "has_svn: %s\n", msg); + printk(KERN_INFO "has_svm: %s\n", msg); return 0; } -- cgit v1.2.3 From 9903a927a4aea4b1ea42356a8453fca664df0b18 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Thu, 8 Jan 2009 17:44:19 -0200 Subject: KVM: MMU: drop zeroing on mmu_memory_cache_alloc Zeroing on mmu_memory_cache_alloc is unnecessary since: - Smaller areas are pre-allocated with kmem_cache_zalloc. 
- Page pointed by ->spt is overwritten with prefetch_page and entries in page pointed by ->gfns are initialized before reading. [avi: zeroing pages is unnecessary] Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index aac0499947d..de9a9fbc16e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -352,7 +352,6 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, BUG_ON(!mc->nobjs); p = mc->objects[--mc->nobjs]; - memset(p, 0, size); return p; } -- cgit v1.2.3 From f6e2c02b6d28ddabe99377c5640a833407a62632 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 11 Jan 2009 13:02:10 +0200 Subject: KVM: MMU: Rename "metaphysical" attribute to "direct" This actually describes what is going on, rather than alerting the reader that something strange is going on. Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 5 +++-- arch/x86/kvm/mmu.c | 32 ++++++++++++++++---------------- arch/x86/kvm/paging_tmpl.h | 12 ++++++------ 3 files changed, 25 insertions(+), 24 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 863ea73431a..55fd4c5fd38 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -170,7 +170,8 @@ struct kvm_pte_chain { * bits 0:3 - total guest paging levels (2-4, or zero for real mode) * bits 4:7 - page table level for this shadow (1-4) * bits 8:9 - page table quadrant for 2-level guests - * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode) + * bit 16 - direct mapping of virtual to physical mapping at gfn + * used for real mode and two-dimensional paging * bits 17:19 - common access permissions for all ptes in this shadow page */ union kvm_mmu_page_role { @@ -180,7 +181,7 @@ union kvm_mmu_page_role { unsigned level:4; unsigned quadrant:2; unsigned pad_for_nice_hex_output:6; - unsigned metaphysical:1; + unsigned direct:1; unsigned access:3; unsigned invalid:1; unsigned cr4_pge:1; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index de9a9fbc16e..ef060ec444a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1066,7 +1066,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn) index = kvm_page_table_hashfn(gfn); bucket = &kvm->arch.mmu_page_hash[index]; hlist_for_each_entry(sp, node, bucket, hash_link) - if (sp->gfn == gfn && !sp->role.metaphysical + if (sp->gfn == gfn && !sp->role.direct && !sp->role.invalid) { pgprintk("%s: found role %x\n", __func__, sp->role.word); @@ -1200,7 +1200,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gaddr, unsigned level, - int metaphysical, + int direct, unsigned access, u64 *parent_pte) { @@ -1213,7 +1213,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, role = vcpu->arch.mmu.base_role; role.level = level; - role.metaphysical = metaphysical; + role.direct = direct; role.access = access; if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); @@ -1250,7 +1250,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, sp->role = role; sp->global = role.cr4_pge; hlist_add_head(&sp->hash_link, bucket); - if (!metaphysical) { + if (!direct) { if (rmap_write_protect(vcpu->kvm, gfn)) kvm_flush_remote_tlbs(vcpu->kvm); account_shadowed(vcpu->kvm, gfn); @@ -1395,7 +1395,7 @@ static int 
kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) kvm_mmu_page_unlink_children(kvm, sp); kvm_mmu_unlink_parents(kvm, sp); kvm_flush_remote_tlbs(kvm); - if (!sp->role.invalid && !sp->role.metaphysical) + if (!sp->role.invalid && !sp->role.direct) unaccount_shadowed(kvm, sp->gfn); if (sp->unsync) kvm_unlink_unsync_page(kvm, sp); @@ -1458,7 +1458,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) index = kvm_page_table_hashfn(gfn); bucket = &kvm->arch.mmu_page_hash[index]; hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) - if (sp->gfn == gfn && !sp->role.metaphysical) { + if (sp->gfn == gfn && !sp->role.direct) { pgprintk("%s: gfn %lx role %x\n", __func__, gfn, sp->role.word); r = 1; @@ -1478,7 +1478,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) index = kvm_page_table_hashfn(gfn); bucket = &kvm->arch.mmu_page_hash[index]; hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) { - if (sp->gfn == gfn && !sp->role.metaphysical + if (sp->gfn == gfn && !sp->role.direct && !sp->role.invalid) { pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word); @@ -1638,7 +1638,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) bucket = &vcpu->kvm->arch.mmu_page_hash[index]; /* don't unsync if pagetable is shadowed with multiple roles */ hlist_for_each_entry_safe(s, node, n, bucket, hash_link) { - if (s->gfn != sp->gfn || s->role.metaphysical) + if (s->gfn != sp->gfn || s->role.direct) continue; if (s->role.word != sp->role.word) return 1; @@ -1951,7 +1951,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) int i; gfn_t root_gfn; struct kvm_mmu_page *sp; - int metaphysical = 0; + int direct = 0; root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT; @@ -1960,18 +1960,18 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) ASSERT(!VALID_PAGE(root)); if (tdp_enabled) - metaphysical = 1; + direct = 1; sp = kvm_mmu_get_page(vcpu, root_gfn, 0, - PT64_ROOT_LEVEL, metaphysical, + PT64_ROOT_LEVEL, direct, ACC_ALL, NULL); root = __pa(sp->spt); ++sp->root_count; vcpu->arch.mmu.root_hpa = root; return; } - metaphysical = !is_paging(vcpu); + direct = !is_paging(vcpu); if (tdp_enabled) - metaphysical = 1; + direct = 1; for (i = 0; i < 4; ++i) { hpa_t root = vcpu->arch.mmu.pae_root[i]; @@ -1985,7 +1985,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) } else if (vcpu->arch.mmu.root_level == 0) root_gfn = 0; sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, - PT32_ROOT_LEVEL, metaphysical, + PT32_ROOT_LEVEL, direct, ACC_ALL, NULL); root = __pa(sp->spt); ++sp->root_count; @@ -2487,7 +2487,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, index = kvm_page_table_hashfn(gfn); bucket = &vcpu->kvm->arch.mmu_page_hash[index]; hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) { - if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid) + if (sp->gfn != gfn || sp->role.direct || sp->role.invalid) continue; pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 
4 : 8; misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); @@ -3125,7 +3125,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu) gfn_t gfn; list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) { - if (sp->role.metaphysical) + if (sp->role.direct) continue; gfn = unalias_gfn(vcpu->kvm, sp->gfn); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 46b68f941f6..7314c0944c5 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -277,7 +277,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, unsigned access = gw->pt_access; struct kvm_mmu_page *shadow_page; u64 spte, *sptep; - int metaphysical; + int direct; gfn_t table_gfn; int r; int level; @@ -313,17 +313,17 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) { - metaphysical = 1; + direct = 1; if (!is_dirty_pte(gw->ptes[level - 1])) access &= ~ACC_WRITE_MASK; table_gfn = gpte_to_gfn(gw->ptes[level - 1]); } else { - metaphysical = 0; + direct = 0; table_gfn = gw->table_gfn[level - 2]; } shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, - metaphysical, access, sptep); - if (!metaphysical) { + direct, access, sptep); + if (!direct) { r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2], &curr_pte, sizeof(curr_pte)); @@ -512,7 +512,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, pt_element_t pt[256 / sizeof(pt_element_t)]; gpa_t pte_gpa; - if (sp->role.metaphysical + if (sp->role.direct || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) { nonpaging_prefetch_page(vcpu, sp); return; -- cgit v1.2.3 From 5a41accd3f69c6ef1d58f0c148980bae70515937 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 11 Jan 2009 17:19:35 +0200 Subject: KVM: MMU: Only enable cr4_pge role in shadow mode Two dimensional paging is only confused by it. 
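[ed: the role word matters because shadow pages are looked up by (gfn, role) and any differing role bit forces a separate shadow page. Condensed sketch of the lookup this affects:

	/* In kvm_mmu_get_page() (sketch): reuse only on an exact role match. */
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word)
			return sp;

With tdp enabled the guest's CR4.PGE setting has no bearing on the shadow structures, so keying the role on it would only split otherwise identical pages.]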
Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6fbc3460337..19ccde621cd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -364,7 +364,7 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) } kvm_x86_ops->set_cr4(vcpu, cr4); vcpu->arch.cr4 = cr4; - vcpu->arch.mmu.base_role.cr4_pge = !!(cr4 & X86_CR4_PGE); + vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled; kvm_mmu_sync_global(vcpu); kvm_mmu_reset_context(vcpu); } -- cgit v1.2.3 From 193554750441d91e127dd5066b8aebe0f769101c Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Wed, 14 Jan 2009 16:56:00 +0000 Subject: KVM: x86: Fix typos and whitespace errors Some typos, comments, whitespace errors corrected in the cpuid code Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 19ccde621cd..141a0166e51 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1067,7 +1067,7 @@ long kvm_arch_dev_ioctl(struct file *filp, if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, - cpuid_arg->entries); + cpuid_arg->entries); if (r) goto out; @@ -1165,8 +1165,8 @@ out: } static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, - struct kvm_cpuid2 *cpuid, - struct kvm_cpuid_entry2 __user *entries) + struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries) { int r; @@ -1185,8 +1185,8 @@ out: } static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, - struct kvm_cpuid2 *cpuid, - struct kvm_cpuid_entry2 __user *entries) + struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries) { int r; @@ -1195,7 +1195,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, goto out; r = -EFAULT; if (copy_to_user(entries, &vcpu->arch.cpuid_entries, - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) + vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) goto out; return 0; @@ -1205,12 +1205,12 @@ out: } static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, - u32 index) + u32 index) { entry->function = function; entry->index = index; cpuid_count(entry->function, entry->index, - &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); + &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); entry->flags = 0; } @@ -1249,7 +1249,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) | bit(X86_FEATURE_SVM); - /* all func 2 cpuid_count() should be called on the same cpu */ + /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); do_cpuid_1_ent(entry, function, index); ++*nent; @@ -1323,7 +1323,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, } static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, - struct kvm_cpuid_entry2 __user *entries) + struct kvm_cpuid_entry2 __user *entries) { struct kvm_cpuid_entry2 *cpuid_entries; int limit, nent = 0, r = -E2BIG; @@ -1340,7 +1340,7 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, limit = cpuid_entries[0].eax; for (func = 1; func <= limit && nent < cpuid->nent; ++func) do_cpuid_ent(&cpuid_entries[nent], func, 0, - &nent, cpuid->nent); + &nent, cpuid->nent); r = -E2BIG; if (nent >= cpuid->nent) 
goto out_free; @@ -1349,10 +1349,10 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, limit = cpuid_entries[nent - 1].eax; for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func) do_cpuid_ent(&cpuid_entries[nent], func, 0, - &nent, cpuid->nent); + &nent, cpuid->nent); r = -EFAULT; if (copy_to_user(entries, cpuid_entries, - nent * sizeof(struct kvm_cpuid_entry2))) + nent * sizeof(struct kvm_cpuid_entry2))) goto out_free; cpuid->nent = nent; r = 0; @@ -1496,7 +1496,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, - cpuid_arg->entries); + cpuid_arg->entries); if (r) goto out; break; @@ -1509,7 +1509,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, - cpuid_arg->entries); + cpuid_arg->entries); if (r) goto out; r = -EFAULT; @@ -2864,7 +2864,7 @@ static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e, if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index) return 0; if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) && - !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT)) + !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT)) return 0; return 1; } @@ -2892,7 +2892,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, if (!best || e->function > best->function) best = e; } - return best; } -- cgit v1.2.3 From 399ec807ddc38ecccf8c06dbde04531cbdc63e11 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 19 Nov 2008 13:58:46 +0200 Subject: KVM: Userspace controlled irq routing Currently KVM has a static routing from GSI numbers to interrupts (namely, 0-15 are mapped 1:1 to both PIC and IOAPIC, and 16-23 are mapped 1:1 to the IOAPIC). This is insufficient for several reasons: - HPET requires non 1:1 mapping for the timer interrupt - MSIs need a new method to assign interrupt numbers and dispatch them - ACPI APIC mode needs to be able to reassign the PCI LINK interrupts to the ioapics This patch implements an interrupt routing table (as a linked list, but this can be easily changed) and a userspace interface to replace the table. The routing table is initialized according to the current hardwired mapping. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 141a0166e51..32e3a7ec6ad 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1835,6 +1835,12 @@ long kvm_arch_vm_ioctl(struct file *filp, } } else goto out; + r = kvm_setup_default_irq_routing(kvm); + if (r) { + kfree(kvm->arch.vpic); + kfree(kvm->arch.vioapic); + goto out; + } break; case KVM_CREATE_PIT: mutex_lock(&kvm->lock); -- cgit v1.2.3 From 91b2ae773d3b168b763237fac33f75b13d891f20 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 19 Jan 2009 14:57:52 +0200 Subject: KVM: Avoid using CONFIG_ in userspace visible headers Kconfig symbols are not available in userspace, and are not stripped by headers-install. Avoid their use by adding #defines in <asm/kvm.h> to suit each architecture.
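[ed: the pattern, in brief: a test like "#ifdef CONFIG_X86" evaluates to nothing once the header is exported, so each arch header defines its own marker instead. An illustrative sketch (the capability macro and its number below are hypothetical):

	/* arch/x86/include/asm/kvm.h */
	#define __KVM_HAVE_GUEST_DEBUG

	/* include/linux/kvm.h - visible to both kernel and userspace */
	#ifdef __KVM_HAVE_GUEST_DEBUG
	#define KVM_CAP_EXAMPLE_GUEST_DEBUG 42    /* hypothetical capability */
	#endif

]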
Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 54bcf228152..dc3f6cf1170 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -15,6 +15,7 @@ #define __KVM_HAVE_DEVICE_ASSIGNMENT #define __KVM_HAVE_MSI #define __KVM_HAVE_USER_NMI +#define __KVM_HAVE_GUEST_DEBUG /* Architectural interrupt line count. */ #define KVM_NR_INTERRUPTS 256 -- cgit v1.2.3 From 44882eed2ebe7f75f8cdae5671ab1d6e0fa40dbc Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 27 Jan 2009 15:12:38 -0200 Subject: KVM: make irq ack notifications aware of routing table IRQ ack notifications assume an identity mapping between pin->gsi, which might not be the case with, for example, HPET. Translate before acking. Signed-off-by: Marcelo Tosatti Acked-by: Gleb Natapov --- arch/x86/kvm/i8259.c | 5 +++-- arch/x86/kvm/irq.h | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 179dcb0103f..93160375c84 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -49,7 +49,8 @@ static void pic_unlock(struct kvm_pic *s) spin_unlock(&s->lock); while (acks) { - kvm_notify_acked_irq(kvm, __ffs(acks)); + kvm_notify_acked_irq(kvm, SELECT_PIC(__ffs(acks)), + __ffs(acks)); acks &= acks - 1; } @@ -232,7 +233,7 @@ int kvm_pic_read_irq(struct kvm *kvm) } pic_update_irq(s); pic_unlock(s); - kvm_notify_acked_irq(kvm, irq); + kvm_notify_acked_irq(kvm, SELECT_PIC(irq), irq); return intno; } diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index 82579ee538d..9f593188129 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -32,6 +32,8 @@ #include "lapic.h" #define PIC_NUM_PINS 16 +#define SELECT_PIC(irq) \ + ((irq) < 8 ? KVM_IRQCHIP_PIC_MASTER : KVM_IRQCHIP_PIC_SLAVE) struct kvm; struct kvm_vcpu; -- cgit v1.2.3 From d20626936dd6aa783760e780dae5abb127564316 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 2 Feb 2009 16:23:50 +0100 Subject: x86: Add EFER descriptions for FFXSR AMD K10 includes support for the FFXSR feature, which leaves out XMM registers on FXSAVE/FXRSTOR when the EFER_FFXSR bit is set in EFER. The CPUID feature bit exists already, but the EFER bit is missing currently, so this patch adds it to the list of known EFER bits. Signed-off-by: Alexander Graf CC: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/include/asm/msr-index.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 46e9646e7a6..f4e505f286b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -19,12 +19,14 @@ #define _EFER_LMA 10 /* Long mode active (read-only) */ #define _EFER_NX 11 /* No execute enable */ #define _EFER_SVME 12 /* Enable virtualization */ +#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */ #define EFER_SCE (1<<_EFER_SCE) #define EFER_LME (1<<_EFER_LME) #define EFER_LMA (1<<_EFER_LMA) #define EFER_NX (1<<_EFER_NX) #define EFER_SVME (1<<_EFER_SVME) +#define EFER_FFXSR (1<<_EFER_FFXSR) /* Intel MSRs. Some also available on other CPUs */ #define MSR_IA32_PERFCTR0 0x000000c1 -- cgit v1.2.3 From 1b2fd70c4eddef53f32639296818c0253e7ca48d Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 2 Feb 2009 16:23:51 +0100 Subject: KVM: Add FFXSR support AMD K10 CPUs implement the FFXSR feature that gets enabled using EFER.
Let's check if the virtual CPU description includes that CPUID feature bit and allow enabling it then. This is required for Windows Server 2008 in Hyper-V mode. v2 adds CPUID capability exposure Signed-off-by: Alexander Graf Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 3 +++ arch/x86/kvm/x86.c | 12 ++++++++++++ 2 files changed, 15 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index db5021b2b5a..1c4a018bf4b 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -417,6 +417,9 @@ static __init int svm_hardware_setup(void) if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); + if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) + kvm_enable_efer_bits(EFER_FFXSR); + if (nested) { printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); kvm_enable_efer_bits(EFER_SVME); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 32e3a7ec6ad..8f83590b47d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -490,6 +490,17 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) return; } + if (efer & EFER_FFXSR) { + struct kvm_cpuid_entry2 *feat; + + feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); + if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { + printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n"); + kvm_inject_gp(vcpu, 0); + return; + } + } + if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; @@ -1240,6 +1251,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, #ifdef CONFIG_X86_64 bit(X86_FEATURE_LM) | #endif + bit(X86_FEATURE_FXSR_OPT) | bit(X86_FEATURE_MMXEXT) | bit(X86_FEATURE_3DNOWEXT) | bit(X86_FEATURE_3DNOW); -- cgit v1.2.3 From 34c33d163fe509da8414a736c6328855f8c164e5 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Sun, 8 Feb 2009 13:28:15 +0100 Subject: KVM: Drop unused evaluations from string pio handlers Looks like neither the direction nor the rep prefix are used anymore. Drop related evaluations from SVM's and VMX's I/O exit handlers. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 4 +--- arch/x86/kvm/vmx.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 1c4a018bf4b..22e88a4a51c 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1194,7 +1194,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) { u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? 
*/ - int size, down, in, string, rep; + int size, in, string; unsigned port; ++svm->vcpu.stat.io_exits; @@ -1213,8 +1213,6 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) in = (io_info & SVM_IOIO_TYPE_MASK) != 0; port = io_info >> 16; size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; - rep = (io_info & SVM_IOIO_REP_MASK) != 0; - down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0; skip_emulated_instruction(&svm->vcpu); return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index df454de8acf..509b3530540 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2698,7 +2698,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { unsigned long exit_qualification; - int size, down, in, string, rep; + int size, in, string; unsigned port; ++vcpu->stat.io_exits; @@ -2714,8 +2714,6 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) size = (exit_qualification & 7) + 1; in = (exit_qualification & 8) != 0; - down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0; - rep = (exit_qualification & 32) != 0; port = exit_qualification >> 16; skip_emulated_instruction(vcpu); -- cgit v1.2.3 From 49cd7d2238e44f7ee4269481cd8a1261cc8f93a5 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Wed, 11 Feb 2009 13:50:40 +0800 Subject: KVM: VMX: Use kvm_mmu_page_fault() to handle EPT violation mmio Removed duplicated code. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 30 +----------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 509b3530540..cb27ffccf46 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3058,11 +3058,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u64 exit_qualification; - enum emulation_result er; gpa_t gpa; - unsigned long hva; int gla_validity; - int r; exit_qualification = vmcs_read64(EXIT_QUALIFICATION); @@ -3085,32 +3082,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); - hva = gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT); - if (!kvm_is_error_hva(hva)) { - r = kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0); - if (r < 0) { - printk(KERN_ERR "EPT: Not enough memory!\n"); - return -ENOMEM; - } - return 1; - } else { - /* must be MMIO */ - er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); - - if (er == EMULATE_FAIL) { - printk(KERN_ERR - "EPT: Fail to handle EPT violation vmexit!er is %d\n", - er); - printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", - (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS), - (long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS)); - printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", - (long unsigned int)exit_qualification); - return -ENOTSUPP; - } else if (er == EMULATE_DO_MMIO) - return 0; - } - return 1; + return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0); } static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) -- cgit v1.2.3 From c807660407a695f390034e402edfe544a1d2e40c Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Wed, 4 Feb 2009 17:52:04 +0100 Subject: KVM: Fix kvmclock on !constant_tsc boxes kvmclock currently falls apart on machines without constant tsc. This patch fixes it.
Changes: * keep tsc frequency in a per-cpu variable. * handle kvmclock update using a new request flag, thus checking whenever we need an update each time we enter guest context. * use a cpufreq notifier to track frequency changes and force kvmclock updates. * send ipis to kick cpu out of guest context if needed to make sure the guest doesn't see stale values. Signed-off-by: Gerd Hoffmann Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 94 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8f83590b47d..05d7be89b5e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -617,6 +618,8 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info * hv_clock->tsc_to_system_mul); } +static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); + static void kvm_write_guest_time(struct kvm_vcpu *v) { struct timespec ts; @@ -627,9 +630,9 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) if ((!vcpu->time_page)) return; - if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) { - kvm_set_time_scale(tsc_khz, &vcpu->hv_clock); - vcpu->hv_clock_tsc_khz = tsc_khz; + if (unlikely(vcpu->hv_clock_tsc_khz != __get_cpu_var(cpu_tsc_khz))) { + kvm_set_time_scale(__get_cpu_var(cpu_tsc_khz), &vcpu->hv_clock); + vcpu->hv_clock_tsc_khz = __get_cpu_var(cpu_tsc_khz); } /* Keep irq disabled to prevent changes to the clock */ @@ -660,6 +663,16 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); } +static int kvm_request_guest_time_update(struct kvm_vcpu *v) +{ + struct kvm_vcpu_arch *vcpu = &v->arch; + + if (!vcpu->time_page) + return 0; + set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests); + return 1; +} + static bool msr_mtrr_valid(unsigned msr) { switch (msr) { @@ -790,7 +803,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) vcpu->arch.time_page = NULL; } - kvm_write_guest_time(vcpu); + kvm_request_guest_time_update(vcpu); break; } default: @@ -1000,6 +1013,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: + case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: @@ -1025,9 +1039,6 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_IOMMU: r = iommu_found(); break; - case KVM_CAP_CLOCKSOURCE: - r = boot_cpu_has(X86_FEATURE_CONSTANT_TSC); - break; default: r = 0; break; @@ -1098,7 +1109,7 @@ out: void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { kvm_x86_ops->vcpu_load(vcpu, cpu); - kvm_write_guest_time(vcpu); + kvm_request_guest_time_update(vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -2642,9 +2653,72 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, } EXPORT_SYMBOL_GPL(kvm_emulate_pio_string); +static void bounce_off(void *info) +{ + /* nothing */ +} + +static unsigned int ref_freq; +static unsigned long tsc_khz_ref; + +static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + struct kvm *kvm; + struct kvm_vcpu *vcpu; + int i, send_ipi = 0; + + if (!ref_freq) + ref_freq = freq->old; + + if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) + return 0; + if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) + return 0; + per_cpu(cpu_tsc_khz, freq->cpu) = 
cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); + + spin_lock(&kvm_lock); + list_for_each_entry(kvm, &vm_list, vm_list) { + for (i = 0; i < KVM_MAX_VCPUS; ++i) { + vcpu = kvm->vcpus[i]; + if (!vcpu) + continue; + if (vcpu->cpu != freq->cpu) + continue; + if (!kvm_request_guest_time_update(vcpu)) + continue; + if (vcpu->cpu != smp_processor_id()) + send_ipi++; + } + } + spin_unlock(&kvm_lock); + + if (freq->old < freq->new && send_ipi) { + /* + * We upscale the frequency. Must make the guest + * doesn't see old kvmclock values while running with + * the new frequency, otherwise we risk the guest sees + * time go backwards. + * + * In case we update the frequency for another cpu + * (which might be in guest context) send an interrupt + * to kick the cpu out of guest context. Next time + * guest context is entered kvmclock will be updated, + * so the guest will not see stale values. + */ + smp_call_function_single(freq->cpu, bounce_off, NULL, 1); + } + return 0; +} + +static struct notifier_block kvmclock_cpufreq_notifier_block = { + .notifier_call = kvmclock_cpufreq_notifier +}; + int kvm_arch_init(void *opaque) { - int r; + int r, cpu; struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; if (kvm_x86_ops) { @@ -2675,6 +2749,15 @@ int kvm_arch_init(void *opaque) kvm_mmu_set_base_ptes(PT_PRESENT_MASK); kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, PT_DIRTY_MASK, PT64_NX_MASK, 0, 0); + + for_each_possible_cpu(cpu) + per_cpu(cpu_tsc_khz, cpu) = tsc_khz; + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { + tsc_khz_ref = tsc_khz; + cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + return 0; out: @@ -3010,6 +3093,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (vcpu->requests) { if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests)) __kvm_migrate_timers(vcpu); + if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests)) + kvm_write_guest_time(vcpu); if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests)) kvm_mmu_sync_roots(vcpu); if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) -- cgit v1.2.3 From 6bed6b9e84bf5e6b468847a50c0751389fdd01d4 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 18 Feb 2009 14:08:59 +0100 Subject: KVM: MMU: remove redundant check in mmu_set_spte The following code flow is unnecessary: if (largepage) was_rmapped = is_large_pte(*shadow_pte); else was_rmapped = 1; The is_large_pte() function will always evaluate to one here because the (largepage && !is_large_pte) case is already handled in the first if-clause. So we can remove this check and set was_rmapped to one always here. 
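To make the redundancy concrete, here is a condensed sketch of the surrounding control flow -- simplified from the mmu_set_spte() of this era, not the verbatim kernel code (the is_rmap_pte() guard and the pfn-changed branch are recalled from the surrounding file, not shown in the hunk below):

	if (is_rmap_pte(*shadow_pte)) {
		if (largepage && !is_large_pte(*shadow_pte)) {
			/* size mismatch: the child table is zapped here */
		} else if (pfn != spte_to_pfn(*shadow_pte)) {
			/* pfn changed: unmap the old page and remap */
			rmap_remove(vcpu->kvm, shadow_pte);
		} else {
			/*
			 * Only reachable when !largepage, or when largepage
			 * and is_large_pte(*shadow_pte) are both true, so the
			 * removed is_large_pte() check could only return 1.
			 */
			was_rmapped = 1;
		}
	}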
Signed-off-by: Joerg Roedel Acked-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ef060ec444a..c90b4b2f2ab 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1791,12 +1791,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, pgprintk("hfn old %lx new %lx\n", spte_to_pfn(*shadow_pte), pfn); rmap_remove(vcpu->kvm, shadow_pte); - } else { - if (largepage) - was_rmapped = is_large_pte(*shadow_pte); - else - was_rmapped = 1; - } + } else + was_rmapped = 1; } if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault, dirty, largepage, global, gfn, pfn, speculative, true)) { -- cgit v1.2.3 From 452425dbaa1974e9fc489e64a8de46a47b4c2754 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 18 Feb 2009 14:54:37 +0100 Subject: KVM: MMU: remove assertion in kvm_mmu_alloc_page The assertion no longer makes sense since we don't clear page tables on allocation; instead we clear them during prefetch. Signed-off-by: Joerg Roedel Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index c90b4b2f2ab..4a21b0f8491 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -802,7 +802,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, set_page_private(virt_to_page(sp->spt), (unsigned long)sp); list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&sp->oos_link); - ASSERT(is_empty_shadow_page(sp->spt)); bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); sp->multimapped = 0; sp->parent_pte = parent_pte; -- cgit v1.2.3 From 4925663a079c77d95d8685228ad6675fc5639c8e Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 4 Feb 2009 17:28:14 +0200 Subject: KVM: Report IRQ injection status to userspace. IRQ injection status is either -1 (if there was no CPU found that should accept the interrupt because the IRQ was masked or the ioapic was misconfigured or ...) or >= 0, in which case the number indicates how many CPUs the interrupt was injected into. If the value is 0 it means that the interrupt was coalesced and probably should be reinjected. Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/i8259.c | 18 +++++++++++++----- arch/x86/kvm/x86.c | 13 +++++++++++-- 3 files changed, 25 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 55fd4c5fd38..f0faf58044f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -616,7 +616,7 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, u32 error_code); -void kvm_pic_set_irq(void *opaque, int irq, int level); +int kvm_pic_set_irq(void *opaque, int irq, int level); void kvm_inject_nmi(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 93160375c84..b4e662e94dd 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -77,12 +77,13 @@ void kvm_pic_clear_isr_ack(struct kvm *kvm) /* * set irq level.
If an edge is detected, then the IRR is set to 1 */ -static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) +static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) { - int mask; + int mask, ret = 1; mask = 1 << irq; if (s->elcr & mask) /* level triggered */ if (level) { + ret = !(s->irr & mask); s->irr |= mask; s->last_irr |= mask; } else { @@ -91,11 +92,15 @@ static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) } else /* edge triggered */ if (level) { - if ((s->last_irr & mask) == 0) + if ((s->last_irr & mask) == 0) { + ret = !(s->irr & mask); s->irr |= mask; + } s->last_irr |= mask; } else s->last_irr &= ~mask; + + return (s->imr & mask) ? -1 : ret; } /* @@ -172,16 +177,19 @@ void kvm_pic_update_irq(struct kvm_pic *s) pic_unlock(s); } -void kvm_pic_set_irq(void *opaque, int irq, int level) +int kvm_pic_set_irq(void *opaque, int irq, int level) { struct kvm_pic *s = opaque; + int ret = -1; pic_lock(s); if (irq >= 0 && irq < PIC_NUM_PINS) { - pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); + ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); pic_update_irq(s); } pic_unlock(s); + + return ret; } /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 05d7be89b5e..e4db5be7c95 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1019,6 +1019,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_REINJECT_CONTROL: + case KVM_CAP_IRQ_INJECT_STATUS: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -1877,6 +1878,7 @@ long kvm_arch_vm_ioctl(struct file *filp, create_pit_unlock: mutex_unlock(&kvm->lock); break; + case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; @@ -1884,10 +1886,17 @@ long kvm_arch_vm_ioctl(struct file *filp, if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; if (irqchip_in_kernel(kvm)) { + __s32 status; mutex_lock(&kvm->lock); - kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, - irq_event.irq, irq_event.level); + status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, + irq_event.irq, irq_event.level); mutex_unlock(&kvm->lock); + if (ioctl == KVM_IRQ_LINE_STATUS) { + irq_event.status = status; + if (copy_to_user(argp, &irq_event, + sizeof irq_event)) + goto out; + } r = 0; } break; -- cgit v1.2.3 From 1fbdc7a58512a6283e10fd27108197679db95ffa Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Sun, 11 Jan 2009 22:39:44 +0100 Subject: KVM: SVM: set accessed bit for VMCB segment selectors In the segment descriptor _cache_ the accessed bit is always set (although it can be cleared in the descriptor itself). Since Intel checks for this condition on a VMENTRY, set this bit in the AMD path to enable cross vendor migration. Cc: stable@kernel.org Signed-off-by: Andre Przywara Acked-By: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 41 +++++++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 22e88a4a51c..1821c207819 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -796,20 +796,37 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; - /* - * SVM always stores 0 for the 'G' bit in the CS selector in - * the VMCB on a VMEXIT. This hurts cross-vendor migration: - * Intel's VMENTRY has a check on the 'G' bit. 
- */ - if (seg == VCPU_SREG_CS) + switch (seg) { + case VCPU_SREG_CS: + /* + * SVM always stores 0 for the 'G' bit in the CS selector in + * the VMCB on a VMEXIT. This hurts cross-vendor migration: + * Intel's VMENTRY has a check on the 'G' bit. + */ var->g = s->limit > 0xfffff; - - /* - * Work around a bug where the busy flag in the tr selector - * isn't exposed - */ - if (seg == VCPU_SREG_TR) + break; + case VCPU_SREG_TR: + /* + * Work around a bug where the busy flag in the tr selector + * isn't exposed + */ var->type |= 0x2; + break; + case VCPU_SREG_DS: + case VCPU_SREG_ES: + case VCPU_SREG_FS: + case VCPU_SREG_GS: + /* + * The accessed bit must always be set in the segment + * descriptor cache, although it can be cleared in the + * descriptor, the cached bit always remains at 1. Since + * Intel has a check on this, set it here to support + * cross-vendor migration. + */ + if (!var->unusable) + var->type |= 0x1; + break; + } var->unusable = !var->present; } -- cgit v1.2.3 From c5bc22424021cabda862727fb3f5098b866f074d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 19 Feb 2009 12:18:56 +0100 Subject: KVM: MMU: Fix another largepage memory leak In the paging_fetch function rmap_remove is called after setting a large pte to non-present. This causes rmap_remove to not drop the reference to the large page. The result is a memory leak of that page. Cc: stable@kernel.org Signed-off-by: Joerg Roedel Acked-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/paging_tmpl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 7314c0944c5..0f11792fafa 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -306,9 +306,9 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, continue; if (is_large_pte(*sptep)) { + rmap_remove(vcpu->kvm, sptep); set_shadow_pte(sptep, shadow_trap_nonpresent_pte); kvm_flush_remote_tlbs(vcpu->kvm); - rmap_remove(vcpu->kvm, sptep); } if (level == PT_DIRECTORY_LEVEL -- cgit v1.2.3 From 401d10dee083bda281f2fdcdf654080313ba30ec Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Fri, 20 Feb 2009 22:53:37 +0530 Subject: KVM: VMX: Update necessary state when guest enters long mode setup_msrs() should be called when entering long mode to save the shadow state for the 64-bit guest state. Using vmx_set_efer() in enter_lmode() removes some duplicated code and also ensures we call setup_msrs(). We can safely pass the value of shadow_efer to vmx_set_efer() as no other bits in the efer change while enabling long mode (guest first sets EFER.LME, then sets CR0.PG which causes a vmexit where we activate long mode). With this fix, is_long_mode() can check for EFER.LMA set instead of EFER.LME and 5e23049e86dd298b72e206b420513dbc3a240cd9 can be reverted. 
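For reference, the transition that parenthetical describes looks roughly like this from the guest side (an illustrative sketch, not actual guest code; a real guest also loads a 64-bit GDT and PAE page tables before step 2):

	u64 efer;

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | EFER_LME);   /* 1. LME set; LMA still clear */
	write_cr0(read_cr0() | X86_CR0_PG);  /* 2. paging on -> vmexit      */

The CR0 write is intercepted, and that is exactly where the hypervisor must set EFER.LMA, flip VM_ENTRY_IA32E_MODE, and -- with this patch -- refresh the shadowed MSR state via setup_msrs(); since no other EFER bits change between the two steps, passing shadow_efer straight to vmx_set_efer() is safe.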
Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 54 ++++++++++++++++++++++++------------------------------ 1 file changed, 24 insertions(+), 30 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index cb27ffccf46..48063a0aa24 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1430,6 +1430,29 @@ continue_rmode: init_rmode(vcpu->kvm); } +static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); + + vcpu->arch.shadow_efer = efer; + if (!msr) + return; + if (efer & EFER_LMA) { + vmcs_write32(VM_ENTRY_CONTROLS, + vmcs_read32(VM_ENTRY_CONTROLS) | + VM_ENTRY_IA32E_MODE); + msr->data = efer; + } else { + vmcs_write32(VM_ENTRY_CONTROLS, + vmcs_read32(VM_ENTRY_CONTROLS) & + ~VM_ENTRY_IA32E_MODE); + + msr->data = efer & ~EFER_LME; + } + setup_msrs(vmx); +} + #ifdef CONFIG_X86_64 static void enter_lmode(struct kvm_vcpu *vcpu) @@ -1444,13 +1467,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu) (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS); } - vcpu->arch.shadow_efer |= EFER_LMA; - - find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME; - vmcs_write32(VM_ENTRY_CONTROLS, - vmcs_read32(VM_ENTRY_CONTROLS) - | VM_ENTRY_IA32E_MODE); + vmx_set_efer(vcpu, vcpu->arch.shadow_efer); } static void exit_lmode(struct kvm_vcpu *vcpu) @@ -1609,30 +1627,6 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) vmcs_writel(GUEST_CR4, hw_cr4); } -static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); - - vcpu->arch.shadow_efer = efer; - if (!msr) - return; - if (efer & EFER_LMA) { - vmcs_write32(VM_ENTRY_CONTROLS, - vmcs_read32(VM_ENTRY_CONTROLS) | - VM_ENTRY_IA32E_MODE); - msr->data = efer; - - } else { - vmcs_write32(VM_ENTRY_CONTROLS, - vmcs_read32(VM_ENTRY_CONTROLS) & - ~VM_ENTRY_IA32E_MODE); - - msr->data = efer & ~EFER_LME; - } - setup_msrs(vmx); -} - static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) { struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; -- cgit v1.2.3 From 41d6af119206e98764b4ae6d264d63acefcf851e Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Thu, 28 Feb 2008 16:06:15 +0530 Subject: KVM: is_long_mode() should check for EFER.LMA is_long_mode currently checks the LongModeEnable bit in EFER instead of the LongModeActive bit. This is wrong, but we survived this till now since it wasn't triggered. This breaks guests that go from long mode to compatibility mode. This is noticed on a solaris guest and fixes bug #1842160 Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 258e5d56298..eaab2145f62 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -54,7 +54,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) static inline int is_long_mode(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 - return vcpu->arch.shadow_efer & EFER_LME; + return vcpu->arch.shadow_efer & EFER_LMA; #else return 0; #endif -- cgit v1.2.3 From d7364a29b305862cbc92b3f8b6ad9968270d068c Mon Sep 17 00:00:00 2001 From: Hannes Eder Date: Sat, 21 Feb 2009 02:18:13 +0100 Subject: KVM: fix sparse warnings: context imbalance Impact: Attribute function with __acquires(...) resp. __releases(...). 
Fix these sparse warnings: arch/x86/kvm/i8259.c:34:13: warning: context imbalance in 'pic_lock' - wrong count at exit arch/x86/kvm/i8259.c:39:13: warning: context imbalance in 'pic_unlock' - unexpected unlock Signed-off-by: Hannes Eder Signed-off-by: Avi Kivity --- arch/x86/kvm/i8259.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index b4e662e94dd..1ccb50c74f1 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -32,11 +32,13 @@ #include static void pic_lock(struct kvm_pic *s) + __acquires(&s->lock) { spin_lock(&s->lock); } static void pic_unlock(struct kvm_pic *s) + __releases(&s->lock) { struct kvm *kvm = s->kvm; unsigned acks = s->pending_acks; -- cgit v1.2.3 From cded19f396bc3c9d299331709d0a9fbc6ecc148a Mon Sep 17 00:00:00 2001 From: Hannes Eder Date: Sat, 21 Feb 2009 02:19:13 +0100 Subject: KVM: fix sparse warnings: Should it be static? Impact: Make symbols static. Fix these sparse warnings: arch/x86/kvm/mmu.c:992:5: warning: symbol 'mmu_pages_add' was not declared. Should it be static? arch/x86/kvm/mmu.c:1124:5: warning: symbol 'mmu_pages_next' was not declared. Should it be static? arch/x86/kvm/mmu.c:1144:6: warning: symbol 'mmu_pages_clear_parents' was not declared. Should it be static? arch/x86/kvm/x86.c:2037:5: warning: symbol 'kvm_read_guest_virt' was not declared. Should it be static? arch/x86/kvm/x86.c:2067:5: warning: symbol 'kvm_write_guest_virt' was not declared. Should it be static? virt/kvm/irq_comm.c:220:5: warning: symbol 'setup_routing_entry' was not declared. Should it be static? Signed-off-by: Hannes Eder Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 11 ++++++----- arch/x86/kvm/x86.c | 8 ++++---- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 4a21b0f8491..2a36f7f7c4c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -989,8 +989,8 @@ struct kvm_mmu_pages { idx < 512; \ idx = find_next_bit(bitmap, 512, idx+1)) -int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, - int idx) +static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, + int idx) { int i; @@ -1121,8 +1121,9 @@ struct mmu_page_path { i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ i = mmu_pages_next(&pvec, &parents, i)) -int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents, - int i) +static int mmu_pages_next(struct kvm_mmu_pages *pvec, + struct mmu_page_path *parents, + int i) { int n; @@ -1141,7 +1142,7 @@ int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents, return n; } -void mmu_pages_clear_parents(struct mmu_page_path *parents) +static void mmu_pages_clear_parents(struct mmu_page_path *parents) { struct kvm_mmu_page *sp; unsigned int level = 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e4db5be7c95..8ca100a9eca 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2043,8 +2043,8 @@ static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, return dev; } -int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu) +static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu) { void *data = val; int r = X86EMUL_CONTINUE; @@ -2073,8 +2073,8 @@ out: return r; } -int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes, - struct kvm_vcpu *vcpu) +static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes, + struct
kvm_vcpu *vcpu) { void *data = val; int r = X86EMUL_CONTINUE; -- cgit v1.2.3 From 4539b35881ae9664b0e2953438dd83f5ee02c0b4 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Thu, 12 Mar 2009 18:18:43 +0100 Subject: KVM: Fix missing smp tlb flush in invlpg When kvm emulates an invlpg instruction, it can drop a shadow pte, but leaves the guest tlbs intact. This can cause memory corruption when swapping out. Without this, the other cpu can still write to a freed host physical page. A TLB SMP flush must happen whenever rmap_remove is called, always before mmu_lock is released, because the VM will take the mmu_lock before it can finally add the page to the freelist after swapout. The mmu notifier makes it safe to flush the tlb after freeing the page (otherwise it would never be safe), so we can do a single flush for multiple invalidated sptes. Cc: stable@kernel.org Signed-off-by: Andrea Arcangeli Acked-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/paging_tmpl.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 0f11792fafa..6bd70206c56 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -446,6 +446,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) gpa_t pte_gpa = -1; int level; u64 *sptep; + int need_flush = 0; spin_lock(&vcpu->kvm->mmu_lock); @@ -465,6 +466,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) rmap_remove(vcpu->kvm, sptep); if (is_large_pte(*sptep)) --vcpu->kvm->stat.lpages; + need_flush = 1; } set_shadow_pte(sptep, shadow_trap_nonpresent_pte); break; @@ -474,6 +476,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) break; } + if (need_flush) + kvm_flush_remote_tlbs(vcpu->kvm); spin_unlock(&vcpu->kvm->mmu_lock); if (pte_gpa == -1) -- cgit v1.2.3 From 16175a796d061833aacfbd9672235f2d2725df65 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 23 Mar 2009 22:13:44 +0200 Subject: KVM: VMX: Don't allow uninhibited access to EFER on i386 vmx_set_msr() does not allow i386 guests to touch EFER, but they can still do so through the default: label in the switch. If they set EFER_LME, they can oops the host. Fix by having EFER access go through the normal channel (which will check for EFER_LME) even on i386. Reported-and-tested-by: Benjamin Gilbert Cc: stable@kernel.org Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 48063a0aa24..bb481330716 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -936,11 +936,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) int ret = 0; switch (msr_index) { -#ifdef CONFIG_X86_64 case MSR_EFER: vmx_load_host_state(vmx); ret = kvm_set_msr_common(vcpu, msr_index, data); break; +#ifdef CONFIG_X86_64 case MSR_FS_BASE: vmcs_writel(GUEST_FS_BASE, data); break; -- cgit v1.2.3 From fc2869f6a1993550c2765e934b117e993782db30 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 13 Mar 2009 16:37:48 +0100 Subject: x86: disable __do_IRQ support Impact: disable unused code x86 is fully converted to flow handlers. No need to keep the deprecated __do_IRQ() support active.
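For context, "flow handlers" means each interrupt is dispatched through a per-descriptor callback instead of the legacy catch-all; a rough sketch of the generic entry path of this era (illustrative only, with a made-up function name):

	/* once the vector has been decoded to a Linux irq number */
	static void example_dispatch(unsigned int irq)
	{
		struct irq_desc *desc = irq_to_desc(irq);

		/* handle_level_irq(), handle_edge_irq(), ... -- chosen when
		 * the irq chip was set up; __do_IRQ() never runs */
		desc->handle_irq(irq, desc);
	}

With every x86 interrupt going through such a handler, selecting GENERIC_HARDIRQS_NO__DO_IRQ in the hunk below compiles the deprecated fallback out entirely.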
Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index bc2fbadff9f..3a330a437c6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -165,6 +165,9 @@ config GENERIC_HARDIRQS bool default y +config GENERIC_HARDIRQS_NO__DO_IRQ + def_bool y + config GENERIC_IRQ_PROBE bool default y -- cgit v1.2.3 From 17d140402e6f0fd5dde2fdf8d045e3f95f865663 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 14 Jan 2009 23:37:50 +0300 Subject: x86: headers cleanup - setup.h Impact: cleanup 'make headers_check' warns us about leaking of kernel private (mostly compile time vars) data to userspace in headers. Fix it. Guard this one with __KERNEL__. Signed-off-by: Cyrill Gorcunov Signed-off-by: H. Peter Anvin Signed-off-by: Ingo Molnar --- arch/x86/include/asm/setup.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 5a3a1371575..c2308f5250f 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_SETUP_H #define _ASM_X86_SETUP_H +#ifdef __KERNEL__ + #define COMMAND_LINE_SIZE 2048 #ifndef __ASSEMBLY__ @@ -33,8 +35,6 @@ struct x86_quirks { #endif /* __ASSEMBLY__ */ -#ifdef __KERNEL__ - #ifdef __i386__ #include -- cgit v1.2.3 From e42d1fe804408c57506a5326252b4db29958a7fb Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Mar 2009 13:26:13 -0700 Subject: x86/PCI: make pci=lastbus=255 work when acpi is on Impact: scan more peer root buses even when ACPI is used Move pci_bios_fixup_peer_bridges out of pci_legacy_init and into pci_subsys_init. This allows pci_bios_fixup_peer_bridges to be called even when pci_acpi_init is driving PCI initialization. Signed-off-by: Yinghai Lu Signed-off-by: Jesse Barnes --- arch/x86/pci/legacy.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c index f1065b129e9..4061bb0f267 100644 --- a/arch/x86/pci/legacy.c +++ b/arch/x86/pci/legacy.c @@ -50,8 +50,6 @@ static int __init pci_legacy_init(void) if (pci_root_bus) pci_bus_add_devices(pci_root_bus); - pcibios_fixup_peer_bridges(); - return 0; } @@ -67,6 +65,7 @@ int __init pci_subsys_init(void) pci_visws_init(); #endif pci_legacy_init(); + pcibios_fixup_peer_bridges(); pcibios_irq_init(); pcibios_init(); -- cgit v1.2.3 From 2b1c6bd77d4e6a727ffac8630cd154b2144b751a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 10:09:09 +0100 Subject: generic compat_sys_ustat Due to the different size of ino_t, ustat needs a compat handler, but currently only x86 and mips provide one. Add a generic compat_sys_ustat and switch all architectures over to it. Instead of doing various user copy hacks, compat_sys_ustat just reimplements sys_ustat, as it's trivial. This was suggested by Arnd Bergmann. Found by Eric Sandeen when running xfstests/017 on ppc64, which causes stack smashing warnings on RHEL/Fedora due to the too large amount of data written by the syscall.
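The generic handler itself lands in fs/compat.c, outside the arch/x86 slice shown here; it is along these lines (a sketch reconstructed from the description above and the 2009-era VFS helpers, so treat the helper choices as approximate):

	asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
	{
		struct super_block *sb;
		struct compat_ustat tmp;
		struct kstatfs sbuf;
		int err;

		sb = user_get_super(new_decode_dev(dev));
		if (!sb)
			return -EINVAL;
		err = vfs_statfs(sb->s_root, &sbuf);
		drop_super(sb);
		if (err)
			return err;

		memset(&tmp, 0, sizeof(struct compat_ustat));
		tmp.f_tfree = sbuf.f_bfree;
		tmp.f_tinode = sbuf.f_ffree;
		if (copy_to_user(u, &tmp, sizeof(struct compat_ustat)))
			return -EFAULT;
		return 0;
	}

Copying exactly sizeof(struct compat_ustat) bytes is the point: the old per-arch wrappers could write a native-sized struct ustat, which is the oversized write that smashed the stack on ppc64.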
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- arch/x86/ia32/ia32entry.S | 2 +- arch/x86/ia32/sys_ia32.c | 22 ---------------------- arch/x86/include/asm/ia32.h | 7 ------- arch/x86/include/asm/sys_ia32.h | 2 -- 4 files changed, 1 insertion(+), 32 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 5a0d76dc56a..8ef8876666b 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -557,7 +557,7 @@ ia32_sys_call_table: .quad sys32_olduname .quad sys_umask /* 60 */ .quad sys_chroot - .quad sys32_ustat + .quad compat_sys_ustat .quad sys_dup2 .quad sys_getppid .quad sys_getpgrp /* 65 */ diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 6c0d7f6231a..efac92fd1ef 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -638,28 +638,6 @@ long sys32_uname(struct old_utsname __user *name) return err ? -EFAULT : 0; } -long sys32_ustat(unsigned dev, struct ustat32 __user *u32p) -{ - struct ustat u; - mm_segment_t seg; - int ret; - - seg = get_fs(); - set_fs(KERNEL_DS); - ret = sys_ustat(dev, (struct ustat __user *)&u); - set_fs(seg); - if (ret < 0) - return ret; - - if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) || - __put_user((__u32) u.f_tfree, &u32p->f_tfree) || - __put_user((__u32) u.f_tinode, &u32p->f_tfree) || - __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) || - __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack))) - ret = -EFAULT; - return ret; -} - asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, struct pt_regs *regs) { diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h index 50ca486fd88..1f7e6251728 100644 --- a/arch/x86/include/asm/ia32.h +++ b/arch/x86/include/asm/ia32.h @@ -129,13 +129,6 @@ typedef struct compat_siginfo { } _sifields; } compat_siginfo_t; -struct ustat32 { - __u32 f_tfree; - compat_ino_t f_tinode; - char f_fname[6]; - char f_fpack[6]; -}; - #define IA32_STACK_TOP IA32_PAGE_OFFSET #ifdef __KERNEL__ diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index ffb08be2a53..72a6dcd1299 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h @@ -70,8 +70,6 @@ struct old_utsname; asmlinkage long sys32_olduname(struct oldold_utsname __user *); long sys32_uname(struct old_utsname __user *); -long sys32_ustat(unsigned, struct ustat32 __user *); - asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *, compat_uptr_t __user *, struct pt_regs *); asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); -- cgit v1.2.3 From b7ff99ea53cd16de8f6166c0e98f19a7c6ca67ee Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Mon, 30 Mar 2009 21:55:23 -0600 Subject: lguest: wire up pte_update/pte_update_defer Impact: intermittent guest segv/crash fix I've been seeing random guest bad address crashes and segmentation faults: bisect led to 4f98a2fee8 (vmscan: split LRU lists into anon & file sets), but that's a red herring. It turns out that lguest never hooked up the pte_update/pte_update_defer calls, so our ptes were not always in sync. After the vmscan commit, the bug became reproducible; now a fsck in a 64MB guest causes reproducible pagetable corruption. 
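To see why the missing hooks corrupt state, consider the pattern used by the generic mm code of this era (an illustrative sketch; example_mkyoung() is a made-up name):

	/*
	 * Generic code sometimes writes a pte in place and then calls
	 * pte_update() so a paravirt backend can propagate the change.
	 * Natively pte_update() is a no-op; under lguest it must become
	 * a hypercall, or the Host's shadow page tables go stale.
	 */
	static void example_mkyoung(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
	{
		set_pte(ptep, pte_mkyoung(*ptep));  /* direct in-place write */
		pte_update(vma->vm_mm, addr, ptep); /* was a silent no-op for
						     * lguest before the hunk
						     * below wired it up */
	}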
Signed-off-by: Rusty Russell Cc: jeremy@xensource.com Cc: virtualization@lists.osdl.org Cc: stable@kernel.org --- arch/x86/lguest/boot.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 9fe4ddaa8f6..7822d29b02c 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -490,11 +490,17 @@ static void lguest_write_cr4(unsigned long val) * into a process' address space. We set the entry then tell the Host the * toplevel and address this corresponds to. The Guest uses one pagetable per * process, so we need to tell the Host which one we're changing (mm->pgd). */ +static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low); +} + static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { *ptep = pteval; - lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low); + lguest_pte_update(mm, addr, ptep); } /* The Guest calls this to set a top-level entry. Again, we set the entry then @@ -1040,6 +1046,8 @@ __init void lguest_init(void) pv_mmu_ops.read_cr3 = lguest_read_cr3; pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode; + pv_mmu_ops.pte_update = lguest_pte_update; + pv_mmu_ops.pte_update_defer = lguest_pte_update; #ifdef CONFIG_X86_LOCAL_APIC /* apic read/write intercepts */ -- cgit v1.2.3 From 4cd8b5e2a159f18a1507f1187b44a1acbfa6341b Mon Sep 17 00:00:00 2001 From: Matias Zabaljauregui Date: Sat, 14 Mar 2009 13:37:52 -0200 Subject: lguest: use KVM hypercalls Impact: cleanup This patch allow us to use KVM hypercalls Signed-off-by: Matias Zabaljauregui Signed-off-by: Rusty Russell --- arch/x86/include/asm/lguest_hcall.h | 24 ++---------- arch/x86/lguest/boot.c | 78 +++++++++++++++++++++++-------------- arch/x86/lguest/i386_head.S | 4 +- 3 files changed, 55 insertions(+), 51 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index 43894428c3c..0f4ee7148af 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h @@ -26,36 +26,20 @@ #ifndef __ASSEMBLY__ #include +#include /*G:031 But first, how does our Guest contact the Host to ask for privileged * operations? There are two ways: the direct way is to make a "hypercall", * to make requests of the Host Itself. * - * Our hypercall mechanism uses the highest unused trap code (traps 32 and - * above are used by real hardware interrupts). Fifteen hypercalls are + * We use the KVM hypercall mechanism. Eighteen hypercalls are * available: the hypercall number is put in the %eax register, and the - * arguments (when required) are placed in %edx, %ebx and %ecx. If a return + * arguments (when required) are placed in %ebx, %ecx and %edx. If a return * value makes sense, it's returned in %eax. * * Grossly invalid calls result in Sudden Death at the hands of the vengeful * Host, rather than returning failure. This reflects Winston Churchill's * definition of a gentleman: "someone who is only rude intentionally". */ -static inline unsigned long -hcall(unsigned long call, - unsigned long arg1, unsigned long arg2, unsigned long arg3) -{ - /* "int" is the Intel instruction to trigger a trap. 
*/ - asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) - /* The call in %eax (aka "a") might be overwritten */ - : "=a"(call) - /* The arguments are in %eax, %edx, %ebx & %ecx */ - : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) - /* "memory" means this might write somewhere in memory. - * This isn't true for all calls, but it's safe to tell - * gcc that it might happen so it doesn't get clever. */ - : "memory"); - return call; -} /*:*/ /* Can't use our min() macro here: needs to be a constant */ @@ -64,7 +48,7 @@ hcall(unsigned long call, #define LHCALL_RING_SIZE 64 struct hcall_args { /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ - unsigned long arg0, arg2, arg3, arg1; + unsigned long arg0, arg1, arg2, arg3; }; #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 7822d29b02c..5be9293961b 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -107,7 +107,7 @@ static void async_hcall(unsigned long call, unsigned long arg1, local_irq_save(flags); if (lguest_data.hcall_status[next_call] != 0xFF) { /* Table full, so do normal hcall which will flush table. */ - hcall(call, arg1, arg2, arg3); + kvm_hypercall3(call, arg1, arg2, arg3); } else { lguest_data.hcalls[next_call].arg0 = call; lguest_data.hcalls[next_call].arg1 = arg1; @@ -134,13 +134,32 @@ static void async_hcall(unsigned long call, unsigned long arg1, * * So, when we're in lazy mode, we call async_hcall() to store the call for * future processing: */ -static void lazy_hcall(unsigned long call, +static void lazy_hcall1(unsigned long call, + unsigned long arg1) +{ + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) + kvm_hypercall1(call, arg1); + else + async_hcall(call, arg1, 0, 0); +} + +static void lazy_hcall2(unsigned long call, + unsigned long arg1, + unsigned long arg2) +{ + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) + kvm_hypercall2(call, arg1, arg2); + else + async_hcall(call, arg1, arg2, 0); +} + +static void lazy_hcall3(unsigned long call, unsigned long arg1, unsigned long arg2, unsigned long arg3) { if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) - hcall(call, arg1, arg2, arg3); + kvm_hypercall3(call, arg1, arg2, arg3); else async_hcall(call, arg1, arg2, arg3); } @@ -150,7 +169,7 @@ static void lazy_hcall(unsigned long call, static void lguest_leave_lazy_mode(void) { paravirt_leave_lazy(paravirt_get_lazy_mode()); - hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0); + kvm_hypercall0(LHCALL_FLUSH_ASYNC); } /*G:033 @@ -229,7 +248,7 @@ static void lguest_write_idt_entry(gate_desc *dt, /* Keep the local copy up to date. */ native_write_idt_entry(dt, entrynum, g); /* Tell Host about this new entry. 
*/ - hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); + kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); } /* Changing to a different IDT is very rare: we keep the IDT up-to-date every @@ -241,7 +260,7 @@ static void lguest_load_idt(const struct desc_ptr *desc) struct desc_struct *idt = (void *)desc->address; for (i = 0; i < (desc->size+1)/8; i++) - hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b); + kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b); } /* @@ -261,8 +280,8 @@ static void lguest_load_idt(const struct desc_ptr *desc) */ static void lguest_load_gdt(const struct desc_ptr *desc) { - BUG_ON((desc->size+1)/8 != GDT_ENTRIES); - hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0); + BUG_ON((desc->size + 1) / 8 != GDT_ENTRIES); + kvm_hypercall2(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES); } /* For a single GDT entry which changes, we do the lazy thing: alter our GDT, @@ -272,7 +291,7 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, const void *desc, int type) { native_write_gdt_entry(dt, entrynum, desc, type); - hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0); + kvm_hypercall2(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES); } /* OK, I lied. There are three "thread local storage" GDT entries which change @@ -284,7 +303,7 @@ static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) * can't handle us removing entries we're currently using. So we clear * the GS register here: if it's needed it'll be reloaded anyway. */ lazy_load_gs(0); - lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0); + lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu); } /*G:038 That's enough excitement for now, back to ploughing through each of @@ -382,7 +401,7 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, static unsigned long current_cr0; static void lguest_write_cr0(unsigned long val) { - lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0); + lazy_hcall1(LHCALL_TS, val & X86_CR0_TS); current_cr0 = val; } @@ -396,7 +415,7 @@ static unsigned long lguest_read_cr0(void) * the vowels have been optimized out. 
*/ static void lguest_clts(void) { - lazy_hcall(LHCALL_TS, 0, 0, 0); + lazy_hcall1(LHCALL_TS, 0); current_cr0 &= ~X86_CR0_TS; } @@ -418,7 +437,7 @@ static bool cr3_changed = false; static void lguest_write_cr3(unsigned long cr3) { lguest_data.pgdir = cr3; - lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0); + lazy_hcall1(LHCALL_NEW_PGTABLE, cr3); cr3_changed = true; } @@ -493,7 +512,7 @@ static void lguest_write_cr4(unsigned long val) static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low); + lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low); } static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, @@ -509,8 +528,8 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) { *pmdp = pmdval; - lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK, - (__pa(pmdp)&(PAGE_SIZE-1))/4, 0); + lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK, + (__pa(pmdp) & (PAGE_SIZE - 1)) / 4); } /* There are a couple of legacy places where the kernel sets a PTE, but we @@ -526,7 +545,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval) { *ptep = pteval; if (cr3_changed) - lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0); + lazy_hcall1(LHCALL_FLUSH_TLB, 1); } /* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on @@ -542,7 +561,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval) static void lguest_flush_tlb_single(unsigned long addr) { /* Simply set it to zero: if it was not, it will fault back in. */ - lazy_hcall(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); + lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); } /* This is what happens after the Guest has removed a large number of entries. @@ -550,7 +569,7 @@ static void lguest_flush_tlb_single(unsigned long addr) * have changed, ie. virtual addresses below PAGE_OFFSET. */ static void lguest_flush_tlb_user(void) { - lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0); + lazy_hcall1(LHCALL_FLUSH_TLB, 0); } /* This is called when the kernel page tables have changed. That's not very @@ -558,7 +577,7 @@ static void lguest_flush_tlb_user(void) * slow), so it's worth separating this from the user flushing above. */ static void lguest_flush_tlb_kernel(void) { - lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0); + lazy_hcall1(LHCALL_FLUSH_TLB, 1); } /* @@ -695,7 +714,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta, } /* Please wake us this far in the future. */ - hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0); + kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta); return 0; } @@ -706,7 +725,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode, case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: /* A 0 argument shuts the clock down. */ - hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0); + kvm_hypercall0(LHCALL_SET_CLOCKEVENT); break; case CLOCK_EVT_MODE_ONESHOT: /* This is what we expect. */ @@ -781,8 +800,8 @@ static void lguest_time_init(void) static void lguest_load_sp0(struct tss_struct *tss, struct thread_struct *thread) { - lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->sp0, - THREAD_SIZE/PAGE_SIZE); + lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0, + THREAD_SIZE / PAGE_SIZE); } /* Let's just say, I wouldn't do debugging under a Guest. */ @@ -855,7 +874,7 @@ static void set_lguest_basic_apic_ops(void) /* STOP! Until an interrupt comes in. 
*/ static void lguest_safe_halt(void) { - hcall(LHCALL_HALT, 0, 0, 0); + kvm_hypercall0(LHCALL_HALT); } /* The SHUTDOWN hypercall takes a string to describe what's happening, and @@ -865,7 +884,8 @@ static void lguest_safe_halt(void) * rather than virtual addresses, so we use __pa() here. */ static void lguest_power_off(void) { - hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0); + kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), + LGUEST_SHUTDOWN_POWEROFF); } /* @@ -875,7 +895,7 @@ static void lguest_power_off(void) */ static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) { - hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0); + kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF); /* The hcall won't return, but to keep gcc happy, we're "done". */ return NOTIFY_DONE; } @@ -916,7 +936,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) len = sizeof(scratch) - 1; scratch[len] = '\0'; memcpy(scratch, buf, len); - hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0); + kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch)); /* This routine returns the number of bytes actually written. */ return len; @@ -926,7 +946,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) * Launcher to reboot us. */ static void lguest_restart(char *reason) { - hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0); + kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); } /*G:050 diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index 10b9bd35a8f..f7954198947 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S @@ -27,8 +27,8 @@ ENTRY(lguest_entry) /* We make the "initialization" hypercall now to tell the Host about * us, and also find out where it put our page tables. */ movl $LHCALL_LGUEST_INIT, %eax - movl $lguest_data - __PAGE_OFFSET, %edx - int $LGUEST_TRAP_ENTRY + movl $lguest_data - __PAGE_OFFSET, %ebx + .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ /* Set up the initial stack so we can run C code. */ movl $(init_thread_union+THREAD_SIZE),%esp -- cgit v1.2.3 From 0451fb2ebc4f65c265bb51d71a2fc986ebf20218 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Mon, 30 Mar 2009 22:05:11 -0600 Subject: cpumask: remove node_to_first_cpu Everyone defines it, and only one person uses it (arch/mips/sgi-ip27/ip27-nmi.c). So just open code it there. Signed-off-by: Rusty Russell Cc: linux-mips@linux-mips.org --- arch/x86/include/asm/topology.h | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 77cfb2cfb38..744299c0b77 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -217,10 +217,6 @@ static inline cpumask_t node_to_cpumask(int node) { return cpu_online_map; } -static inline int node_to_first_cpu(int node) -{ - return first_cpu(cpu_online_map); -} static inline void setup_node_to_cpumask_map(void) { } @@ -237,14 +233,6 @@ static inline void setup_node_to_cpumask_map(void) { } #include -#ifdef CONFIG_NUMA -/* Returns the number of the first CPU on Node 'node'. 
*/ -static inline int node_to_first_cpu(int node) -{ - return cpumask_first(cpumask_of_node(node)); -} -#endif - extern cpumask_t cpu_coregroup_map(int cpu); extern const struct cpumask *cpu_coregroup_mask(int cpu); -- cgit v1.2.3 From 1a8a51004a18b627ea81444201f7867875212f46 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Mon, 30 Mar 2009 22:05:13 -0600 Subject: cpumask: remove references to struct irqaction's mask field. Impact: cleanup It's unused, since about 1995. So remove all initialization of it in preparation for actually removing the field. Signed-off-by: Rusty Russell Acked-by: Thomas Gleixner --- arch/x86/kernel/irqinit_32.c | 2 -- arch/x86/kernel/irqinit_64.c | 1 - arch/x86/kernel/mfgpt_32.c | 1 - arch/x86/kernel/setup.c | 1 - arch/x86/kernel/time_64.c | 2 -- arch/x86/kernel/vmiclock_32.c | 1 - 6 files changed, 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 50b8c3a3006..458c554c542 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -50,7 +50,6 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id) */ static struct irqaction fpu_irq = { .handler = math_error_irq, - .mask = CPU_MASK_NONE, .name = "fpu", }; @@ -83,7 +82,6 @@ void __init init_ISA_irqs(void) */ static struct irqaction irq2 = { .handler = no_action, - .mask = CPU_MASK_NONE, .name = "cascade", }; diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index da481a1e3f3..76abe43aa73 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -45,7 +45,6 @@ static struct irqaction irq2 = { .handler = no_action, - .mask = CPU_MASK_NONE, .name = "cascade", }; DEFINE_PER_CPU(vector_irq_t, vector_irq) = { diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index 8815f3c7fec..846510b78a0 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c @@ -348,7 +348,6 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id) static struct irqaction mfgptirq = { .handler = mfgpt_tick, .flags = IRQF_DISABLED | IRQF_NOBALANCING, - .mask = CPU_MASK_NONE, .name = "mfgpt-timer" }; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b746deb9ebc..900dad7fe38 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1027,7 +1027,6 @@ void __init x86_quirk_trap_init(void) static struct irqaction irq0 = { .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER, - .mask = CPU_MASK_NONE, .name = "timer" }; diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c index 241ec3923f6..5ba343e6184 100644 --- a/arch/x86/kernel/time_64.c +++ b/arch/x86/kernel/time_64.c @@ -116,7 +116,6 @@ unsigned long __init calibrate_cpu(void) static struct irqaction irq0 = { .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER, - .mask = CPU_MASK_NONE, .name = "timer" }; @@ -125,7 +124,6 @@ void __init hpet_time_init(void) if (!hpet_enable()) setup_pit_timer(); - irq0.mask = cpumask_of_cpu(0); setup_irq(0, &irq0); } diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 33a788d5879..d303369a7ba 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -202,7 +202,6 @@ static struct irqaction vmi_clock_action = { .name = "vmi-timer", .handler = vmi_timer_interrupt, .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, - .mask = CPU_MASK_ALL, }; static void __devinit vmi_time_init_clockevent(void) -- cgit v1.2.3 From 
25c1a411e8a0a709abe3449866125dc290711ea8 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 30 Mar 2009 11:10:27 +1100 Subject: x86: fix mismerge in arch/x86/include/asm/timer.h Signed-off-by: Stephen Rothwell Signed-off-by: Linus Torvalds --- arch/x86/include/asm/timer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index a81195eaa2b..bd37ed444a2 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -12,9 +12,9 @@ unsigned long native_calibrate_tsc(void); #ifdef CONFIG_X86_32 extern int timer_ack; -extern int recalibrate_cpu_khz(void); extern irqreturn_t timer_interrupt(int irq, void *dev_id); #endif /* CONFIG_X86_32 */ +extern int recalibrate_cpu_khz(void); extern int no_timer_check; -- cgit v1.2.3 From 2ed8d2b3a81bdbb0418301628ccdb008ac9f40b7 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 16 Mar 2009 22:34:06 +0100 Subject: PM: Rework handling of interrupts during suspend-resume Use the functions introduced by the previous patch, suspend_device_irqs(), resume_device_irqs() and check_wakeup_irqs(), to rework the handling of interrupts during suspend (hibernation) and resume. Namely, interrupts will only be disabled on the CPU right before suspending sysdevs, while device drivers will be prevented from receiving interrupts, with the help of the new helper function, before their "late" suspend callbacks run (and analogously during resume). In addition, since the device interrupts are now disabled before the CPU has turned all interrupts off and the CPU will ACK the interrupts, setting the IRQ_PENDING bit for them, check in sysdev_suspend() if any wake-up interrupts are pending and abort suspend if that's the case. Signed-off-by: Rafael J. Wysocki Acked-by: Ingo Molnar --- arch/x86/kernel/apm_32.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 10033fe718e..ac7783a6743 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -1190,8 +1190,10 @@ static int suspend(int vetoable) struct apm_user *as; device_suspend(PMSG_SUSPEND); - local_irq_disable(); + device_power_down(PMSG_SUSPEND); + + local_irq_disable(); sysdev_suspend(PMSG_SUSPEND); local_irq_enable(); @@ -1209,9 +1211,12 @@ static int suspend(int vetoable) if (err != APM_SUCCESS) apm_error("suspend", err); err = (err == APM_SUCCESS) ? 0 : -EIO; + sysdev_resume(); - device_power_up(PMSG_RESUME); local_irq_enable(); + + device_power_up(PMSG_RESUME); + device_resume(PMSG_RESUME); queue_event(APM_NORMAL_RESUME, NULL); spin_lock(&user_list_lock); @@ -1228,8 +1233,9 @@ static void standby(void) { int err; - local_irq_disable(); device_power_down(PMSG_SUSPEND); + + local_irq_disable(); sysdev_suspend(PMSG_SUSPEND); local_irq_enable(); @@ -1239,8 +1245,9 @@ static void standby(void) local_irq_disable(); sysdev_resume(); - device_power_up(PMSG_RESUME); local_irq_enable(); + + device_power_up(PMSG_RESUME); } static apm_event_t get_event(void) -- cgit v1.2.3 From 99b76233803beab302123d243eea9e41149804f3 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 25 Mar 2009 22:48:06 +0300 Subject: proc 2/2: remove struct proc_dir_entry::owner Setting ->owner as done currently (pde->owner = THIS_MODULE) is racy as correctly noted in bug #12454.
Someone can look up an entry with a NULL ->owner, thus not pinning anything, and release it later, resulting in a module refcount underflow. We can keep ->owner and supply it at registration time like ->proc_fops and ->data. But this leaves ->owner as an easily manipulated field (just one C assignment) and somebody will forget to unpin the previous/pin the current module when switching ->owner. ->proc_fops is declared as "const", which should give some pause. ->read_proc/->write_proc were just fixed to not require ->owner for protection. rmmod'ed directories will be empty and return "." and ".." -- no harm. And directories with tricky enough readdir and lookup shouldn't be modular. We definitely don't want such modular code. Removing ->owner will also make PDE smaller. So, let's nuke it. Kudos to Jeff Layton for reminding about this, let's say, oversight. http://bugzilla.kernel.org/show_bug.cgi?id=12454 Signed-off-by: Alexey Dobriyan --- arch/x86/kernel/cpu/mtrr/if.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 4c4214690dd..fb73a52913a 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -377,10 +377,6 @@ static const struct file_operations mtrr_fops = { .release = mtrr_close, }; - -static struct proc_dir_entry *proc_root_mtrr; - - static int mtrr_seq_show(struct seq_file *seq, void *offset) { char factor; @@ -423,11 +419,7 @@ static int __init mtrr_if_init(void) (!cpu_has(c, X86_FEATURE_CENTAUR_MCR))) return -ENODEV; - proc_root_mtrr = - proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); - - if (proc_root_mtrr) - proc_root_mtrr->owner = THIS_MODULE; + proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); return 0; } -- cgit v1.2.3 From 6a11f75b6a17b5d9ac5025f8d048382fd1f47377 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 31 Mar 2009 15:23:17 -0700 Subject: generic debug pagealloc CONFIG_DEBUG_PAGEALLOC is now supported by x86, powerpc, sparc64, and s390. This patch implements it for the rest of the architectures by filling the pages with poison byte patterns after free_pages() and verifying the poison patterns before alloc_pages(). This generic version cannot detect invalid page accesses immediately; an invalid read may cause a bad dereference through the poisoned memory, and an invalid write can be detected after a long delay. Signed-off-by: Akinobu Mita Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/Kconfig | 3 +++ arch/x86/Kconfig.debug | 1 + 2 files changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 45161b81631..748e50a1a15 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -165,6 +165,9 @@ config AUDIT_ARCH config ARCH_SUPPORTS_OPTIMIZED_INLINING def_bool y +config ARCH_SUPPORTS_DEBUG_PAGEALLOC + def_bool y + # Use the generic interrupt handling code in kernel/irq/: config GENERIC_HARDIRQS bool diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index fdb45df608b..a345cb5447a 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -75,6 +75,7 @@ config DEBUG_STACK_USAGE config DEBUG_PAGEALLOC bool "Debug page memory allocations" depends on DEBUG_KERNEL + depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC ---help--- Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types -- cgit v1.2.3 From f4112de6b679d84bd9b9681c7504be7bdfb7c7d5 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 31 Mar 2009 15:23:25 -0700 Subject: mm: introduce debug_kmap_atomic x86 has debug_kmap_atomic_prot(), which is an error-checking function for kmap_atomic. It is useful for the other architectures, although it needs CONFIG_TRACE_IRQFLAGS_SUPPORT. This patch exposes it to the other architectures. Signed-off-by: Akinobu Mita Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/highmem_32.c | 45 +-------------------------------------------- 1 file changed, 1 insertion(+), 44 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 522db5e3d0b..8126e8d1a2a 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -19,49 +19,6 @@ void kunmap(struct page *page) kunmap_high(page); } -static void debug_kmap_atomic_prot(enum km_type type) -{ -#ifdef CONFIG_DEBUG_HIGHMEM - static unsigned warn_count = 10; - - if (unlikely(warn_count == 0)) - return; - - if (unlikely(in_interrupt())) { - if (in_irq()) { - if (type != KM_IRQ0 && type != KM_IRQ1 && - type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ && - type != KM_BOUNCE_READ) { - WARN_ON(1); - warn_count--; - } - } else if (!irqs_disabled()) { /* softirq */ - if (type != KM_IRQ0 && type != KM_IRQ1 && - type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 && - type != KM_SKB_SUNRPC_DATA && - type != KM_SKB_DATA_SOFTIRQ && - type != KM_BOUNCE_READ) { - WARN_ON(1); - warn_count--; - } - } - } - - if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ || - type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) { - if (!irqs_disabled()) { - WARN_ON(1); - warn_count--; - } - } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) { - if (irq_count() == 0 && !irqs_disabled()) { - WARN_ON(1); - warn_count--; - } - } -#endif -} - /* * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because * no global lock is needed and because the kmap code must perform a global TLB @@ -81,7 +38,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) if (!PageHighMem(page)) return page_address(page); - debug_kmap_atomic_prot(type); + debug_kmap_atomic(type); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -- cgit v1.2.3 From 7ca43e7564679604d86e9ed834e7bbcffd8a4a3f Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 31 Mar 2009 15:23:25 -0700 Subject: mm: use debug_kmap_atomic Use debug_kmap_atomic in kmap_atomic, kmap_atomic_pfn, and iomap_atomic_prot_pfn. Signed-off-by: Akinobu Mita Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H.
From f4112de6b679d84bd9b9681c7504be7bdfb7c7d5 Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Tue, 31 Mar 2009 15:23:25 -0700
Subject: mm: introduce debug_kmap_atomic

x86 has debug_kmap_atomic_prot(), an error-checking function for
kmap_atomic. It is useful for the other architectures, although it needs
CONFIG_TRACE_IRQFLAGS_SUPPORT.

This patch exposes it to the other architectures.

Signed-off-by: Akinobu Mita
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/x86/mm/highmem_32.c | 45 +--------------------------------------------
 1 file changed, 1 insertion(+), 44 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 522db5e3d0b..8126e8d1a2a 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -19,49 +19,6 @@ void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	static unsigned warn_count = 10;
-
-	if (unlikely(warn_count == 0))
-		return;
-
-	if (unlikely(in_interrupt())) {
-		if (in_irq()) {
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (!irqs_disabled()) {	/* softirq */
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-			    type != KM_SKB_SUNRPC_DATA &&
-			    type != KM_SKB_DATA_SOFTIRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		}
-	}
-
-	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
-		if (!irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-		if (irq_count() == 0 && !irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	}
-#endif
-}
-
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
  * no global lock is needed and because the kmap code must perform a global TLB
@@ -81,7 +38,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic_prot(type);
+	debug_kmap_atomic(type);
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-- cgit v1.2.3

From 7ca43e7564679604d86e9ed834e7bbcffd8a4a3f Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Tue, 31 Mar 2009 15:23:25 -0700
Subject: mm: use debug_kmap_atomic

Use debug_kmap_atomic in kmap_atomic, kmap_atomic_pfn, and
iomap_atomic_prot_pfn.

Signed-off-by: Akinobu Mita
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/x86/mm/highmem_32.c | 1 +
 arch/x86/mm/iomap_32.c   | 2 ++
 2 files changed, 3 insertions(+)

(limited to 'arch/x86')

diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 8126e8d1a2a..5bc5d1688c1 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -40,6 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 
 	debug_kmap_atomic(type);
+	debug_kmap_atomic(type);
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));

diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 699c9b2895a..bff0c9032f8 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 
 int is_io_mapping_possible(resource_size_t base, unsigned long size)
 {
@@ -71,6 +72,7 @@ iounmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
+	debug_kmap_atomic(type);
 	/*
 	 * Force other mappings to Oops if they'll try to access this pte
 	 * without first remap it. Keeping stale mappings around is a bad idea
-- cgit v1.2.3
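To make the check concrete, here is a sketch of the kind of bug
debug_kmap_atomic() flags. The handler and my_page are hypothetical; the
slot rules come straight from the checks in the function removed above:

	/*
	 * With CONFIG_DEBUG_HIGHMEM this triggers the WARN_ON in
	 * debug_kmap_atomic(): KM_USER0 is a process-context slot, but we
	 * are in hard-IRQ context, where only KM_IRQ0/KM_IRQ1 and the
	 * BIO/bounce slots are legitimate.
	 */
	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		char *p = kmap_atomic(my_page, KM_USER0);	/* wrong: want KM_IRQ0 */

		/* ... touch the page ... */

		kunmap_atomic(p, KM_USER0);
		return IRQ_HANDLED;
	}

The reverse misuse (grabbing KM_IRQ0 from process context with interrupts
enabled) is caught by the second half of the check, and warn_count keeps
the log from flooding after ten reports.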
From a8af78982ff4c0b3731527b0217d286a343a3089 Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Tue, 31 Mar 2009 15:23:37 -0700
Subject: pm: rework includes, remove arch ifdefs

Make the following header file changes:

 - remove arch ifdefs and asm/suspend.h from linux/suspend.h
 - add asm/suspend.h to disk.c (for arch_prepare_suspend())
 - add linux/io.h to swsusp.c (for ioremap())
 - x86 32/64 bit compile fixes

Signed-off-by: Magnus Damm
Cc: Paul Mundt
Acked-by: "Rafael J. Wysocki"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/x86/kernel/asm-offsets_32.c | 1 +
 arch/x86/kernel/asm-offsets_64.c | 1 +
 arch/x86/power/cpu_32.c          | 1 +
 arch/x86/power/cpu_64.c          | 1 +
 arch/x86/power/hibernate_64.c    | 1 +
 5 files changed, 5 insertions(+)

(limited to 'arch/x86')

diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index fbf2f33e308..5a6aa1c1162 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 
 #include

diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 8793ab33e2c..e72f062fb4b 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 
 #include

diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c
index 274d06082f4..ce702c5b3a2 100644
--- a/arch/x86/power/cpu_32.c
+++ b/arch/x86/power/cpu_32.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 
 static struct saved_context saved_context;

diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c
index e3b6cf70d62..5343540f260 100644
--- a/arch/x86/power/cpu_64.c
+++ b/arch/x86/power/cpu_64.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 
 static void fix_processor_context(void);

diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 6dd000dd793..65fdc86e923 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 
 /* References to section boundaries */
 extern const void __nosave_begin, __nosave_end;
-- cgit v1.2.3

From bf9ed57d35d64dac5d5651478b5530a89b20ea1e Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Tue, 31 Mar 2009 15:23:38 -0700
Subject: pm: cleanup includes

Remove unused/duplicate cruft from asm/suspend.h:

 - x86_32: remove unused acpi code
 - powerpc: remove duplicate prototypes, see linux/suspend.h

Signed-off-by: Magnus Damm
Cc: Paul Mundt
Acked-by: "Rafael J. Wysocki"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/x86/include/asm/suspend_32.h | 24 ------------------------
 1 file changed, 24 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index a5074bd0f8b..48dcfa62ea0 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -24,28 +24,4 @@ struct saved_context {
 	unsigned long return_address;
 } __attribute__((packed));
 
-#ifdef CONFIG_ACPI
-extern unsigned long saved_eip;
-extern unsigned long saved_esp;
-extern unsigned long saved_ebp;
-extern unsigned long saved_ebx;
-extern unsigned long saved_esi;
-extern unsigned long saved_edi;
-
-static inline void acpi_save_register_state(unsigned long return_point)
-{
-	saved_eip = return_point;
-	asm volatile("movl %%esp,%0" : "=m" (saved_esp));
-	asm volatile("movl %%ebp,%0" : "=m" (saved_ebp));
-	asm volatile("movl %%ebx,%0" : "=m" (saved_ebx));
-	asm volatile("movl %%edi,%0" : "=m" (saved_edi));
-	asm volatile("movl %%esi,%0" : "=m" (saved_esi));
-}
-
-#define acpi_restore_register_state() do {} while (0)
-
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-#endif
-
 #endif /* _ASM_X86_SUSPEND_32_H */
-- cgit v1.2.3
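The practical effect of these two include patches, sketched for a
hypothetical user of the arch hooks (the caller is made up; only the
headers and arch_prepare_suspend(), named in the changelog above, are
real):

	/*
	 * linux/suspend.h no longer pulls in asm/suspend.h behind arch
	 * ifdefs, so a file that needs the arch-specific pieces says so
	 * explicitly.
	 */
	#include <linux/suspend.h>
	#include <asm/suspend.h>	/* arch_prepare_suspend(), struct saved_context */

	static int my_prepare(void)	/* hypothetical caller */
	{
		arch_prepare_suspend();
		return 0;
	}

This is the usual trade-off of flattening header dependencies: a few
callers gain an extra #include line, but the generic header stops carrying
per-arch ifdefs and dead declarations like the acpi_save_register_state()
block deleted above.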