Commit: 2cb7cef9
1 | From: John Keller <jpk@sgi.com> |
2 | Date: Mon, 24 Nov 2008 22:47:17 +0000 (-0600) | |
3 | Subject: [IA64] SN specific version of dma_get_required_mask() | |
4 | Patch-mainline: 2.6.29-rc2 | |
5 | Git-commit: 175add1981e53d22caba8f42d5f924a4de507b6c | |
6 | References: bnc#529369 | |
7 | ||
8 | [IA64] SN specific version of dma_get_required_mask() | |
9 | ||
10 | Create a platform specific version of dma_get_required_mask() | |
11 | for ia64 SN Altix. All SN Altix platforms support 64 bit DMA | |
12 | addressing regardless of the size of system memory. | |
13 | Create an ia64 machvec for dma_get_required_mask, with the | |
14 | SN version unconditionally returning DMA_64BIT_MASK. | |
15 | ||
16 | Signed-off-by: John Keller <jpk@sgi.com> | |
17 | Signed-off-by: Tony Luck <tony.luck@intel.com> | |
18 | Acked-by: Jeff Mahoney <jeffm@suse.com> | |
19 | --- | |
20 | Documentation/DMA-API.txt | 9 ++++----- | |
21 | arch/ia64/include/asm/dma-mapping.h | 2 ++ | |
22 | arch/ia64/include/asm/machvec.h | 7 +++++++ | |
23 | arch/ia64/include/asm/machvec_init.h | 1 + | |
24 | arch/ia64/include/asm/machvec_sn2.h | 2 ++ | |
25 | arch/ia64/pci/pci.c | 27 +++++++++++++++++++++++++++ | |
26 | arch/ia64/sn/pci/pci_dma.c | 6 ++++++ | |
27 | 7 files changed, 49 insertions(+), 5 deletions(-) | |
28 | ||
29 | --- a/Documentation/DMA-API.txt | |
30 | +++ b/Documentation/DMA-API.txt | |
31 | @@ -170,16 +170,15 @@ Returns: 0 if successful and a negative | |
32 | u64 | |
33 | dma_get_required_mask(struct device *dev) | |
34 | ||
35 | -After setting the mask with dma_set_mask(), this API returns the | |
36 | -actual mask (within that already set) that the platform actually | |
37 | -requires to operate efficiently. Usually this means the returned mask | |
38 | +This API returns the mask that the platform requires to | |
39 | +operate efficiently. Usually this means the returned mask | |
40 | is the minimum required to cover all of memory. Examining the | |
41 | required mask gives drivers with variable descriptor sizes the | |
42 | opportunity to use smaller descriptors as necessary. | |
43 | ||
44 | Requesting the required mask does not alter the current mask. If you | |
45 | -wish to take advantage of it, you should issue another dma_set_mask() | |
46 | -call to lower the mask again. | |
47 | +wish to take advantage of it, you should issue a dma_set_mask() | |
48 | +call to set the mask to the value returned. | |
49 | ||
50 | ||
51 | Part Id - Streaming DMA mappings | |
52 | --- a/arch/ia64/include/asm/dma-mapping.h | |
53 | +++ b/arch/ia64/include/asm/dma-mapping.h | |
54 | @@ -8,6 +8,8 @@ | |
55 | #include <asm/machvec.h> | |
56 | #include <linux/scatterlist.h> | |
57 | ||
58 | +#define ARCH_HAS_DMA_GET_REQUIRED_MASK | |
59 | + | |
60 | #define dma_alloc_coherent platform_dma_alloc_coherent | |
61 | /* coherent mem. is cheap */ | |
62 | static inline void * | |
63 | --- a/arch/ia64/include/asm/machvec.h | |
64 | +++ b/arch/ia64/include/asm/machvec.h | |
65 | @@ -61,6 +61,7 @@ typedef dma_addr_t ia64_mv_dma_map_singl | |
66 | typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *); | |
67 | typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); | |
68 | typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); | |
69 | +typedef u64 ia64_mv_dma_get_required_mask (struct device *); | |
70 | ||
71 | /* | |
72 | * WARNING: The legacy I/O space is _architected_. Platforms are | |
73 | @@ -154,6 +155,7 @@ extern void machvec_tlb_migrate_finish ( | |
74 | # define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device | |
75 | # define platform_dma_mapping_error ia64_mv.dma_mapping_error | |
76 | # define platform_dma_supported ia64_mv.dma_supported | |
77 | +# define platform_dma_get_required_mask ia64_mv.dma_get_required_mask | |
78 | # define platform_irq_to_vector ia64_mv.irq_to_vector | |
79 | # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq | |
80 | # define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem | |
81 | @@ -208,6 +210,7 @@ struct ia64_machine_vector { | |
82 | ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device; | |
83 | ia64_mv_dma_mapping_error *dma_mapping_error; | |
84 | ia64_mv_dma_supported *dma_supported; | |
85 | + ia64_mv_dma_get_required_mask *dma_get_required_mask; | |
86 | ia64_mv_irq_to_vector *irq_to_vector; | |
87 | ia64_mv_local_vector_to_irq *local_vector_to_irq; | |
88 | ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem; | |
89 | @@ -258,6 +261,7 @@ struct ia64_machine_vector { | |
90 | platform_dma_sync_sg_for_device, \ | |
91 | platform_dma_mapping_error, \ | |
92 | platform_dma_supported, \ | |
93 | + platform_dma_get_required_mask, \ | |
94 | platform_irq_to_vector, \ | |
95 | platform_local_vector_to_irq, \ | |
96 | platform_pci_get_legacy_mem, \ | |
97 | @@ -382,6 +386,9 @@ extern ia64_mv_dma_supported swiotlb_dm | |
98 | #ifndef platform_dma_supported | |
99 | # define platform_dma_supported swiotlb_dma_supported | |
100 | #endif | |
101 | +#ifndef platform_dma_get_required_mask | |
102 | +# define platform_dma_get_required_mask ia64_dma_get_required_mask | |
103 | +#endif | |
104 | #ifndef platform_irq_to_vector | |
105 | # define platform_irq_to_vector __ia64_irq_to_vector | |
106 | #endif | |
107 | --- a/arch/ia64/include/asm/machvec_init.h | |
108 | +++ b/arch/ia64/include/asm/machvec_init.h | |
109 | @@ -2,6 +2,7 @@ | |
110 | ||
111 | extern ia64_mv_send_ipi_t ia64_send_ipi; | |
112 | extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge; | |
113 | +extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask; | |
114 | extern ia64_mv_irq_to_vector __ia64_irq_to_vector; | |
115 | extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq; | |
116 | extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem; | |
117 | --- a/arch/ia64/include/asm/machvec_sn2.h | |
118 | +++ b/arch/ia64/include/asm/machvec_sn2.h | |
119 | @@ -67,6 +67,7 @@ extern ia64_mv_dma_sync_single_for_devic | |
120 | extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; | |
121 | extern ia64_mv_dma_mapping_error sn_dma_mapping_error; | |
122 | extern ia64_mv_dma_supported sn_dma_supported; | |
123 | +extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask; | |
124 | extern ia64_mv_migrate_t sn_migrate; | |
125 | extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; | |
126 | extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; | |
127 | @@ -123,6 +124,7 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_f | |
128 | #define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device | |
129 | #define platform_dma_mapping_error sn_dma_mapping_error | |
130 | #define platform_dma_supported sn_dma_supported | |
131 | +#define platform_dma_get_required_mask sn_dma_get_required_mask | |
132 | #define platform_migrate sn_migrate | |
133 | #define platform_kernel_launch_event sn_kernel_launch_event | |
134 | #ifdef CONFIG_PCI_MSI | |
135 | --- a/arch/ia64/pci/pci.c | |
136 | +++ b/arch/ia64/pci/pci.c | |
137 | @@ -19,6 +19,7 @@ | |
138 | #include <linux/ioport.h> | |
139 | #include <linux/slab.h> | |
140 | #include <linux/spinlock.h> | |
141 | +#include <linux/bootmem.h> | |
142 | ||
143 | #include <asm/machvec.h> | |
144 | #include <asm/page.h> | |
145 | @@ -743,6 +744,32 @@ static void __init set_pci_cacheline_siz | |
146 | pci_cache_line_size = (1 << cci.pcci_line_size) / 4; | |
147 | } | |
148 | ||
149 | +u64 ia64_dma_get_required_mask(struct device *dev) | |
150 | +{ | |
151 | + u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT); | |
152 | + u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT)); | |
153 | + u64 mask; | |
154 | + | |
155 | + if (!high_totalram) { | |
156 | + /* convert to mask just covering totalram */ | |
157 | + low_totalram = (1 << (fls(low_totalram) - 1)); | |
158 | + low_totalram += low_totalram - 1; | |
159 | + mask = low_totalram; | |
160 | + } else { | |
161 | + high_totalram = (1 << (fls(high_totalram) - 1)); | |
162 | + high_totalram += high_totalram - 1; | |
163 | + mask = (((u64)high_totalram) << 32) + 0xffffffff; | |
164 | + } | |
165 | + return mask; | |
166 | +} | |
167 | +EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask); | |
168 | + | |
169 | +u64 dma_get_required_mask(struct device *dev) | |
170 | +{ | |
171 | + return platform_dma_get_required_mask(dev); | |
172 | +} | |
173 | +EXPORT_SYMBOL_GPL(dma_get_required_mask); | |
174 | + | |
175 | static int __init pcibios_init(void) | |
176 | { | |
177 | set_pci_cacheline_size(); | |
178 | --- a/arch/ia64/sn/pci/pci_dma.c | |
179 | +++ b/arch/ia64/sn/pci/pci_dma.c | |
180 | @@ -356,6 +356,12 @@ int sn_dma_mapping_error(struct device * | |
181 | } | |
182 | EXPORT_SYMBOL(sn_dma_mapping_error); | |
183 | ||
184 | +u64 sn_dma_get_required_mask(struct device *dev) | |
185 | +{ | |
186 | + return DMA_64BIT_MASK; | |
187 | +} | |
188 | +EXPORT_SYMBOL_GPL(sn_dma_get_required_mask); | |
189 | + | |
190 | char *sn_pci_get_legacy_mem(struct pci_bus *bus) | |
191 | { | |
192 | if (!SN_PCIBUS_BUSSOFT(bus)) |