*/
extern void local_dcache_page_flush(struct page *page);
extern void local_icache_page_inv(struct page *page);
+extern void local_dcache_range_flush(unsigned long start, unsigned long end);
+extern void local_dcache_range_inv(unsigned long start, unsigned long end);
+extern void local_icache_range_inv(unsigned long start, unsigned long end);
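+
+/*
+ * The range variants operate on the physical address range
+ * [start, end), one cache line at a time.
+ */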
/*
* Data cache flushing always happens on the local cpu. Instruction cache
extern void smp_icache_page_inv(struct page *page);
#endif /* CONFIG_SMP */
+/*
+ * Even if the actual cache block size is larger than L1_CACHE_BYTES,
+ * the address can safely be incremented by L1_CACHE_BYTES: writing any
+ * address within a cache line to the flush/invalidate register acts on
+ * the entire line, so further writes that fall within an already-handled
+ * line are harmless no-ops.
+ */
+#define local_dcache_block_flush(addr) \
+ local_dcache_range_flush(addr, addr + L1_CACHE_BYTES)
+#define local_dcache_block_inv(addr) \
+ local_dcache_range_inv(addr, addr + L1_CACHE_BYTES)
+#define local_icache_block_inv(addr) \
+ local_icache_range_inv(addr, addr + L1_CACHE_BYTES)
+
/*
* Synchronizes caches. Whenever a cpu writes executable code to memory, this
* should be called to make sure the processor sees the newly written code.
#ifndef __ASM_OPENRISC_CPUINFO_H
#define __ASM_OPENRISC_CPUINFO_H
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+
struct cache_desc {
u32 size;
u32 sets;
extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
extern void setup_cpuinfo(void);
+/*
+ * Check if the cache component exists.
+ */
+extern bool cpu_cache_is_present(const unsigned int cache_type);
+
#endif /* __ASM_OPENRISC_CPUINFO_H */
#include <linux/pagewalk.h>
#include <asm/cpuinfo.h>
+#include <asm/cacheflush.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>
page_set_nocache(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- unsigned long cl;
- struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
-
pte_val(*pte) |= _PAGE_CI;
/*
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
/* Flush page out of dcache */
- for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
- mtspr(SPR_DCBFR, cl);
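+ /* The range helpers take physical addresses, hence __pa(). */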
+ local_dcache_range_flush(__pa(addr), __pa(next));
return 0;
}
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
enum dma_data_direction dir)
{
- unsigned long cl;
- struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
-
switch (dir) {
case DMA_TO_DEVICE:
/* Flush the dcache for the requested range */
- for (cl = addr; cl < addr + size;
- cl += cpuinfo->dcache_block_size)
- mtspr(SPR_DCBFR, cl);
+ local_dcache_range_flush(addr, addr + size);
break;
case DMA_FROM_DEVICE:
/* Invalidate the dcache for the requested range */
- for (cl = addr; cl < addr + size;
- cl += cpuinfo->dcache_block_size)
- mtspr(SPR_DCBIR, cl);
+ local_dcache_range_inv(addr, addr + size);
break;
default:
/*
#include <asm/spr_defs.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
+#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>
-static __always_inline void cache_loop(struct page *page, const unsigned int reg)
+/*
+ * Check if the cache component exists.
+ */
+bool cpu_cache_is_present(const unsigned int cache_type)
{
- unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
- unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
+ unsigned long upr = mfspr(SPR_UPR);
+ unsigned long mask = SPR_UPR_UP | cache_type;
+
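+ /* True only when both SPR_UPR_UP and the requested cache-present bit are set. */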
+ return !((upr & mask) ^ mask);
+}
+
+static __always_inline void cache_loop(unsigned long paddr, unsigned long end,
+ const unsigned short reg, const unsigned int cache_type)
+{
+ if (!cpu_cache_is_present(cache_type))
+ return;
+
- while (line < paddr + PAGE_SIZE) {
- mtspr(reg, line);
- line += L1_CACHE_BYTES;
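+ /*
+ * Step by L1_CACHE_BYTES; each mtspr() write acts on the whole
+ * cache line containing paddr (see the comment in cacheflush.h).
+ */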
+ while (paddr < end) {
+ mtspr(reg, paddr);
+ paddr += L1_CACHE_BYTES;
}
}
+static __always_inline void cache_loop_page(struct page *page, const unsigned short reg,
+ const unsigned int cache_type)
+{
+ unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+ unsigned long end = paddr + PAGE_SIZE;
+
+ paddr &= ~(L1_CACHE_BYTES - 1);
+
+ cache_loop(paddr, end, reg, cache_type);
+}
+
void local_dcache_page_flush(struct page *page)
{
- cache_loop(page, SPR_DCBFR);
+ cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP);
}
EXPORT_SYMBOL(local_dcache_page_flush);
void local_icache_page_inv(struct page *page)
{
- cache_loop(page, SPR_ICBIR);
+ cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP);
}
EXPORT_SYMBOL(local_icache_page_inv);
+void local_dcache_range_flush(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP);
+}
+
+void local_dcache_range_inv(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP);
+}
+
+void local_icache_range_inv(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP);
+}
+
void update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *pte)
{
sync_icache_dcache(folio_page(folio, nr));
}
}
-
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
+#include <asm/cacheflush.h>
int mem_init_done;
barrier();
/* Invalidate instruction caches after code modification */
- mtspr(SPR_ICBIR, 0x900);
- mtspr(SPR_ICBIR, 0xa00);
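+ /* 0x900 and 0xa00 are the DTLB and ITLB miss exception vectors. */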
+ local_icache_block_inv(0x900);
+ local_icache_block_inv(0xa00);
/* New TLB miss handlers and kernel page tables are now in place.
* Make sure that page flags get updated for all pages in TLB by