Index: linux-2.4.35.4/arch/mips/kernel/entry.S
===================================================================
--- linux-2.4.35.4.orig/arch/mips/kernel/entry.S
+++ linux-2.4.35.4/arch/mips/kernel/entry.S
@@ -100,6 +100,10 @@ END(except_vec1_generic)
 		 * and R4400 SC and MC versions.
 		 */
 		NESTED(except_vec3_generic, 0, sp)
+#ifdef CONFIG_BCM4710
+		nop
+		nop
+#endif
 #if R5432_CP0_INTERRUPT_WAR
 		mfc0	k0, CP0_INDEX
 #endif
Index: linux-2.4.35.4/arch/mips/mm/c-r4k.c
===================================================================
--- linux-2.4.35.4.orig/arch/mips/mm/c-r4k.c
+++ linux-2.4.35.4/arch/mips/mm/c-r4k.c
@@ -14,6 +14,12 @@
 #include
 #include
 
+#ifdef CONFIG_BCM4710
+#include "../bcm947xx/include/typedefs.h"
+#include "../bcm947xx/include/sbconfig.h"
+#include
+#endif
+
 #include
 #include
 #include
@@ -40,6 +46,7 @@ static struct bcache_ops no_sc_ops = {
 	.bc_inv = (void *)no_sc_noop
 };
 
+int bcm4710 = 0;
 struct bcache_ops *bcops = &no_sc_ops;
 
 #define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x2010)
@@ -64,8 +71,10 @@ static inline void r4k_blast_dcache_page
 static inline void r4k_blast_dcache_page_setup(void)
 {
 	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
-
-	if (dc_lsize == 16)
+
+	if (bcm4710)
+		r4k_blast_dcache_page = blast_dcache_page;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache_page = blast_dcache16_page;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
@@ -77,7 +86,9 @@ static void r4k_blast_dcache_page_indexe
 {
 	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
 
-	if (dc_lsize == 16)
+	if (bcm4710)
+		r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
@@ -89,7 +100,9 @@ static inline void r4k_blast_dcache_setu
 {
 	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
 
-	if (dc_lsize == 16)
+	if (bcm4710)
+		r4k_blast_dcache = blast_dcache;
+	else if (dc_lsize == 16)
 		r4k_blast_dcache = blast_dcache16;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache = blast_dcache32;
@@ -266,6 +279,7 @@ static void r4k___flush_cache_all(void)
 	r4k_blast_dcache();
 	r4k_blast_icache();
 
+	if (!bcm4710)
 	switch (current_cpu_data.cputype) {
 	case CPU_R4000SC:
 	case CPU_R4000MC:
@@ -304,10 +318,10 @@ static void r4k_flush_cache_mm(struct mm
 	 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
 	 * only flush the primary caches but R10000 and R12000 behave sane ...
 	 */
-	if (current_cpu_data.cputype == CPU_R4000SC ||
+	if (!bcm4710 && (current_cpu_data.cputype == CPU_R4000SC ||
 	    current_cpu_data.cputype == CPU_R4000MC ||
 	    current_cpu_data.cputype == CPU_R4400SC ||
-	    current_cpu_data.cputype == CPU_R4400MC)
+	    current_cpu_data.cputype == CPU_R4400MC))
 		r4k_blast_scache();
 }
 
@@ -383,12 +397,15 @@ static void r4k_flush_icache_range(unsig
 	unsigned long ic_lsize = current_cpu_data.icache.linesz;
 	unsigned long addr, aend;
 
+	addr = start & ~(dc_lsize - 1);
+	aend = (end - 1) & ~(dc_lsize - 1);
+
 	if (!cpu_has_ic_fills_f_dc) {
 		if (end - start > dcache_size)
 			r4k_blast_dcache();
 		else {
-			addr = start & ~(dc_lsize - 1);
-			aend = (end - 1) & ~(dc_lsize - 1);
+			BCM4710_PROTECTED_FILL_TLB(addr);
+			BCM4710_PROTECTED_FILL_TLB(aend);
 
 			while (1) {
 				/* Hit_Writeback_Inv_D */
@@ -403,8 +420,6 @@ static void r4k_flush_icache_range(unsig
 		if (end - start > icache_size)
 			r4k_blast_icache();
 		else {
-			addr = start & ~(ic_lsize - 1);
-			aend = (end - 1) & ~(ic_lsize - 1);
 			while (1) {
 				/* Hit_Invalidate_I */
 				protected_flush_icache_line(addr);
@@ -413,6 +428,9 @@ static void r4k_flush_icache_range(unsig
 				addr += ic_lsize;
 			}
 		}
+
+	if (bcm4710)
+		flush_cache_all();
 }
 
 /*
@@ -443,7 +461,8 @@ static void r4k_flush_icache_page(struct
 	if (cpu_has_subset_pcaches) {
 		unsigned long addr = (unsigned long) page_address(page);
 
-		r4k_blast_scache_page(addr);
+		if (!bcm4710)
+			r4k_blast_scache_page(addr);
 		ClearPageDcacheDirty(page);
 
 		return;
@@ -451,6 +470,7 @@ static void r4k_flush_icache_page(struct
 
 	if (!cpu_has_ic_fills_f_dc) {
 		unsigned long addr = (unsigned long) page_address(page);
+
 		r4k_blast_dcache_page(addr);
 		ClearPageDcacheDirty(page);
 	}
@@ -477,7 +497,7 @@ static void r4k_dma_cache_wback_inv(unsi
 	/* Catch bad driver code */
 	BUG_ON(size == 0);
 
-	if (cpu_has_subset_pcaches) {
+	if (!bcm4710 && cpu_has_subset_pcaches) {
 		unsigned long sc_lsize = current_cpu_data.scache.linesz;
 
 		if (size >= scache_size) {
@@ -509,6 +529,8 @@ static void r4k_dma_cache_wback_inv(unsi
 	R4600_HIT_CACHEOP_WAR_IMPL;
 	a = addr & ~(dc_lsize - 1);
 	end = (addr + size - 1) & ~(dc_lsize - 1);
+	BCM4710_FILL_TLB(a);
+	BCM4710_FILL_TLB(end);
 	while (1) {
 		flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
 		if (a == end)
@@ -527,7 +549,7 @@ static void r4k_dma_cache_inv(unsigned l
 	/* Catch bad driver code */
 	BUG_ON(size == 0);
 
-	if (cpu_has_subset_pcaches) {
+	if (!bcm4710 && (cpu_has_subset_pcaches)) {
 		unsigned long sc_lsize = current_cpu_data.scache.linesz;
 
 		if (size >= scache_size) {
@@ -554,6 +576,8 @@ static void r4k_dma_cache_inv(unsigned l
 	R4600_HIT_CACHEOP_WAR_IMPL;
 	a = addr & ~(dc_lsize - 1);
 	end = (addr + size - 1) & ~(dc_lsize - 1);
+	BCM4710_FILL_TLB(a);
+	BCM4710_FILL_TLB(end);
 	while (1) {
 		flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
 		if (a == end)
@@ -577,6 +601,8 @@ static void r4k_flush_cache_sigtramp(uns
 	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
 
 	R4600_HIT_CACHEOP_WAR_IMPL;
+	BCM4710_PROTECTED_FILL_TLB(addr);
+	BCM4710_PROTECTED_FILL_TLB(addr + 4);
 	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 	protected_flush_icache_line(addr & ~(ic_lsize - 1));
 	if (MIPS4K_ICACHE_REFILL_WAR) {
@@ -986,10 +1012,12 @@ static void __init setup_scache(void)
 	case CPU_R4000MC:
 	case CPU_R4400SC:
 	case CPU_R4400MC:
-		probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
-		sc_present = probe_scache_kseg1(config);
-		if (sc_present)
-			c->options |= MIPS_CPU_CACHE_CDEX_S;
+		if (!bcm4710) {
+			probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
+			sc_present = probe_scache_kseg1(config);
+			if (sc_present)
+				c->options |= MIPS_CPU_CACHE_CDEX_S;
+		}
 		break;
 
 	case CPU_R10000:
@@ -1041,6 +1069,19 @@ static void __init setup_scache(void)
 static inline void coherency_setup(void)
 {
 	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
+
+#if defined(CONFIG_BCM4310) || defined(CONFIG_BCM4704) || defined(CONFIG_BCM5365)
+	if (BCM330X(current_cpu_data.processor_id)) {
+		uint32 cm;
+
+		cm = read_c0_diag();
+		/* Enable icache */
+		cm |= (1 << 31);
+		/* Enable dcache */
+		cm |= (1 << 30);
+		write_c0_diag(cm);
+	}
+#endif
 
 	/*
 	 * c0_status.cu=0 specifies that updates by the sc instruction use
@@ -1073,6 +1114,12 @@ void __init ld_mmu_r4xx0(void)
 	memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
 	memcpy((void *)(KSEG1 + 0x100), &except_vec2_generic, 0x80);
 
+	if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & PRID_REV_MASK) == 0) {
+		printk("Enabling BCM4710A0 cache workarounds.\n");
+		bcm4710 = 1;
+	} else
+		bcm4710 = 0;
+
 	probe_pcache();
 	setup_scache();
 
Index: linux-2.4.35.4/arch/mips/mm/tlbex-mips32.S
===================================================================
--- linux-2.4.35.4.orig/arch/mips/mm/tlbex-mips32.S
+++ linux-2.4.35.4/arch/mips/mm/tlbex-mips32.S
@@ -90,6 +90,9 @@
 	.set	noat
 	LEAF(except_vec0_r4000)
 	.set	mips3
+#ifdef CONFIG_BCM4704
+	nop
+#endif
 #ifdef CONFIG_SMP
 	mfc0	k1, CP0_CONTEXT
 	la	k0, pgd_current
Index: linux-2.4.35.4/include/asm-mips/r4kcache.h
===================================================================
--- linux-2.4.35.4.orig/include/asm-mips/r4kcache.h
+++ linux-2.4.35.4/include/asm-mips/r4kcache.h
@@ -15,6 +15,18 @@
 #include
 #include
 
+#ifdef CONFIG_BCM4710
+#define BCM4710_DUMMY_RREG() (((sbconfig_t *)(KSEG1ADDR(SB_ENUM_BASE + SBCONFIGOFF)))->sbimstate)
+
+#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
+#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
+#else
+#define BCM4710_DUMMY_RREG()
+
+#define BCM4710_FILL_TLB(addr)
+#define BCM4710_PROTECTED_FILL_TLB(addr)
+#endif
+
 #define cache_op(op,addr)						\
 	__asm__ __volatile__(						\
 	"	.set	noreorder				\n"	\
@@ -27,12 +39,25 @@
 
 static inline void flush_icache_line_indexed(unsigned long addr)
 {
-	cache_op(Index_Invalidate_I, addr);
+	unsigned int way;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+
+	for (way = 0; way < current_cpu_data.dcache.ways; way++) {
+		cache_op(Index_Invalidate_I, addr);
+		addr += ws_inc;
+	}
 }
 
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
-	cache_op(Index_Writeback_Inv_D, addr);
+	unsigned int way;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+
+	for (way = 0; way < current_cpu_data.dcache.ways; way++) {
+		BCM4710_DUMMY_RREG();
+		cache_op(Index_Writeback_Inv_D, addr);
+		addr += ws_inc;
+	}
 }
 
 static inline void flush_scache_line_indexed(unsigned long addr)
@@ -47,6 +72,7 @@ static inline void flush_icache_line(uns
 
 static inline void flush_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Writeback_Inv_D, addr);
 }
 
@@ -91,6 +117,7 @@ static inline void protected_flush_icach
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	__asm__ __volatile__(
 		".set noreorder\n\t"
 		".set mips3\n"
@@ -138,6 +165,62 @@ static inline void invalidate_tcache_pag
 		: "r" (base),						\
 		  "i" (op));
 
+#define cache_unroll(base,op)					\
+	__asm__ __volatile__("					\
+		.set noreorder;					\
+		.set mips3;					\
+		cache %1, (%0);					\
+		.set mips0;					\
+		.set reorder"					\
+		:						\
+		: "r" (base),					\
+		  "i" (op));
+
+
+static inline void blast_dcache(void)
+{
+	unsigned long start = KSEG0;
+	unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways;
+	unsigned long end = (start + dcache_size);
+
+	while(start < end) {
+		BCM4710_DUMMY_RREG();
+		cache_unroll(start,Index_Writeback_Inv_D);
+		start += current_cpu_data.dcache.linesz;
+	}
+}
+
+static inline void blast_dcache_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+
+	BCM4710_FILL_TLB(start);
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_unroll(start,Hit_Writeback_Inv_D);
+		start += current_cpu_data.dcache.linesz;
+	} while (start < end);
+}
+
+static inline void blast_dcache_page_indexed(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+	unsigned long ws_end = current_cpu_data.dcache.ways <<
+	       current_cpu_data.dcache.waybit;
+	unsigned long ws, addr;
+
+	for (ws = 0; ws < ws_end; ws += ws_inc) {
+		start = page + ws;
+		for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
+			BCM4710_DUMMY_RREG();
+			cache_unroll(addr,Index_Writeback_Inv_D);
+		}
+	}
+}
+
 static inline void blast_dcache16(void)
 {
 	unsigned long start = KSEG0;
@@ -148,8 +231,9 @@ static inline void blast_dcache16(void)
 	unsigned long ws, addr;
 
 	for (ws = 0; ws < ws_end; ws += ws_inc)
-		for (addr = start; addr < end; addr += 0x200)
+		for (addr = start; addr < end; addr += 0x200) {
 			cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
+		}
 }
 
 static inline void blast_dcache16_page(unsigned long page)
@@ -173,8 +257,9 @@ static inline void blast_dcache16_page_i
 	unsigned long ws, addr;
 
 	for (ws = 0; ws < ws_end; ws += ws_inc)
-		for (addr = start; addr < end; addr += 0x200)
+		for (addr = start; addr < end; addr += 0x200) {
 			cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
+		}
 }
 
 static inline void blast_icache16(void)
@@ -196,6 +281,7 @@ static inline void blast_icache16_page(u
 	unsigned long start = page;
 	unsigned long end = start + PAGE_SIZE;
 
+	BCM4710_FILL_TLB(start);
 	do {
 		cache16_unroll32(start,Hit_Invalidate_I);
 		start += 0x200;
@@ -281,6 +367,7 @@ static inline void blast_scache16_page_i
 		: "r" (base),						\
 		  "i" (op));
 
+
 static inline void blast_dcache32(void)
 {
 	unsigned long start = KSEG0;
@@ -291,8 +378,9 @@ static inline void blast_dcache32(void)
 	unsigned long ws, addr;
 
 	for (ws = 0; ws < ws_end; ws += ws_inc)
-		for (addr = start; addr < end; addr += 0x400)
+		for (addr = start; addr < end; addr += 0x400) {
 			cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+		}
 }
 
 static inline void blast_dcache32_page(unsigned long page)
@@ -316,8 +404,9 @@ static inline void blast_dcache32_page_i
 	unsigned long ws, addr;
 
 	for (ws = 0; ws < ws_end; ws += ws_inc)
-		for (addr = start; addr < end; addr += 0x400)
+		for (addr = start; addr < end; addr += 0x400) {
 			cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+		}
 }
 
 static inline void blast_icache32(void)
@@ -339,6 +428,7 @@ static inline void blast_icache32_page(u
 	unsigned long start = page;
 	unsigned long end = start + PAGE_SIZE;
 
+	BCM4710_FILL_TLB(start);
 	do {
 		cache32_unroll32(start,Hit_Invalidate_I);
 		start += 0x400;
@@ -443,6 +533,7 @@ static inline void blast_icache64_page(u
 	unsigned long start = page;
 	unsigned long end = start + PAGE_SIZE;
 
+	BCM4710_FILL_TLB(start);
 	do {
 		cache64_unroll32(start,Hit_Invalidate_I);
 		start += 0x800;
Index: linux-2.4.35.4/include/asm-mips/stackframe.h
===================================================================
--- linux-2.4.35.4.orig/include/asm-mips/stackframe.h
+++ linux-2.4.35.4/include/asm-mips/stackframe.h
@@ -209,6 +209,20 @@
 
 #endif
 
+#if defined(CONFIG_BCM4710) || defined(CONFIG_BCM4704)
+
+#undef RESTORE_SP_AND_RET
+#define RESTORE_SP_AND_RET					\
+		lw	sp,  PT_R29(sp);			\
+		.set	mips3;					\
+		nop;						\
+		nop;						\
+		eret;						\
+		.set	mips0
+
+#endif
+
+
 #define RESTORE_SP						\
 		lw	sp,  PT_R29(sp);			\
 
Index: linux-2.4.35.4/mm/memory.c
===================================================================
--- linux-2.4.35.4.orig/mm/memory.c
+++ linux-2.4.35.4/mm/memory.c
@@ -927,6 +927,7 @@ static inline void break_cow(struct vm_a
 	flush_page_to_ram(new_page);
 	flush_cache_page(vma, address);
 	establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
+	flush_icache_page(vma, new_page);
 }
 
 /*