Skip to content

Commit

Permalink
metag: Memory management
Browse files Browse the repository at this point in the history
Add memory management files for metag.

Meta's 32bit virtual address space is split into two halves:
 - local (0x08000000-0x7fffffff): traditionally local to a hardware
   thread and incoherent between hardware threads. Each hardware thread
   has its own local MMU table. On Meta2 the local space can be
   globally coherent (GCOn) if the cache partitions coincide.
 - global (0x88000000-0xffff0000): coherent and traditionally global
   between hardware threads. On Meta2, each hardware thread has its own
   global MMU table.

The low 128MiB of each half is non-MMUable and maps directly to the
physical address space:
 - 0x00010000-0x07ffffff: contains Meta core registers and maps SoC bus
 - 0x80000000-0x87ffffff: contains low latency global core memories

Linux usually further splits the local virtual address space like this:
 - 0x08000000-0x3fffffff: user mappings
 - 0x40000000-0x7fffffff: kernel mappings

Signed-off-by: James Hogan <[email protected]>
  • Loading branch information
James Hogan committed Mar 2, 2013
1 parent 99ef7c2 commit f5df8e2
Show file tree
Hide file tree
Showing 10 changed files with 1,833 additions and 0 deletions.
77 changes: 77 additions & 0 deletions arch/metag/include/asm/mmu.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
#ifndef __MMU_H
#define __MMU_H

#ifdef CONFIG_METAG_USER_TCM
#include <linux/list.h>
#endif

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#endif

/*
 * Architecture-specific per-mm context, embedded in struct mm_struct.
 * Fields are only present when the corresponding config option is enabled.
 */
typedef struct {
/* Software pgd base pointer used for Meta 1.x MMU. */
unsigned long pgd_base;
#ifdef CONFIG_METAG_USER_TCM
/* List of user TCM allocations owned by this mm; released in
 * destroy_context(). */
struct list_head tcm;
#endif
#ifdef CONFIG_HUGETLB_PAGE
#if HPAGE_SHIFT < HUGEPT_SHIFT
/* last partially filled huge page table address */
unsigned long part_huge;
#endif
#endif
} mm_context_t;

/* Given a virtual address, return the pte for the top level 4meg entry
 * that maps that address.
 * Returns 0 (an empty pte) if that range is not mapped.
 */
unsigned long mmu_read_first_level_page(unsigned long vaddr);

/* Given a linear (virtual) address, return the second level 4k pte
 * that maps that address. Returns 0 if the address is not mapped.
 */
unsigned long mmu_read_second_level_page(unsigned long vaddr);

/* Get the virtual base address of the MMU */
unsigned long mmu_get_base(void);

/* Initialize the MMU.
 * NOTE(review): mem_end presumably marks the end of usable memory to be
 * mapped — confirm against the mmu_init() implementation.
 */
void mmu_init(unsigned long mem_end);

#ifdef CONFIG_METAG_META21_MMU
/*
 * Return, for hardware thread "cpu", the address of
 * MMCU_TnLOCAL_TABLE_PHYS0 when the kernel runs in local space, or
 * MMCU_TnGLOBAL_TABLE_PHYS0 when it runs in global space.
 */
static inline unsigned long mmu_phys0_addr(unsigned int cpu)
{
	unsigned long addr;

	/* Base register for thread 0, stepped by the per-thread stride. */
	addr = MMCU_T0LOCAL_TABLE_PHYS0;
	addr += MMCU_TnX_TABLE_PHYSX_STRIDE * cpu;

	/* Global-space kernels use the global table register instead. */
	if (is_global_space(PAGE_OFFSET))
		addr += MMCU_TXG_TABLE_PHYSX_OFFSET;

	return addr;
}

/*
 * Return, for hardware thread "cpu", the address of
 * MMCU_TnLOCAL_TABLE_PHYS1 when the kernel runs in local space, or
 * MMCU_TnGLOBAL_TABLE_PHYS1 when it runs in global space.
 */
static inline unsigned long mmu_phys1_addr(unsigned int cpu)
{
	unsigned long addr;

	/* Base register for thread 0, stepped by the per-thread stride. */
	addr = MMCU_T0LOCAL_TABLE_PHYS1;
	addr += MMCU_TnX_TABLE_PHYSX_STRIDE * cpu;

	/* Global-space kernels use the global table register instead. */
	if (is_global_space(PAGE_OFFSET))
		addr += MMCU_TXG_TABLE_PHYSX_OFFSET;

	return addr;
}
#endif /* CONFIG_METAG_META21_MMU */

#endif
113 changes: 113 additions & 0 deletions arch/metag/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
#ifndef __METAG_MMU_CONTEXT_H
#define __METAG_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include <linux/io.h>

/*
 * Called when the kernel temporarily borrows an mm (lazy TLB mode).
 * No architecture state needs updating here on Meta, so this is a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
}

/*
 * Initialise the architecture context of a freshly created mm.
 * Always succeeds (returns 0).
 */
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
#ifndef CONFIG_METAG_META21_MMU
/* We use context to store a pointer to the page holding the
 * pgd of a process while it is running. While a process is not
 * running the pgd and context fields should be equal.
 */
mm->context.pgd_base = (unsigned long) mm->pgd;
#endif
#ifdef CONFIG_METAG_USER_TCM
/* No TCM allocations yet; destroy_context() walks this list. */
INIT_LIST_HEAD(&mm->context.tcm);
#endif
return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#include <linux/slab.h>
#include <asm/tcm.h>

/*
 * Tear down the architecture context of a dying mm: return every user
 * TCM allocation still owned by the mm and free its bookkeeping.
 */
static inline void destroy_context(struct mm_struct *mm)
{
struct tcm_allocation *pos, *n;

/* _safe variant: each entry is unlinked and freed during the walk. */
list_for_each_entry_safe(pos, n, &mm->context.tcm, list) {
tcm_free(pos->tag, pos->addr, pos->size);
list_del(&pos->list);
kfree(pos);
}
}
#else
#define destroy_context(mm) do { } while (0)
#endif

#ifdef CONFIG_METAG_META21_MMU
/*
 * Point hardware thread "thread"'s Meta2 MMU at a new page directory.
 * Writes the table-region permission word to the PHYS0 register and the
 * physical pgd base to the PHYS1 register.
 */
static inline void load_pgd(pgd_t *pgd, int thread)
{
unsigned long phys0 = mmu_phys0_addr(thread);
unsigned long phys1 = mmu_phys1_addr(thread);

/*
 * 0x900 2Gb address space
 * The permission bits apply to MMU table region which gives a 2MB
 * window into physical memory. We especially don't want userland to be
 * able to access this.
 */
metag_out32(0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE |
_PAGE_PRESENT, phys0);
/* Set new MMU base address */
metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1);
}
#endif

/*
 * Switch the MMU from prev's address space to next's.
 *
 * Meta2: simply load next's pgd into this hardware thread's table
 * registers. Meta1: the hardware walks a single fixed table (at
 * mmu_get_base()), so the user entries of that table are saved back into
 * prev's software pgd and then overwritten with next's entries.
 * NOTE(review): the copy order below (save prev, swap pointers, load
 * next) is significant — do not reorder.
 */
static inline void switch_mmu(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_METAG_META21_MMU
load_pgd(next->pgd, hard_processor_id());
#else
unsigned int i;

/* prev->context == prev->pgd in the case where we are initially
switching from the init task to the first process. */
if (prev->context.pgd_base != (unsigned long) prev->pgd) {
/* Save the live hardware table entries back into prev's pgd. */
for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i];
} else
prev->pgd = (pgd_t *)mmu_get_base();

/* next now owns the hardware table; prev reverts to its shadow pgd. */
next->pgd = prev->pgd;
prev->pgd = (pgd_t *) prev->context.pgd_base;

/* Populate the hardware table with next's user mappings. */
for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i];

flush_cache_all();
#endif
flush_tlb_all();
}

/*
 * Context-switch hook: change the MMU over to next's address space.
 * Switching to the mm we are already running is a no-op.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev == next)
		return;

	switch_mmu(prev, next);
}

/*
 * Activate next_mm unconditionally (used e.g. at exec time); unlike
 * switch_mm() this does not skip the prev == next case.
 */
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
switch_mmu(prev_mm, next_mm);
}

#define deactivate_mm(tsk, mm) do { } while (0)

#endif
128 changes: 128 additions & 0 deletions arch/metag/include/asm/page.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,128 @@
#ifndef _METAG_PAGE_H
#define _METAG_PAGE_H

#include <linux/const.h>

#include <asm/metag_mem.h>

/* PAGE_SHIFT determines the page size (selected at configure time). */
#if defined(CONFIG_PAGE_SIZE_4K)
#define PAGE_SHIFT 12
#elif defined(CONFIG_PAGE_SIZE_8K)
#define PAGE_SHIFT 13
#elif defined(CONFIG_PAGE_SIZE_16K)
#define PAGE_SHIFT 14
#endif

/* Page size in bytes, and the mask clearing the offset-within-page bits. */
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))

/* Huge page size (8KiB..4MiB), selected at configure time. */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
# define HPAGE_SHIFT 13
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
# define HPAGE_SHIFT 14
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
# define HPAGE_SHIFT 15
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
# define HPAGE_SHIFT 16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
# define HPAGE_SHIFT 17
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
# define HPAGE_SHIFT 18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
# define HPAGE_SHIFT 19
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
# define HPAGE_SHIFT 20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
# define HPAGE_SHIFT 21
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
# define HPAGE_SHIFT 22
#endif

#ifdef CONFIG_HUGETLB_PAGE
# define HPAGE_SIZE (1UL << HPAGE_SHIFT)
# define HPAGE_MASK (~(HPAGE_SIZE-1))
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
/*
 * We define our own hugetlb_get_unmapped_area so we don't corrupt 2nd level
 * page tables with normal pages in them.
 */
# define HUGEPT_SHIFT (22)
# define HUGEPT_ALIGN (1 << HUGEPT_SHIFT)
# define HUGEPT_MASK (HUGEPT_ALIGN - 1)
# define ALIGN_HUGEPT(x) ALIGN(x, HUGEPT_ALIGN)
# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

#ifndef __ASSEMBLY__

/* On the Meta, we would like to know if the address (heap) we have is
 * in local or global space. Addresses above 0x7fffffff are in the
 * global half of the virtual address space.
 */
#define is_global_space(addr) ((addr) > 0x7fffffff)
#define is_local_space(addr) (!is_global_space(addr))

extern void clear_page(void *to);
extern void copy_page(void *to, void *from);

/* No cache aliasing handling needed: user variants map to plain ops. */
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

/*
 * These are used to make use of C type-checking..
 * Each wraps a bare unsigned long so pte/pgd/pgprot values cannot be
 * mixed up accidentally.
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

/* Extract the raw value from a wrapped type... */
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

/* ...and construct a wrapped type from a raw value. */
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })

/* The kernel must now ALWAYS live at either 0xC0000000 or 0x40000000 - that
 * being either global or local space.
 */
#define PAGE_OFFSET (CONFIG_PAGE_OFFSET)

/* Pick the memory window matching the half the kernel lives in. */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define META_MEMORY_BASE LINGLOBAL_BASE
#define META_MEMORY_LIMIT LINGLOBAL_LIMIT
#else
#define META_MEMORY_BASE LINLOCAL_BASE
#define META_MEMORY_LIMIT LINLOCAL_LIMIT
#endif

/* Offset between physical and virtual mapping of kernel memory. */
extern unsigned int meta_memoffset;

/* Kernel virtual <-> physical conversion (simple linear offset). */
#define __pa(x) ((unsigned long)(((unsigned long)(x)) - meta_memoffset))
#define __va(x) ((void *)((unsigned long)(((unsigned long)(x)) + meta_memoffset)))

/* First valid page frame number; RAM need not start at physical 0. */
extern unsigned long pfn_base;
#define ARCH_PFN_OFFSET (pfn_base)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
extern unsigned long max_pfn;
extern unsigned long min_low_pfn;
#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_pfn)
#endif

#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)

/* Default protections for a fresh vma. */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* __ASSEMBLY__ */

#endif /* _METAG_PAGE_H */
Loading

0 comments on commit f5df8e2

Please sign in to comment.