    Detach sched.h from mm.h · e8edc6e0
    Alexey Dobriyan authored
    
    The first thing mm.h does is include sched.h, solely for the can_do_mlock()
    inline function, which dereferences "current" inside. By dealing with
    can_do_mlock(), mm.h can be detached from sched.h, which is good. See below
    for why.
    
    This patch
    a) removes the unconditional inclusion of sched.h from mm.h
    b) makes can_do_mlock() a normal function in mm/mlock.c (sketched below)
    c) exports can_do_mlock() so compilation doesn't break
    d) adds sched.h inclusions back to files that were getting it indirectly
    e) adds less bloated headers (asm/signal.h, jiffies.h) to some files that
       were getting them indirectly
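
    Roughly, steps (b) and (c) amount to the following (a sketch only, assuming
    can_do_mlock() keeps the body it had as an inline; the exact placement of
    the declaration is illustrative):

    	/* include/linux/mm.h: drop the sched.h include, keep only a declaration */
    	extern int can_do_mlock(void);

    	/* mm/mlock.c: the former inline becomes a normal, exported function */
    	int can_do_mlock(void)
    	{
    		if (capable(CAP_IPC_LOCK))
    			return 1;
    		if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
    			return 1;
    		return 0;
    	}
    	EXPORT_SYMBOL(can_do_mlock);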
    
    Net result is:
    a) mm.h users would get less code to open, read, preprocess, parse, ... if
       they don't need sched.h
    b) sched.h stops being a dependency for a significant number of files:
       on an x86_64 allmodconfig, touching sched.h results in a recompile of
       4083 files; after the patch it's only 3744 (-8.3%).
    
    Cross-compile tested on
    
    	all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
    	alpha alpha-up
    	arm
    	i386 i386-up i386-defconfig i386-allnoconfig
    	ia64 ia64-up
    	m68k
    	mips
    	parisc parisc-up
    	powerpc powerpc-up
    	s390 s390-up
    	sparc sparc-up
    	sparc64 sparc64-up
    	um-x86_64
    	x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
    
    as well as my two usual configs.
    Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

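/*
 * Lowest level of the mapping walk: fill in one PTE per page across
 * [addr, end), pointing at consecutive physical pages starting at
 * phys_addr.  The range must not be mapped already (hence the BUG_ON).
 */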
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, pgprot_t prot)
{
	pte_t *pte;
	unsigned long pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

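/*
 * Allocate (if needed) and walk the pmd entries covering [addr, end),
 * handing each pmd-sized chunk down to ioremap_pte_range().  phys_addr
 * is biased by -addr so that "phys_addr + addr" yields the physical
 * address matching each chunk's starting virtual address.
 */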
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, unsigned long phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

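/*
 * Same idea one level up: walk the pud entries for [addr, end) and let
 * ioremap_pmd_range() handle each pud-sized chunk.
 */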
static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

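/*
 * Map the physical range starting at phys_addr to the kernel virtual
 * range [addr, end) with protection bits prot, allocating intermediate
 * page-table levels in init_mm as needed, then flush the cache over the
 * new mapping.
 */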
int ioremap_page_range(unsigned long addr,
		       unsigned long end, unsigned long phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}