Merge branch 'devel-stable' into for-next

Russell King 2014-01-21 21:26:51 +00:00
commit 857989a7fd
4 changed files with 52 additions and 71 deletions


@@ -64,6 +64,7 @@ config ARM
select IRQ_FORCED_THREADING
select KTIME_SCALAR
select MODULES_USE_ELF_REL
select NO_BOOTMEM
select OLD_SIGACTION
select OLD_SIGSUSPEND3
select PERF_USE_VMALLOC


@@ -254,25 +254,59 @@ static inline int constant_fls(int x)
}
/*
* On ARMv5 and above those functions can be implemented around
* the clz instruction for much better code efficiency.
* On ARMv5 and above those functions can be implemented around the
* clz instruction for much better code efficiency. __clz returns
* the number of leading zeros, zero input will return 32, and
* 0x80000000 will return 0.
*/
static inline int fls(int x)
static inline unsigned int __clz(unsigned int x)
{
int ret;
if (__builtin_constant_p(x))
return constant_fls(x);
unsigned int ret;
asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
ret = 32 - ret;
return ret;
}
#define __fls(x) (fls(x) - 1)
#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
#define __ffs(x) (ffs(x) - 1)
/*
* fls() returns zero if the input is zero, otherwise returns the bit
* position of the last set bit, where the LSB is 1 and MSB is 32.
*/
static inline int fls(int x)
{
if (__builtin_constant_p(x))
return constant_fls(x);
return 32 - __clz(x);
}
/*
* __fls() returns the bit position of the last bit set, where the
* LSB is 0 and MSB is 31. Zero input is undefined.
*/
static inline unsigned long __fls(unsigned long x)
{
return fls(x) - 1;
}
/*
* ffs() returns zero if the input was zero, otherwise returns the bit
* position of the first set bit, where the LSB is 1 and MSB is 32.
*/
static inline int ffs(int x)
{
return fls(x & -x);
}
/*
* __ffs() returns the bit position of the first bit set, where the
* LSB is 0 and MSB is 31. Zero input is undefined.
*/
static inline unsigned long __ffs(unsigned long x)
{
return ffs(x) - 1;
}
#define ffz(x) __ffs( ~(x) )
#endif
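The return conventions documented in the comments above (fls(0) == 0, fls(0x80000000) == 32, __fls()/__ffs() counting from bit 0, ffz(x) == __ffs(~x)) can be sanity-checked outside the kernel. The following is a minimal userspace sketch, not the kernel implementation: it stands in for the ARM clz instruction with GCC's __builtin_clz(), and the model_* names are invented for illustration.

#include <assert.h>
#include <stdio.h>

/* Stand-in for the ARM clz instruction: like the kernel's __clz(), it must
 * return 32 for a zero input and 0 for 0x80000000. __builtin_clz() is
 * undefined for 0, so that case is handled explicitly. */
static unsigned int model_clz(unsigned int x)
{
	return x ? (unsigned int)__builtin_clz(x) : 32;
}

static int model_fls(int x)                       { return 32 - model_clz((unsigned int)x); }
static unsigned long model___fls(unsigned long x) { return model_fls(x) - 1; }
static int model_ffs(int x)                       { return model_fls(x & -x); }
static unsigned long model___ffs(unsigned long x) { return model_ffs(x) - 1; }

int main(void)
{
	/* Semantics as documented in the comments above. */
	assert(model_fls(0) == 0);                    /* zero input returns 0  */
	assert(model_fls(1) == 1);                    /* LSB is bit 1          */
	assert(model_fls(0x80000000) == 32);          /* MSB is bit 32         */
	assert(model_ffs(0) == 0);
	assert(model_ffs(0x40) == 7);
	assert(model___fls(0x80000000UL) == 31);      /* MSB is bit 31         */
	assert(model___ffs(0x40UL) == 6);             /* LSB is bit 0          */
	assert(model___ffs((unsigned long)(~0x07u)) == 3);  /* ffz(0x07) == 3  */
	printf("bit-op semantics hold\n");
	return 0;
}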


@@ -831,7 +831,7 @@ static void __init reserve_crashkernel(void)
if (ret)
return;
ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
ret = memblock_reserve(crash_base, crash_size);
if (ret < 0) {
pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
(unsigned long)crash_base);
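The hunk above replaces the bootmem call with memblock_reserve(), which returns 0 on success and a negative errno when the range cannot be reserved. Below is a kernel-style sketch of that pattern, assuming an early-boot context where memblock is still live; the function name, base and size are placeholders, and the snippet is illustrative rather than wired into anything.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/sizes.h>

/* Illustrative only: reserve a fixed physical window early in boot so the
 * page allocator never hands it out. Addresses and sizes are placeholders. */
static int __init reserve_example_region(void)
{
	phys_addr_t base = 0x60000000;	/* hypothetical base address */
	phys_addr_t size = SZ_16M;	/* hypothetical size */
	int ret;

	ret = memblock_reserve(base, size);
	if (ret < 0) {
		pr_warn("example reservation failed - memory is in use (%d)\n", ret);
		return ret;
	}
	return 0;
}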


@@ -145,58 +145,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,
*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
static void __init arm_bootmem_init(unsigned long start_pfn,
unsigned long end_pfn)
{
struct memblock_region *reg;
unsigned int boot_pages;
phys_addr_t bitmap;
pg_data_t *pgdat;
/*
* Allocate the bootmem bitmap page. This must be in a region
* of memory which has already been mapped.
*/
boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
__pfn_to_phys(end_pfn));
/*
* Initialise the bootmem allocator, handing the
* memory banks over to bootmem.
*/
node_set_online(0);
pgdat = NODE_DATA(0);
init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
/* Free the lowmem regions from memblock into bootmem. */
for_each_memblock(memory, reg) {
unsigned long start = memblock_region_memory_base_pfn(reg);
unsigned long end = memblock_region_memory_end_pfn(reg);
if (end >= end_pfn)
end = end_pfn;
if (start >= end)
break;
free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
}
/* Reserve the lowmem memblock reserved regions in bootmem. */
for_each_memblock(reserved, reg) {
unsigned long start = memblock_region_reserved_base_pfn(reg);
unsigned long end = memblock_region_reserved_end_pfn(reg);
if (end >= end_pfn)
end = end_pfn;
if (start >= end)
break;
reserve_bootmem(__pfn_to_phys(start),
(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
}
}
#ifdef CONFIG_ZONE_DMA
phys_addr_t arm_dma_zone_size __read_mostly;
@@ -236,7 +184,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
#endif
}
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
@@ -384,7 +332,6 @@ void __init arm_memblock_init(struct meminfo *mi,
dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
arm_memblock_steal_permitted = false;
memblock_allow_resize();
memblock_dump_all();
}
@@ -392,12 +339,11 @@ void __init bootmem_init(void)
{
unsigned long min, max_low, max_high;
memblock_allow_resize();
max_low = max_high = 0;
find_limits(&min, &max_low, &max_high);
arm_bootmem_init(min, max_low);
/*
* Sparsemem tries to allocate bootmem in memory_present(),
* so must be done after the fixed reservations
@@ -414,7 +360,7 @@ void __init bootmem_init(void)
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
arm_bootmem_free(min, max_low, max_high);
zone_sizes_init(min, max_low, max_high);
/*
* This doesn't seem to be used by the Linux memory manager any
@@ -587,7 +533,7 @@ void __init mem_init(void)
extern u32 itcm_end;
#endif
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
/* this will put all unused low memory onto the freelists */
free_unused_memmap(&meminfo);
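The last hunk swaps the open-coded max_mapnr assignment for set_max_mapnr() and drops the explicit PHYS_PFN_OFFSET adjustment, apparently because max_pfn is kept as an absolute PFN elsewhere in this series. A toy userspace model of the FLATMEM pfn_to_page() arithmetic, with made-up offset and size values, shows why pfn_to_page(max_pfn) - mem_map then equals the number of mem_map entries.

#include <assert.h>
#include <stddef.h>

/* Toy flatmem model, not kernel code: mem_map holds one entry per page
 * frame starting at ARCH_PFN_OFFSET, mirroring the FLATMEM pfn_to_page()
 * arithmetic. The numbers below are made up for illustration. */
struct page { int dummy; };

#define ARCH_PFN_OFFSET	0x60000UL	/* e.g. PHYS_OFFSET >> PAGE_SHIFT */
#define NR_FRAMES	0x20000UL	/* 512 MiB of 4 KiB pages */

static struct page mem_map[NR_FRAMES];

static struct page *pfn_to_page(unsigned long pfn)
{
	return &mem_map[pfn - ARCH_PFN_OFFSET];
}

int main(void)
{
	/* With max_pfn expressed as an absolute PFN (offset included),
	 * pfn_to_page(max_pfn) - mem_map yields the mem_map entry count,
	 * i.e. the value handed to set_max_mapnr() in the new code. */
	unsigned long max_pfn = ARCH_PFN_OFFSET + NR_FRAMES;
	ptrdiff_t max_mapnr = pfn_to_page(max_pfn) - mem_map;

	assert((unsigned long)max_mapnr == NR_FRAMES);
	return 0;
}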