/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_LMB_H
#define _LINUX_LMB_H
#ifdef __KERNEL__

#include <alist.h>
#include <asm/types.h>
#include <asm/u-boot.h>
#include <linux/bitops.h>

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

/* Sentinel max_addr meaning "no upper bound" for base-limited allocations */
#define LMB_ALLOC_ANYWHERE 0
/* Initial capacity of the free_mem/used_mem alists in struct lmb */
#define LMB_ALIST_INITIAL_SIZE 4

/**
 * enum lmb_flags - definition of memory region attributes
 * @LMB_NONE: no special request
 * @LMB_NOMAP: don't add to mmu configuration
 * @LMB_NOOVERWRITE: the memory region cannot be overwritten/re-reserved
 * @LMB_NONOTIFY: do not notify other modules of changes to this memory region
 */
enum lmb_flags {
	LMB_NONE = 0,
	/* NOTE: BIT(0) is deliberately unused; flag values start at BIT(1) */
	LMB_NOMAP = BIT(1),
	LMB_NOOVERWRITE = BIT(2),
	LMB_NONOTIFY = BIT(3),
};

/**
 * struct lmb_region - Description of one region.
 *
 * @base: Base address of the region.
 * @size: Size of the region
 * @flags: memory region attributes
 */
struct lmb_region {
	phys_addr_t base;
	phys_size_t size;
	enum lmb_flags flags;
};

/**
 * struct lmb - The LMB structure
 *
 * @free_mem: List of free memory regions
 * @used_mem: List of used/reserved memory regions
 * @test: Is structure being used for LMB tests
 */
struct lmb {
	struct alist free_mem;
	struct alist used_mem;
	bool test;
};

/**
 * lmb_init() - Initialise the LMB module
 *
 * Initialise the LMB lists needed for keeping the memory map. There
 * are two lists, in form of alloced list data structure. One for the
 * available memory, and one for the used memory. Initialise the two
 * lists as part of board init. Add memory to the available memory
 * list and reserve common areas by adding them to the used memory
 * list.
 *
 * Return: 0 on success, -ve on error
 */
int lmb_init(void);

/**
 * lmb_add_memory() - Add memory range for LMB allocations
 *
 * Add the entire available memory range to the pool of memory that
 * can be used by the LMB module for allocations.
 *
 * Return: None
 */
void lmb_add_memory(void);

/*
 * lmb_add() adds a region to the available-memory list; lmb_reserve()
 * marks a region in the used-memory list. The exact return convention
 * is defined in lmb.c (not visible from this header) — presumably the
 * same 0 / >0-coalesced / negative-error scheme documented for
 * lmb_reserve_flags() below; TODO(review): confirm against lmb.c.
 */
long lmb_add(phys_addr_t base, phys_size_t size);
long lmb_reserve(phys_addr_t base, phys_size_t size);

/**
 * lmb_reserve_flags - Reserve one region with a specific flags bitfield.
 *
 * @base: base address of the memory region
 * @size: size of the memory region
 * @flags: flags for the memory region
 * Return: 0 if OK, > 0 for coalesced region or a negative error code.
 */
long lmb_reserve_flags(phys_addr_t base, phys_size_t size,
		       enum lmb_flags flags);

#if CONFIG_IS_ENABLED(LMB)
phys_addr_t lmb_alloc(phys_size_t size, ulong align);
#else
/*
 * Stub used when LMB support is compiled out: every allocation fails.
 * 0 is the error/failure value used throughout this API.
 */
static inline phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return 0;
}
#endif

/*
 * Allocation variants (implemented in lmb.c; semantics inferred from the
 * flags variants documented below — TODO(review): confirm):
 *  - lmb_alloc_base():  allocate @size bytes aligned to @align, below
 *    @max_addr (LMB_ALLOC_ANYWHERE for no bound).
 *  - lmb_alloc_addr():  allocate @size bytes at the exact address @base.
 *  - lmb_get_free_size(): presumably returns the size of the free region
 *    containing @addr, 0 if none.
 * All allocators appear to return the base address on success, 0 on error.
 */
phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr);
phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size);
phys_size_t lmb_get_free_size(phys_addr_t addr);

/*
 * Like lmb_alloc_base(), with memory-region attribute bits (@flags,
 * values from enum lmb_flags) applied to the resulting reservation.
 */
phys_addr_t lmb_alloc_base_flags(phys_size_t size, ulong align,
				 phys_addr_t max_addr, uint flags);

/**
 * lmb_alloc_addr_flags() - Allocate specified memory address with specified attributes
 * @base: Base Address requested
 * @size: Size of the region requested
 * @flags: Memory region attributes to be set
 *
 * Allocate a region of memory with the attributes specified through the
 * parameter. The base parameter is used to specify the base address
 * of the requested region.
 *
 * Return: base address on success, 0 on error
 */
phys_addr_t lmb_alloc_addr_flags(phys_addr_t base, phys_size_t size,
				 uint flags);

/**
 * lmb_is_reserved_flags() - test if address is in reserved region with flag bits set
 *
 * The function checks if a reserved region comprising @addr exists which has
 * all flag bits set which are set in @flags.
 *
 * @addr: address to be tested
 * @flags: bitmap with bits to be tested
 * Return: 1 if matching reservation exists, 0 otherwise
 */
/* NOTE(review): takes int flags while the alloc/free variants take uint */
int lmb_is_reserved_flags(phys_addr_t addr, int flags);

/**
 * lmb_free_flags() - Free up a region of memory
 * @base: Base Address of region to be freed
 * @size: Size of the region to be freed
 * @flags: Memory region attributes
 *
 * Free up a region of memory.
 *
 * Return: 0 if successful, -1 on failure
 */
long lmb_free_flags(phys_addr_t base, phys_size_t size, uint flags);

/* Free a reserved region; presumably lmb_free_flags() with no flags —
 * TODO(review): confirm. Return: 0 if successful, -1 on failure. */
long lmb_free(phys_addr_t base, phys_size_t size);

/*
 * Debug dumps of the region lists. The _force variant presumably
 * bypasses a debug-only gate in lmb_dump_all() — confirm in lmb.c.
 */
void lmb_dump_all(void);
void lmb_dump_all_force(void);

/* Arch hook, apparently for registering platform memory — TODO confirm */
void lmb_arch_add_memory(void);

/*
 * Access to / save-restore of the active LMB state. lmb_push()/lmb_pop()
 * appear to stash the current state into @store and restore it later
 * (used by the LMB tests, cf. struct lmb::test) — TODO(review): confirm.
 */
struct lmb *lmb_get(void);
int lmb_push(struct lmb *store);
void lmb_pop(struct lmb *store);

/**
 * lmb_read_check() - Check that a region is available for use
 * @addr: base address of the region
 * @len: length of the region in bytes
 *
 * Attempts to claim the exact region [@addr, @addr + @len) via
 * lmb_alloc_addr(); succeeds only if the allocator returns @addr.
 * NOTE(review): on success the region presumably remains reserved as a
 * side effect of the allocation — confirm against lmb_alloc_addr().
 *
 * Return: 0 if the region was claimed at @addr, -1 otherwise
 */
static inline int lmb_read_check(phys_addr_t addr, phys_size_t len)
{
	return lmb_alloc_addr(addr, len) == addr ? 0 : -1;
}

/**
 * io_lmb_setup() - Initialize LMB struct
 * @io_lmb: IO LMB to initialize
 *
 * Returns: 0 on success, negative error code on failure
 */
int io_lmb_setup(struct lmb *io_lmb);

/**
 * io_lmb_teardown() - Tear LMB struct down
 * @io_lmb: IO LMB to teardown
 */
void io_lmb_teardown(struct lmb *io_lmb);

/**
 * io_lmb_add() - Add an IOVA range for allocations
 * @io_lmb: LMB to add the space to
 * @base: Base Address of region to add
 * @size: Size of the region to add
 *
 * Add the IOVA space [base, base + size] to be managed by io_lmb.
 *
 * Returns: 0 if the region addition was successful, -1 on failure
 */
long io_lmb_add(struct lmb *io_lmb, phys_addr_t base, phys_size_t size);

/**
 * io_lmb_alloc() - Allocate specified IO memory address with specified alignment
 * @io_lmb: LMB to alloc from
 * @size: Size of the region requested
 * @align: Required address and size alignment
 *
 * Allocate a region of IO memory. The base parameter is used to specify the
 * base address of the requested region.
 *
 * Return: base IO address on success, 0 on error
 */
phys_addr_t io_lmb_alloc(struct lmb *io_lmb, phys_size_t size, ulong align);

/**
 * io_lmb_free() - Free up a region of IOVA space
 * @io_lmb: LMB to return the IO address space to
 * @base: Base Address of region to be freed
 * @size: Size of the region to be freed
 *
 * Free up a region of IOVA space.
 *
 * Return: 0 if successful, -1 on failure
 */
long io_lmb_free(struct lmb *io_lmb, phys_addr_t base, phys_size_t size);

#endif /* __KERNEL__ */
#endif /* _LINUX_LMB_H */