| 1 | /************************************************* |
| 2 | * Exim - an Internet mail transport agent * |
| 3 | *************************************************/ |
| 4 | |
| 5 | /* Copyright (c) University of Cambridge 1995 - 2018 */ |
| 6 | /* Copyright (c) The Exim maintainers 2019 */ |
| 7 | /* See the file NOTICE for conditions of use and distribution. */ |
| 8 | |
| 9 | /* Exim gets and frees all its store through these functions. In the original |
| 10 | implementation there was a lot of mallocing and freeing of small bits of store. |
| 11 | The philosophy has now changed to a scheme which includes the concept of |
| 12 | "stacking pools" of store. For the short-lived processes, there isn't any real |
| 13 | need to do any garbage collection, but the stack concept allows quick resetting |
| 14 | in places where this seems sensible. |
| 15 | |
| 16 | Obviously the long-running processes (the daemon, the queue runner, and eximon) |
| 17 | must take care not to eat store. |
| 18 | |
| 19 | The following different types of store are recognized: |
| 20 | |
| 21 | . Long-lived, large blocks: This is implemented by retaining the original |
malloc/free functions, and it is used for permanent working buffers and for
| 23 | getting blocks to cut up for the other types. |
| 24 | |
| 25 | . Long-lived, small blocks: This is used for blocks that have to survive until |
| 26 | the process exits. It is implemented as a stacking pool (POOL_PERM). This is |
| 27 | functionally the same as store_malloc(), except that the store can't be |
| 28 | freed, but I expect it to be more efficient for handling small blocks. |
| 29 | |
| 30 | . Short-lived, short blocks: Most of the dynamic store falls into this |
| 31 | category. It is implemented as a stacking pool (POOL_MAIN) which is reset |
| 32 | after accepting a message when multiple messages are received by a single |
| 33 | process. Resetting happens at some other times as well, usually fairly |
| 34 | locally after some specific processing that needs working store. |
| 35 | |
| 36 | . There is a separate pool (POOL_SEARCH) that is used only for lookup storage. |
| 37 | This means it can be freed when search_tidyup() is called to close down all |
| 38 | the lookup caching. |
| 39 | |
| 40 | . Orthogonal to the three pool types, there are two classes of memory: untainted |
| 41 | and tainted. The latter is used for values derived from untrusted input, and |
| 42 | the string-expansion mechanism refuses to operate on such values (obviously, |
| 43 | it can expand an untainted value to return a tainted result). The classes |
are implemented by duplicating the three pool types. Pool resets are requested
against the untainted sibling and apply to both siblings.
| 46 | |
| 47 | Only memory blocks requested for tainted use are regarded as tainted; anything |
else (including stack auto variables) is untainted. Care is needed when coding
not to copy untrusted data into untainted memory, as that would bypass the
downstream taint-checks.
| 51 | |
Intermediate layers (eg. the string functions) can test for taint, and use this
for ensuring that results have the proper state. For example, the
string_vformat_trc() routine supporting the string_sprintf() interface will
recopy a string being built into a tainted allocation if it meets a %s for a
tainted argument.
| 57 | |
Internally we currently use malloc for untainted pools and mmap for tainted
pools. The disparity exists for speed of testing the taintedness of pointers:
Linux appears to use distinct non-overlapping address ranges for mmap vs.
everything else, so only two pointer-compares suffice for the test. Other OSes
cannot use that optimisation, and a more lengthy test against the limits of
tainted-pool allocations has to be done.
| 64 | */ |
| 65 | |
| 66 | |
| 67 | #include "exim.h" |
| 68 | /* keep config.h before memcheck.h, for NVALGRIND */ |
| 69 | #include "config.h" |
| 70 | |
| 71 | #include <sys/mman.h> |
| 72 | #include "memcheck.h" |
| 73 | |
| 74 | |
| 75 | /* We need to know how to align blocks of data for general use. I'm not sure |
| 76 | how to get an alignment factor in general. In the current world, a value of 8 |
| 77 | is probably right, and this is sizeof(double) on some systems and sizeof(void |
| 78 | *) on others, so take the larger of those. Since everything in this expression |
| 79 | is a constant, the compiler should optimize it to a simple constant wherever it |
| 80 | appears (I checked that gcc does do this). */ |
| 81 | |
| 82 | #define alignment \ |
| 83 | (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double)) |
| 84 | |
| 85 | /* store_reset() will not free the following block if the last used block has |
| 86 | less than this much left in it. */ |
| 87 | |
| 88 | #define STOREPOOL_MIN_SIZE 256 |
| 89 | |
| 90 | /* Structure describing the beginning of each big block. */ |
| 91 | |
| 92 | typedef struct storeblock { |
| 93 | struct storeblock *next; |
| 94 | size_t length; |
| 95 | } storeblock; |
| 96 | |
| 97 | /* Just in case we find ourselves on a system where the structure above has a |
| 98 | length that is not a multiple of the alignment, set up a macro for the padded |
| 99 | length. */ |
| 100 | |
| 101 | #define ALIGNED_SIZEOF_STOREBLOCK \ |
| 102 | (((sizeof(storeblock) + alignment - 1) / alignment) * alignment) |
| 103 | |
| 104 | /* Size of block to get from malloc to carve up into smaller ones. This |
| 105 | must be a multiple of the alignment. We assume that 8192 is going to be |
| 106 | suitably aligned. */ |
| 107 | |
| 108 | #define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK) |
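
/* A worked example, assuming a typical LP64 platform (8-byte pointers and
doubles): alignment is 8 and sizeof(storeblock) is 16, so
ALIGNED_SIZEOF_STOREBLOCK is 16 and STORE_BLOCK_SIZE is 8176, making each
fresh block exactly 8192 bytes including its header. */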
| 109 | |
| 110 | /* Variables holding data for the local pools of store. The current pool number |
| 111 | is held in store_pool, which is global so that it can be changed from outside. |
| 112 | Setting the initial length values to -1 forces a malloc for the first call, |
| 113 | even if the length is zero (which is used for getting a point to reset to). */ |
| 114 | |
| 115 | int store_pool = POOL_MAIN; |
| 116 | |
| 117 | #define NPOOLS 6 |
| 118 | static storeblock *chainbase[NPOOLS]; |
| 119 | static storeblock *current_block[NPOOLS]; |
| 120 | static void *next_yield[NPOOLS]; |
| 121 | static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 }; |
| 122 | |
/* The limits of the tainted pools. Tracking these on new allocations enables
a fast is_tainted implementation. We assume the kernel allocates mmaps in a
region entirely on one side of the data+heap area, never on both sides. */
| 126 | |
| 127 | void * tainted_base = (void *)-1; |
| 128 | void * tainted_top = (void *)0; |
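
/* Given those limits, the fast test needs only two pointer compares. A sketch
of what the TAINT_CHECK_FAST variant of the is_tainted() macro can look like
(the real definition lives in store.h; this is illustrative only):

     #define is_tainted(p) \
       ((void *)(p) >= tainted_base && (void *)(p) < tainted_top)
*/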
| 129 | |
| 130 | /* pool_malloc holds the amount of memory used by the store pools; this goes up |
| 131 | and down as store is reset or released. nonpool_malloc is the total got by |
| 132 | malloc from other calls; this doesn't go down because it is just freed by |
| 133 | pointer. */ |
| 134 | |
| 135 | static int pool_malloc; |
| 136 | static int nonpool_malloc; |
| 137 | |
/* These are set by store_get() to its yield, and by store_reset() to NULL,
one per pool. This enables string_cat() to optimize its store handling for
very long strings. That's why they are global. */
| 141 | |
| 142 | void *store_last_get[NPOOLS]; |
| 143 | |
| 144 | /* These are purely for stats-gathering */ |
| 145 | |
| 146 | static int nbytes[NPOOLS]; /* current bytes allocated */ |
| 147 | static int maxbytes[NPOOLS]; /* max number reached */ |
| 148 | static int nblocks[NPOOLS]; /* current number of blocks allocated */ |
| 149 | static int maxblocks[NPOOLS]; |
| 150 | static int n_nonpool_blocks; /* current number of direct store_malloc() blocks */ |
| 151 | static int max_nonpool_blocks; |
| 152 | static int max_pool_malloc; /* max value for pool_malloc */ |
| 153 | static int max_nonpool_malloc; /* max value for nonpool_malloc */ |
| 154 | |
| 155 | |
| 156 | #ifndef COMPILE_UTILITY |
| 157 | static const uschar * pooluse[NPOOLS] = { |
| 158 | [POOL_MAIN] = US"main", |
| 159 | [POOL_PERM] = US"perm", |
| 160 | [POOL_SEARCH] = US"search", |
| 161 | [POOL_TAINT_MAIN] = US"main", |
| 162 | [POOL_TAINT_PERM] = US"perm", |
| 163 | [POOL_TAINT_SEARCH] = US"search", |
| 164 | }; |
| 165 | static const uschar * poolclass[NPOOLS] = { |
| 166 | [POOL_MAIN] = US"untainted", |
| 167 | [POOL_PERM] = US"untainted", |
| 168 | [POOL_SEARCH] = US"untainted", |
| 169 | [POOL_TAINT_MAIN] = US"tainted", |
| 170 | [POOL_TAINT_PERM] = US"tainted", |
| 171 | [POOL_TAINT_SEARCH] = US"tainted", |
| 172 | }; |
| 173 | #endif |
| 174 | |
| 175 | |
| 176 | static void * store_mmap(int, const char *, int); |
| 177 | static void * internal_store_malloc(int, const char *, int); |
| 178 | static void internal_untainted_free(void *, const char *, int linenumber); |
| 179 | static void internal_tainted_free(storeblock *, const char *, int linenumber); |
| 180 | |
| 181 | /******************************************************************************/ |
| 182 | |
| 183 | #ifndef TAINT_CHECK_FAST |
/* Slower version of the taint check, for use when the platform intermixes
malloc and mmap address ranges. */
| 186 | |
| 187 | BOOL |
| 188 | is_tainted_fn(const void * p) |
| 189 | { |
| 190 | storeblock * b; |
| 191 | int pool; |
| 192 | |
| 193 | for (pool = 0; pool < nelem(chainbase); pool++) |
| 194 | if ((b = current_block[pool])) |
| 195 | { |
| 196 | char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK; |
| 197 | if (CS p >= bc && CS p <= bc + b->length) goto hit; |
| 198 | } |
| 199 | |
| 200 | for (pool = 0; pool < nelem(chainbase); pool++) |
| 201 | for (b = chainbase[pool]; b; b = b->next) |
| 202 | { |
| 203 | char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK; |
| 204 | if (CS p >= bc && CS p <= bc + b->length) goto hit; |
| 205 | } |
| 206 | return FALSE; |
| 207 | |
| 208 | hit: |
| 209 | return pool >= POOL_TAINT_BASE; |
| 210 | } |
| 211 | #endif |
| 212 | |
| 213 | |
| 214 | void |
| 215 | die_tainted(const uschar * msg, const uschar * func, int line) |
| 216 | { |
| 217 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n", |
| 218 | msg, func, line); |
| 219 | } |
| 220 | |
| 221 | |
| 222 | /************************************************* |
| 223 | * Get a block from the current pool * |
| 224 | *************************************************/ |
| 225 | |
/* Running out of store is a total disaster. This function is called via the
macro store_get(). It passes back a block of store within the current big
block, getting a new one if necessary. The address is saved in
store_last_get[] for the current pool.
| 230 | |
| 231 | Arguments: |
| 232 | size amount wanted, bytes |
| 233 | tainted class: set to true for untrusted data (eg. from smtp input) |
| 234 | func function from which called |
| 235 | linenumber line number in source file |
| 236 | |
| 237 | Returns: pointer to store (panic on malloc failure) |
| 238 | */ |
| 239 | |
| 240 | void * |
| 241 | store_get_3(int size, BOOL tainted, const char *func, int linenumber) |
| 242 | { |
| 243 | int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool; |
| 244 | |
| 245 | /* Round up the size to a multiple of the alignment. Although this looks a |
| 246 | messy statement, because "alignment" is a constant expression, the compiler can |
| 247 | do a reasonable job of optimizing, especially if the value of "alignment" is a |
| 248 | power of two. I checked this with -O2, and gcc did very well, compiling it to 4 |
| 249 | instructions on a Sparc (alignment = 8). */ |
| 250 | |
| 251 | if (size % alignment != 0) size += alignment - (size % alignment); |
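
/* For example, with alignment 8 a request for 13 bytes is rounded up to 16,
while a request for 16 is left unchanged. */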
| 252 | |
| 253 | /* If there isn't room in the current block, get a new one. The minimum |
| 254 | size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since |
| 255 | these functions are mostly called for small amounts of store. */ |
| 256 | |
| 257 | if (size > yield_length[pool]) |
| 258 | { |
| 259 | int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size; |
| 260 | int mlength = length + ALIGNED_SIZEOF_STOREBLOCK; |
| 261 | storeblock * newblock; |
| 262 | |
| 263 | /* Sometimes store_reset() may leave a block for us; check if we can use it */ |
| 264 | |
| 265 | if ( (newblock = current_block[pool]) |
| 266 | && (newblock = newblock->next) |
| 267 | && newblock->length < length |
| 268 | ) |
| 269 | { |
| 270 | /* Give up on this block, because it's too small */ |
| 271 | nblocks[pool]--; |
| 272 | if (pool < POOL_TAINT_BASE) |
| 273 | internal_untainted_free(newblock, func, linenumber); |
| 274 | else |
| 275 | internal_tainted_free(newblock, func, linenumber); |
| 276 | newblock = NULL; |
| 277 | } |
| 278 | |
| 279 | /* If there was no free block, get a new one */ |
| 280 | |
| 281 | if (!newblock) |
| 282 | { |
| 283 | if ((nbytes[pool] += mlength) > maxbytes[pool]) |
| 284 | maxbytes[pool] = nbytes[pool]; |
| 285 | if ((pool_malloc += mlength) > max_pool_malloc) /* Used in pools */ |
| 286 | max_pool_malloc = pool_malloc; |
| 287 | nonpool_malloc -= mlength; /* Exclude from overall total */ |
| 288 | if (++nblocks[pool] > maxblocks[pool]) |
| 289 | maxblocks[pool] = nblocks[pool]; |
| 290 | |
| 291 | newblock = tainted |
| 292 | ? store_mmap(mlength, func, linenumber) |
| 293 | : internal_store_malloc(mlength, func, linenumber); |
| 294 | newblock->next = NULL; |
| 295 | newblock->length = length; |
| 296 | |
| 297 | if (!chainbase[pool]) |
| 298 | chainbase[pool] = newblock; |
| 299 | else |
| 300 | current_block[pool]->next = newblock; |
| 301 | } |
| 302 | |
| 303 | current_block[pool] = newblock; |
| 304 | yield_length[pool] = newblock->length; |
| 305 | next_yield[pool] = |
| 306 | (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK); |
| 307 | (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]); |
| 308 | } |
| 309 | |
| 310 | /* There's (now) enough room in the current block; the yield is the next |
| 311 | pointer. */ |
| 312 | |
| 313 | store_last_get[pool] = next_yield[pool]; |
| 314 | |
| 315 | /* Cut out the debugging stuff for utilities, but stop picky compilers from |
| 316 | giving warnings. */ |
| 317 | |
| 318 | #ifdef COMPILE_UTILITY |
| 319 | func = func; |
| 320 | linenumber = linenumber; |
| 321 | #else |
| 322 | DEBUG(D_memory) |
| 323 | debug_printf("---%d Get %6p %5d %-14s %4d\n", pool, |
| 324 | store_last_get[pool], size, func, linenumber); |
| 325 | #endif /* COMPILE_UTILITY */ |
| 326 | |
| 327 | (void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size); |
| 328 | /* Update next pointer and number of bytes left in the current block. */ |
| 329 | |
| 330 | next_yield[pool] = (void *)(CS next_yield[pool] + size); |
| 331 | yield_length[pool] -= size; |
| 332 | return store_last_get[pool]; |
| 333 | } |
| 334 | |
| 335 | |
| 336 | |
| 337 | /************************************************* |
| 338 | * Get a block from the PERM pool * |
| 339 | *************************************************/ |
| 340 | |
/* This is a convenience function, useful when a single block is to be
obtained from the PERM pool without manually switching store_pool.
| 343 | |
Arguments:
  size        amount wanted
  tainted     class: set to true for untrusted data (eg. from smtp input)
  func        function from which called
  linenumber  line number in source file
| 348 | |
| 349 | Returns: pointer to store (panic on malloc failure) |
| 350 | */ |
| 351 | |
| 352 | void * |
| 353 | store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber) |
| 354 | { |
| 355 | void *yield; |
| 356 | int old_pool = store_pool; |
| 357 | store_pool = POOL_PERM; |
| 358 | yield = store_get_3(size, tainted, func, linenumber); |
| 359 | store_pool = old_pool; |
| 360 | return yield; |
| 361 | } |
| 362 | |
| 363 | |
| 364 | |
| 365 | /************************************************* |
| 366 | * Extend a block if it is at the top * |
| 367 | *************************************************/ |
| 368 | |
| 369 | /* While reading strings of unknown length, it is often the case that the |
| 370 | string is being read into the block at the top of the stack. If it needs to be |
| 371 | extended, it is more efficient just to extend within the top block rather than |
| 372 | allocate a new block and then have to copy the data. This function is provided |
| 373 | for the use of string_cat(), but of course can be used elsewhere too. |
| 374 | The block itself is not expanded; only the top allocation from it. |
| 375 | |
| 376 | Arguments: |
| 377 | ptr pointer to store block |
| 378 | oldsize current size of the block, as requested by user |
| 379 | newsize new size required |
| 380 | func function from which called |
| 381 | linenumber line number in source file |
| 382 | |
| 383 | Returns: TRUE if the block is at the top of the stack and has been |
| 384 | extended; FALSE if it isn't at the top of the stack, or cannot |
| 385 | be extended |
| 386 | */ |
| 387 | |
| 388 | BOOL |
| 389 | store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize, |
| 390 | const char *func, int linenumber) |
| 391 | { |
| 392 | int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool; |
| 393 | int inc = newsize - oldsize; |
| 394 | int rounded_oldsize = oldsize; |
| 395 | |
| 396 | /* Check that the block being extended was already of the required taint status; |
| 397 | refuse to extend if not. */ |
| 398 | |
| 399 | if (is_tainted(ptr) != tainted) |
| 400 | return FALSE; |
| 401 | |
| 402 | if (rounded_oldsize % alignment != 0) |
| 403 | rounded_oldsize += alignment - (rounded_oldsize % alignment); |
| 404 | |
| 405 | if (CS ptr + rounded_oldsize != CS (next_yield[pool]) || |
| 406 | inc > yield_length[pool] + rounded_oldsize - oldsize) |
| 407 | return FALSE; |
| 408 | |
| 409 | /* Cut out the debugging stuff for utilities, but stop picky compilers from |
| 410 | giving warnings. */ |
| 411 | |
| 412 | #ifdef COMPILE_UTILITY |
| 413 | func = func; |
| 414 | linenumber = linenumber; |
| 415 | #else |
| 416 | DEBUG(D_memory) |
| 417 | debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize, |
| 418 | func, linenumber); |
| 419 | #endif /* COMPILE_UTILITY */ |
| 420 | |
| 421 | if (newsize % alignment != 0) newsize += alignment - (newsize % alignment); |
| 422 | next_yield[pool] = CS ptr + newsize; |
| 423 | yield_length[pool] -= newsize - rounded_oldsize; |
(void) VALGRIND_MAKE_MEM_UNDEFINED(CS ptr + oldsize, inc);
| 425 | return TRUE; |
| 426 | } |
| 427 | |
| 428 | |
| 429 | |
| 430 | |
| 431 | /************************************************* |
| 432 | * Back up to a previous point on the stack * |
| 433 | *************************************************/ |
| 434 | |
| 435 | /* This function resets the next pointer, freeing any subsequent whole blocks |
| 436 | that are now unused. Call with a cookie obtained from store_mark() only; do |
| 437 | not call with a pointer returned by store_get(). Both the untainted and tainted |
pools corresponding to store_pool are reset.
| 439 | |
| 440 | Arguments: |
| 441 | r place to back up to |
| 442 | func function from which called |
| 443 | linenumber line number in source file |
| 444 | |
| 445 | Returns: nothing |
| 446 | */ |
| 447 | |
| 448 | static void |
| 449 | internal_store_reset(void * ptr, int pool, const char *func, int linenumber) |
| 450 | { |
| 451 | storeblock * bb; |
| 452 | storeblock * b = current_block[pool]; |
| 453 | char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK; |
| 454 | int newlength, count; |
| 455 | #ifndef COMPILE_UTILITY |
| 456 | int oldmalloc = pool_malloc; |
| 457 | #endif |
| 458 | |
| 459 | /* Last store operation was not a get */ |
| 460 | |
| 461 | store_last_get[pool] = NULL; |
| 462 | |
| 463 | /* See if the place is in the current block - as it often will be. Otherwise, |
| 464 | search for the block in which it lies. */ |
| 465 | |
| 466 | if (CS ptr < bc || CS ptr > bc + b->length) |
| 467 | { |
| 468 | for (b = chainbase[pool]; b; b = b->next) |
| 469 | { |
| 470 | bc = CS b + ALIGNED_SIZEOF_STOREBLOCK; |
| 471 | if (CS ptr >= bc && CS ptr <= bc + b->length) break; |
| 472 | } |
| 473 | if (!b) |
| 474 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) " |
| 475 | "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber); |
| 476 | } |
| 477 | |
| 478 | /* Back up, rounding to the alignment if necessary. When testing, flatten |
| 479 | the released memory. */ |
| 480 | |
| 481 | newlength = bc + b->length - CS ptr; |
| 482 | #ifndef COMPILE_UTILITY |
| 483 | if (debug_store) |
| 484 | { |
| 485 | assert_no_variables(ptr, newlength, func, linenumber); |
| 486 | if (f.running_in_test_harness) |
| 487 | { |
| 488 | (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength); |
| 489 | memset(ptr, 0xF0, newlength); |
| 490 | } |
| 491 | } |
| 492 | #endif |
| 493 | (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength); |
| 494 | next_yield[pool] = CS ptr + (newlength % alignment); |
| 495 | count = yield_length[pool]; |
| 496 | count = (yield_length[pool] = newlength - (newlength % alignment)) - count; |
| 497 | current_block[pool] = b; |
| 498 | |
/* Free any subsequent blocks. Do NOT free the first successor if our current
block has less than STOREPOOL_MIN_SIZE bytes left in it, as that prevents
memory flapping (repeatedly freeing and re-getting a block). However, keep
this successor only when it has the default size. */
| 503 | |
| 504 | if ( yield_length[pool] < STOREPOOL_MIN_SIZE |
| 505 | && b->next |
| 506 | && b->next->length == STORE_BLOCK_SIZE) |
| 507 | { |
| 508 | b = b->next; |
| 509 | #ifndef COMPILE_UTILITY |
| 510 | if (debug_store) |
| 511 | assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK, |
| 512 | func, linenumber); |
| 513 | #endif |
| 514 | (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK, |
| 515 | b->length - ALIGNED_SIZEOF_STOREBLOCK); |
| 516 | } |
| 517 | |
| 518 | bb = b->next; |
| 519 | b->next = NULL; |
| 520 | |
| 521 | while ((b = bb)) |
| 522 | { |
| 523 | int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK; |
| 524 | #ifndef COMPILE_UTILITY |
| 525 | if (debug_store) |
| 526 | assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK, |
| 527 | func, linenumber); |
| 528 | #endif |
| 529 | bb = bb->next; |
| 530 | nbytes[pool] -= siz; |
| 531 | pool_malloc -= siz; |
| 532 | nblocks[pool]--; |
| 533 | if (pool < POOL_TAINT_BASE) |
| 534 | internal_untainted_free(b, func, linenumber); |
| 535 | else |
| 536 | internal_tainted_free(b, func, linenumber); |
| 537 | } |
| 538 | |
| 539 | /* Cut out the debugging stuff for utilities, but stop picky compilers from |
| 540 | giving warnings. */ |
| 541 | |
| 542 | #ifdef COMPILE_UTILITY |
| 543 | func = func; |
| 544 | linenumber = linenumber; |
| 545 | #else |
| 546 | DEBUG(D_memory) |
| 547 | debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr, |
| 548 | count + oldmalloc - pool_malloc, |
| 549 | func, linenumber, pool_malloc); |
| 550 | #endif /* COMPILE_UTILITY */ |
| 551 | } |
| 552 | |
| 553 | |
| 554 | rmark |
| 555 | store_reset_3(rmark r, int pool, const char *func, int linenumber) |
| 556 | { |
| 557 | void ** ptr = r; |
| 558 | |
| 559 | if (pool >= POOL_TAINT_BASE) |
| 560 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, |
| 561 | "store_reset called for pool %d: %s %d\n", pool, func, linenumber); |
| 562 | if (!r) |
| 563 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, |
| 564 | "store_reset called with bad mark: %s %d\n", func, linenumber); |
| 565 | |
| 566 | internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber); |
| 567 | internal_store_reset(ptr, pool, func, linenumber); |
| 568 | return NULL; |
| 569 | } |
| 570 | |
| 571 | |
| 572 | |
| 573 | /* Free tail-end unused allocation. This lets us allocate a big chunk |
| 574 | early, for cases when we only discover later how much was really needed. |
| 575 | |
| 576 | Can be called with a value from store_get(), or an offset after such. Only |
| 577 | the tainted or untainted pool that serviced the store_get() will be affected. |
| 578 | |
| 579 | This is mostly a cut-down version of internal_store_reset(). |
| 580 | XXX needs rationalising |
| 581 | */ |
| 582 | |
| 583 | void |
| 584 | store_release_above_3(void *ptr, const char *func, int linenumber) |
| 585 | { |
| 586 | /* Search all pools' "current" blocks. If it isn't one of those, |
| 587 | ignore it (it usually will be). */ |
| 588 | |
| 589 | for (int pool = 0; pool < nelem(current_block); pool++) |
| 590 | { |
| 591 | storeblock * b = current_block[pool]; |
| 592 | char * bc; |
| 593 | int count, newlength; |
| 594 | |
| 595 | if (!b) |
| 596 | continue; |
| 597 | |
| 598 | bc = CS b + ALIGNED_SIZEOF_STOREBLOCK; |
| 599 | if (CS ptr < bc || CS ptr > bc + b->length) |
| 600 | continue; |
| 601 | |
| 602 | /* Last store operation was not a get */ |
| 603 | |
| 604 | store_last_get[pool] = NULL; |
| 605 | |
| 606 | /* Back up, rounding to the alignment if necessary. When testing, flatten |
| 607 | the released memory. */ |
| 608 | |
| 609 | newlength = bc + b->length - CS ptr; |
| 610 | #ifndef COMPILE_UTILITY |
| 611 | if (debug_store) |
| 612 | { |
| 613 | assert_no_variables(ptr, newlength, func, linenumber); |
| 614 | if (f.running_in_test_harness) |
| 615 | { |
| 616 | (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength); |
| 617 | memset(ptr, 0xF0, newlength); |
| 618 | } |
| 619 | } |
| 620 | #endif |
| 621 | (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength); |
| 622 | next_yield[pool] = CS ptr + (newlength % alignment); |
| 623 | count = yield_length[pool]; |
| 624 | count = (yield_length[pool] = newlength - (newlength % alignment)) - count; |
| 625 | |
| 626 | /* Cut out the debugging stuff for utilities, but stop picky compilers from |
| 627 | giving warnings. */ |
| 628 | |
| 629 | #ifdef COMPILE_UTILITY |
| 630 | func = func; |
| 631 | linenumber = linenumber; |
| 632 | #else |
| 633 | DEBUG(D_memory) |
| 634 | debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count, |
| 635 | func, linenumber, pool_malloc); |
| 636 | #endif |
| 637 | return; |
| 638 | } |
| 639 | #ifndef COMPILE_UTILITY |
| 640 | DEBUG(D_memory) |
| 641 | debug_printf("non-last memory release try: %s %d\n", func, linenumber); |
| 642 | #endif |
| 643 | } |
| 644 | |
| 645 | |
| 646 | |
| 647 | rmark |
| 648 | store_mark_3(const char *func, int linenumber) |
| 649 | { |
| 650 | void ** p; |
| 651 | |
| 652 | if (store_pool >= POOL_TAINT_BASE) |
| 653 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, |
| 654 | "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber); |
| 655 | |
| 656 | /* Stash a mark for the tainted-twin release, in the untainted twin. Return |
| 657 | a cookie (actually the address in the untainted pool) to the caller. |
| 658 | Reset uses the cookie to recover the t-mark, winds back the tainted pool with it |
| 659 | and winds back the untainted pool with the cookie. */ |
| 660 | |
| 661 | p = store_get_3(sizeof(void *), FALSE, func, linenumber); |
| 662 | *p = store_get_3(0, TRUE, func, linenumber); |
| 663 | return p; |
| 664 | } |
| 665 | |
| 666 | |
| 667 | |
| 668 | |
| 669 | /************************************************ |
| 670 | * Release store * |
| 671 | ************************************************/ |
| 672 | |
| 673 | /* This function checks that the pointer it is given is the first thing in a |
| 674 | block, and if so, releases that block. |
| 675 | |
| 676 | Arguments: |
| 677 | block block of store to consider |
| 678 | func function from which called |
| 679 | linenumber line number in source file |
| 680 | |
| 681 | Returns: nothing |
| 682 | */ |
| 683 | |
| 684 | static void |
| 685 | store_release_3(void * block, int pool, const char * func, int linenumber) |
| 686 | { |
| 687 | /* It will never be the first block, so no need to check that. */ |
| 688 | |
| 689 | for (storeblock * b = chainbase[pool]; b; b = b->next) |
| 690 | { |
| 691 | storeblock * bb = b->next; |
| 692 | if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK) |
| 693 | { |
| 694 | int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK; |
| 695 | b->next = bb->next; |
| 696 | nbytes[pool] -= siz; |
| 697 | pool_malloc -= siz; |
| 698 | nblocks[pool]--; |
| 699 | |
| 700 | /* Cut out the debugging stuff for utilities, but stop picky compilers |
| 701 | from giving warnings. */ |
| 702 | |
| 703 | #ifdef COMPILE_UTILITY |
| 704 | func = func; |
| 705 | linenumber = linenumber; |
| 706 | #else |
| 707 | DEBUG(D_memory) |
| 708 | debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func, |
| 709 | linenumber, pool_malloc); |
| 710 | |
| 711 | if (f.running_in_test_harness) |
| 712 | memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK); |
| 713 | #endif /* COMPILE_UTILITY */ |
| 714 | |
| 715 | free(bb); |
| 716 | return; |
| 717 | } |
| 718 | } |
| 719 | } |
| 720 | |
| 721 | |
| 722 | /************************************************ |
| 723 | * Move store * |
| 724 | ************************************************/ |
| 725 | |
/* Allocate a new block big enough to expand to the given size and
copy the current data into it. Free the old one if possible.
| 728 | |
| 729 | This function is specifically provided for use when reading very |
| 730 | long strings, e.g. header lines. When the string gets longer than a |
| 731 | complete block, it gets copied to a new block. It is helpful to free |
| 732 | the old block iff the previous copy of the string is at its start, |
| 733 | and therefore the only thing in it. Otherwise, for very long strings, |
| 734 | dead store can pile up somewhat disastrously. This function checks that |
| 735 | the pointer it is given is the first thing in a block, and that nothing |
| 736 | has been allocated since. If so, releases that block. |
| 737 | |
Arguments:
  block       the current data block, to be replaced
  tainted     taint class for the new allocation
  newsize     requested size for the new block
  len         number of bytes of data to copy from the old block
  func        function from which called
  linenumber  line number in source file
| 742 | |
| 743 | Returns: new location of data |
| 744 | */ |
| 745 | |
| 746 | void * |
| 747 | store_newblock_3(void * block, BOOL tainted, int newsize, int len, |
| 748 | const char * func, int linenumber) |
| 749 | { |
| 750 | int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool; |
| 751 | BOOL release_ok = !tainted && store_last_get[pool] == block; |
| 752 | uschar * newtext; |
| 753 | |
| 754 | #ifndef MACRO_PREDEF |
| 755 | if (is_tainted(block) != tainted) |
| 756 | die_tainted(US"store_newblock", CUS func, linenumber); |
| 757 | #endif |
| 758 | |
| 759 | newtext = store_get(newsize, tainted); |
| 760 | memcpy(newtext, block, len); |
| 761 | if (release_ok) store_release_3(block, pool, func, linenumber); |
| 762 | return (void *)newtext; |
| 763 | } |
| 764 | |
| 765 | |
| 766 | |
| 767 | |
| 768 | /******************************************************************************/ |
| 769 | static void * |
| 770 | store_alloc_tail(void * yield, int size, const char * func, int line, |
| 771 | const uschar * type) |
| 772 | { |
| 773 | if ((nonpool_malloc += size) > max_nonpool_malloc) |
| 774 | max_nonpool_malloc = nonpool_malloc; |
| 775 | |
| 776 | /* Cut out the debugging stuff for utilities, but stop picky compilers from |
| 777 | giving warnings. */ |
| 778 | |
| 779 | #ifdef COMPILE_UTILITY |
| 780 | func = func; line = line; type = type; |
| 781 | #else |
| 782 | |
| 783 | /* If running in test harness, spend time making sure all the new store |
| 784 | is not filled with zeros so as to catch problems. */ |
| 785 | |
| 786 | if (f.running_in_test_harness) |
| 787 | memset(yield, 0xF0, (size_t)size); |
| 788 | DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n", |
| 789 | type, yield, size, func, line, pool_malloc, nonpool_malloc); |
| 790 | #endif /* COMPILE_UTILITY */ |
| 791 | |
| 792 | return yield; |
| 793 | } |
| 794 | |
| 795 | /************************************************* |
| 796 | * Mmap store * |
| 797 | *************************************************/ |
| 798 | |
| 799 | static void * |
| 800 | store_mmap(int size, const char * func, int line) |
| 801 | { |
| 802 | void * yield, * top; |
| 803 | |
| 804 | if (size < 16) size = 16; |
| 805 | |
/* NB: mmap() returns MAP_FAILED, not NULL, on failure */
if ((yield = mmap(NULL, (size_t)size,
	  PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)) == MAP_FAILED)
| 808 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: " |
| 809 | "called from line %d of %s", size, line, func); |
| 810 | |
| 811 | if (yield < tainted_base) tainted_base = yield; |
| 812 | if ((top = US yield + size) > tainted_top) tainted_top = top; |
| 813 | |
| 814 | return store_alloc_tail(yield, size, func, line, US"Mmap"); |
| 815 | } |
| 816 | |
| 817 | /************************************************* |
| 818 | * Malloc store * |
| 819 | *************************************************/ |
| 820 | |
/* Running out of store is a total disaster for Exim. Some malloc functions
| 822 | do not run happily on very small sizes, nor do they document this fact. This |
| 823 | function is called via the macro store_malloc(). |
| 824 | |
| 825 | Arguments: |
| 826 | size amount of store wanted |
| 827 | func function from which called |
| 828 | linenumber line number in source file |
| 829 | |
| 830 | Returns: pointer to gotten store (panic on failure) |
| 831 | */ |
| 832 | |
| 833 | static void * |
| 834 | internal_store_malloc(int size, const char *func, int linenumber) |
| 835 | { |
| 836 | void * yield; |
| 837 | |
| 838 | if (size < 16) size = 16; |
| 839 | |
| 840 | if (!(yield = malloc((size_t)size))) |
| 841 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: " |
| 842 | "called from line %d in %s", size, linenumber, func); |
| 843 | |
| 844 | return store_alloc_tail(yield, size, func, linenumber, US"Malloc"); |
| 845 | } |
| 846 | |
| 847 | void * |
| 848 | store_malloc_3(int size, const char *func, int linenumber) |
| 849 | { |
| 850 | if (n_nonpool_blocks++ > max_nonpool_blocks) |
| 851 | max_nonpool_blocks = n_nonpool_blocks; |
| 852 | return internal_store_malloc(size, func, linenumber); |
| 853 | } |
| 854 | |
| 855 | |
| 856 | /************************************************ |
| 857 | * Free store * |
| 858 | ************************************************/ |
| 859 | |
| 860 | /* This function is called by the macro store_free(). |
| 861 | |
| 862 | Arguments: |
| 863 | block block of store to free |
| 864 | func function from which called |
| 865 | linenumber line number in source file |
| 866 | |
| 867 | Returns: nothing |
| 868 | */ |
| 869 | |
| 870 | static void |
| 871 | internal_untainted_free(void * block, const char * func, int linenumber) |
| 872 | { |
| 873 | #ifdef COMPILE_UTILITY |
| 874 | func = func; |
| 875 | linenumber = linenumber; |
| 876 | #else |
| 877 | DEBUG(D_memory) |
| 878 | debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber); |
| 879 | #endif /* COMPILE_UTILITY */ |
| 880 | free(block); |
| 881 | } |
| 882 | |
| 883 | void |
| 884 | store_free_3(void * block, const char * func, int linenumber) |
| 885 | { |
| 886 | n_nonpool_blocks--; |
| 887 | internal_untainted_free(block, func, linenumber); |
| 888 | } |
| 889 | |
| 890 | /******************************************************************************/ |
| 891 | static void |
| 892 | internal_tainted_free(storeblock * block, const char * func, int linenumber) |
| 893 | { |
| 894 | #ifdef COMPILE_UTILITY |
| 895 | func = func; |
| 896 | linenumber = linenumber; |
| 897 | #else |
| 898 | DEBUG(D_memory) |
| 899 | debug_printf("---Unmap %6p %-20s %4d\n", block, func, linenumber); |
| 900 | #endif |
| 901 | munmap((void *)block, block->length + ALIGNED_SIZEOF_STOREBLOCK); |
| 902 | } |
| 903 | |
| 904 | /******************************************************************************/ |
| 905 | /* Stats output on process exit */ |
| 906 | void |
| 907 | store_exit(void) |
| 908 | { |
| 909 | #ifndef COMPILE_UTILITY |
| 910 | DEBUG(D_memory) |
| 911 | { |
| 912 | debug_printf("----Exit nonpool max: %3d kB in %d blocks\n", |
| 913 | (max_nonpool_malloc+1023)/1024, max_nonpool_blocks); |
| 914 | debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024); |
| 915 | for (int i = 0; i < NPOOLS; i++) |
| 916 | debug_printf("----Exit pool %d max: %3d kB in %d blocks\t%s %s\n", |
| 917 | i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]); |
| 918 | } |
| 919 | #endif |
| 920 | } |
| 921 | |
| 922 | /* End of store.c */ |