/*************************************************
*     Exim - an Internet mail transport agent    *
*************************************************/

/* Copyright (c) University of Cambridge 1995 - 2018 */
/* Copyright (c) The Exim maintainers 2019 */
/* See the file NOTICE for conditions of use and distribution. */

/* Exim gets and frees all its store through these functions. In the original
implementation there was a lot of mallocing and freeing of small bits of store.
The philosophy has now changed to a scheme which includes the concept of
"stacking pools" of store. For the short-lived processes, there isn't any real
need to do any garbage collection, but the stack concept allows quick resetting
in places where this seems sensible.

Obviously the long-running processes (the daemon, the queue runner, and eximon)
must take care not to eat store.

The following different types of store are recognized:

. Long-lived, large blocks: This is implemented by retaining the original
  malloc/free functions, and it is used for permanent working buffers and for
  getting blocks to cut up for the other types.

. Long-lived, small blocks: This is used for blocks that have to survive until
  the process exits. It is implemented as a stacking pool (POOL_PERM). This is
  functionally the same as store_malloc(), except that the store can't be
  freed, but I expect it to be more efficient for handling small blocks.

. Short-lived, short blocks: Most of the dynamic store falls into this
  category. It is implemented as a stacking pool (POOL_MAIN) which is reset
  after accepting a message when multiple messages are received by a single
  process. Resetting happens at some other times as well, usually fairly
  locally after some specific processing that needs working store.

. There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
  This means it can be freed when search_tidyup() is called to close down all
  the lookup caching.

. Orthogonal to the three pool types, there are two classes of memory: untainted
  and tainted. The latter is used for values derived from untrusted input, and
  the string-expansion mechanism refuses to operate on such values (obviously,
  it can expand an untainted value to return a tainted result). The classes
  are implemented by duplicating the three pool types. Pool resets are requested
  against the untainted sibling and apply to both siblings. A short usage
  sketch follows this comment block.
*/
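
/* A minimal usage sketch (illustrative only; store_get() and store_get_perm()
are assumed to be the macro front-ends, defined elsewhere, that supply
__FUNCTION__ and __LINE__ to the _3 functions in this file):

  uschar * buf = store_get(1024, FALSE);      // untainted, current pool
  uschar * hdr = store_get(256, TRUE);        // tainted twin: from input
  uschar * cfg = store_get_perm(64, FALSE);   // survives until process exit
*/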


#include "exim.h"
/* keep config.h before memcheck.h, for NVALGRIND */
#include "config.h"

#include <sys/mman.h>
#include "memcheck.h"


/* We need to know how to align blocks of data for general use. I'm not sure
how to get an alignment factor in general. In the current world, a value of 8
is probably right, and this is sizeof(double) on some systems and sizeof(void
*) on others, so take the larger of those. Since everything in this expression
is a constant, the compiler should optimize it to a simple constant wherever it
appears (I checked that gcc does do this). */

#define alignment \
  (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))

/* store_reset() will not free the following block if the last used block has
less than this much left in it. */

#define STOREPOOL_MIN_SIZE 256

/* Structure describing the beginning of each big block. */

typedef struct storeblock {
  struct storeblock *next;
  size_t length;
} storeblock;

/* Just in case we find ourselves on a system where the structure above has a
length that is not a multiple of the alignment, set up a macro for the padded
length. */

#define ALIGNED_SIZEOF_STOREBLOCK \
  (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
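
/* Worked example (illustrative figures): with 8-byte pointers and size_t,
sizeof(storeblock) is 16, already a multiple of alignment 8, so
ALIGNED_SIZEOF_STOREBLOCK is 16. Were the struct 12 bytes, the macro would
round it up to 16. */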

/* Size of block to get from malloc to carve up into smaller ones. This
must be a multiple of the alignment. We assume that 8192 is going to be
suitably aligned. */

#define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)

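/* Continuing the illustrative figures: with a 16-byte aligned header,
STORE_BLOCK_SIZE is 8192 - 16 = 8176, so a pool block plus its storeblock
header occupies exactly 8192 bytes. */
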
/* Variables holding data for the local pools of store. The current pool number
is held in store_pool, which is global so that it can be changed from outside.
Setting the initial length values to -1 forces a malloc for the first call,
even if the length is zero (which is used for getting a point to reset to). */

int store_pool = POOL_MAIN;

#define NPOOLS 6
static storeblock *chainbase[NPOOLS];
static storeblock *current_block[NPOOLS];
static void *next_yield[NPOOLS];
static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };

/* The limits of the tainted pools. Tracking these on new allocations enables
a fast is_tainted implementation. We assume the kernel only allocates mmaps using
one side or the other of data+heap, not both. */

void * tainted_base = (void *)-1;
void * tainted_top = (void *)0;
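
/* A sketch of the fast check these bounds enable (the real is_tainted() is
defined elsewhere and may differ in detail):

  static inline BOOL is_tainted_sketch(const void * p)
  { return p >= tainted_base && p < tainted_top; }
*/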

/* pool_malloc holds the amount of memory used by the store pools; this goes up
and down as store is reset or released. nonpool_malloc is the total got by
malloc from other calls; this doesn't go down because it is just freed by
pointer. */

static int pool_malloc;
static int nonpool_malloc;

/* These variables are set by store_get() to its yield, and by store_reset()
to NULL. This enables string_cat() to optimize its store handling for very
long strings. That's why they are global (one entry per pool). */

void *store_last_get[NPOOLS];

/* These are purely for stats-gathering */

static int nbytes[NPOOLS];      /* current bytes allocated */
static int maxbytes[NPOOLS];    /* max number reached */
static int nblocks[NPOOLS];     /* current number of blocks allocated */
static int maxblocks[NPOOLS];
static int n_nonpool_blocks;    /* current number of direct store_malloc() blocks */
static int max_nonpool_blocks;
static int max_pool_malloc;     /* max value for pool_malloc */
static int max_nonpool_malloc;  /* max value for nonpool_malloc */


static const uschar * pooluse[NPOOLS] = {
[POOL_MAIN]         = US"main",
[POOL_PERM]         = US"perm",
[POOL_SEARCH]       = US"search",
[POOL_TAINT_MAIN]   = US"main",
[POOL_TAINT_PERM]   = US"perm",
[POOL_TAINT_SEARCH] = US"search",
};
static const uschar * poolclass[NPOOLS] = {
[POOL_MAIN]         = US"untainted",
[POOL_PERM]         = US"untainted",
[POOL_SEARCH]       = US"untainted",
[POOL_TAINT_MAIN]   = US"tainted",
[POOL_TAINT_PERM]   = US"tainted",
[POOL_TAINT_SEARCH] = US"tainted",
};


static void * store_mmap(int, const char *, int);
static void * internal_store_malloc(int, const char *, int);
static void   internal_store_free(void *, const char *, int linenumber);

/******************************************************************************/

void
die_tainted(const uschar * msg, const uschar * func, int line)
{
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
  msg, func, line);
}


/*************************************************
*       Get a block from the current pool        *
*************************************************/

/* Running out of store is a total disaster. This function is called via the
macro store_get(). It passes back a block of store within the current big
block, getting a new one if necessary. The address is saved in
store_last_get for the pool.

Arguments:
  size        amount wanted
  tainted     class of memory required
  func        function from which called
  linenumber  line number in source file

Returns:      pointer to store (panic on malloc failure)
*/

void *
store_get_3(int size, BOOL tainted, const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;

/* Round up the size to a multiple of the alignment. Although this looks a
messy statement, because "alignment" is a constant expression, the compiler can
do a reasonable job of optimizing, especially if the value of "alignment" is a
power of two. I checked this with -O2, and gcc did very well, compiling it to 4
instructions on a Sparc (alignment = 8). */

if (size % alignment != 0) size += alignment - (size % alignment);
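
/* Worked example (illustrative): with alignment 8, a request of 20 bytes
is rounded up by 8 - (20 % 8) = 4 to 24; a request of 24 is unchanged. */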

/* If there isn't room in the current block, get a new one. The minimum
size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
these functions are mostly called for small amounts of store. */

if (size > yield_length[pool])
  {
  int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
  int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
  storeblock * newblock;

  /* Sometimes store_reset() may leave a block for us; check if we can use it */

  if (  (newblock = current_block[pool])
     && (newblock = newblock->next)
     && newblock->length < length
     )
    {
    /* Give up on this block, because it's too small */
    nblocks[pool]--;
    if (pool < POOL_TAINT_BASE)
      internal_store_free(newblock, func, linenumber);
    else
      {
#ifndef COMPILE_UTILITY
      DEBUG(D_memory)
        debug_printf("---Unmap %6p %-20s %4d\n", newblock, func, linenumber);
#endif
      munmap(newblock, newblock->length + ALIGNED_SIZEOF_STOREBLOCK);
      }
    newblock = NULL;
    }

  /* If there was no free block, get a new one */

  if (!newblock)
    {
    if ((nbytes[pool] += mlength) > maxbytes[pool])
      maxbytes[pool] = nbytes[pool];
    if ((pool_malloc += mlength) > max_pool_malloc)  /* Used in pools */
      max_pool_malloc = pool_malloc;
    nonpool_malloc -= mlength;                       /* Exclude from overall total */
    if (++nblocks[pool] > maxblocks[pool])
      maxblocks[pool] = nblocks[pool];

    newblock = tainted
      ? store_mmap(mlength, func, linenumber)
      : internal_store_malloc(mlength, func, linenumber);
    newblock->next = NULL;
    newblock->length = length;

    if (!chainbase[pool])
      chainbase[pool] = newblock;
    else
      current_block[pool]->next = newblock;
    }

  current_block[pool] = newblock;
  yield_length[pool] = newblock->length;
  next_yield[pool] =
    (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
  (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
  }

/* There's (now) enough room in the current block; the yield is the next
pointer. */

store_last_get[pool] = next_yield[pool];

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
    store_last_get[pool], size, func, linenumber);
#endif  /* COMPILE_UTILITY */

(void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);
/* Update next pointer and number of bytes left in the current block. */

next_yield[pool] = (void *)(CS next_yield[pool] + size);
yield_length[pool] -= size;
return store_last_get[pool];
}



/*************************************************
*       Get a block from the PERM pool           *
*************************************************/

/* This is just a convenience function, useful when just a single block is to
be obtained.

Arguments:
  size        amount wanted
  tainted     class of memory required
  func        function from which called
  linenumber  line number in source file

Returns:      pointer to store (panic on malloc failure)
*/

void *
store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
{
void *yield;
int old_pool = store_pool;
store_pool = POOL_PERM;
yield = store_get_3(size, tainted, func, linenumber);
store_pool = old_pool;
return yield;
}



/*************************************************
*      Extend a block if it is at the top        *
*************************************************/

/* While reading strings of unknown length, it is often the case that the
string is being read into the block at the top of the stack. If it needs to be
extended, it is more efficient just to extend within the top block rather than
allocate a new block and then have to copy the data. This function is provided
for the use of string_cat(), but of course can be used elsewhere too.
The block itself is not expanded; only the top allocation from it.

Arguments:
  ptr         pointer to store block
  tainted     class of memory required
  oldsize     current size of the block, as requested by user
  newsize     new size required
  func        function from which called
  linenumber  line number in source file

Returns:      TRUE if the block is at the top of the stack and has been
              extended; FALSE if it isn't at the top of the stack, or cannot
              be extended
*/

BOOL
store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
  const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
int inc = newsize - oldsize;
int rounded_oldsize = oldsize;

/* Check that the block being extended was already of the required taint
status; refuse to extend if not. */

if (is_tainted(ptr) != tainted)
  return FALSE;

if (rounded_oldsize % alignment != 0)
  rounded_oldsize += alignment - (rounded_oldsize % alignment);

if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
    inc > yield_length[pool] + rounded_oldsize - oldsize)
  return FALSE;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
    func, linenumber);
#endif  /* COMPILE_UTILITY */

if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
next_yield[pool] = CS ptr + newsize;
yield_length[pool] -= newsize - rounded_oldsize;
(void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
return TRUE;
}
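
/* A hedged sketch of the intended calling pattern, roughly as string_cat()
might use it (store_extend() and store_newblock() are assumed to be the macro
front-ends; sizes are illustrative):

  int oldsize = 64, newsize = 128;
  uschar * s = store_get(oldsize, FALSE);
  ...fill s...
  if (!store_extend(s, FALSE, oldsize, newsize))
    s = store_newblock(s, FALSE, newsize, oldsize);  // copy into bigger block
*/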



/*************************************************
*    Back up to a previous point on the stack    *
*************************************************/

/* This function resets the next pointer, freeing any subsequent whole blocks
that are now unused. Call with a cookie obtained from store_mark() only; do
not call with a pointer returned by store_get(). Both the untainted and tainted
pools corresponding to store_pool are reset.

Arguments:
  r           place to back up to
  func        function from which called
  linenumber  line number in source file

Returns:      nothing
*/

static void
internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
{
storeblock * bb;
storeblock * b = current_block[pool];
char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
int newlength, count;
#ifndef COMPILE_UTILITY
int oldmalloc = pool_malloc;
#endif

/* Last store operation was not a get */

store_last_get[pool] = NULL;

/* See if the place is in the current block - as it often will be. Otherwise,
search for the block in which it lies. */

if (CS ptr < bc || CS ptr > bc + b->length)
  {
  for (b = chainbase[pool]; b; b = b->next)
    {
    bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS ptr >= bc && CS ptr <= bc + b->length) break;
    }
  if (!b)
    log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
      "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
  }

/* Back up, rounding to the alignment if necessary. When testing, flatten
the released memory. */

newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
if (debug_store)
  {
  assert_no_variables(ptr, newlength, func, linenumber);
  if (f.running_in_test_harness)
    {
    (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
    memset(ptr, 0xF0, newlength);
    }
  }
#endif
(void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
next_yield[pool] = CS ptr + (newlength % alignment);
count = yield_length[pool];
count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
current_block[pool] = b;

/* Free any subsequent block. Do NOT free the first
successor, if our current block has less than 256 bytes left. This should
prevent us from flapping memory. However, keep this block only when it has
the default size. */

if (  yield_length[pool] < STOREPOOL_MIN_SIZE
   && b->next
   && b->next->length == STORE_BLOCK_SIZE)
  {
  b = b->next;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
    b->length - ALIGNED_SIZEOF_STOREBLOCK);
  }

bb = b->next;
b->next = NULL;

while ((b = bb))
  {
  int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  bb = bb->next;
  nbytes[pool] -= siz;
  pool_malloc -= siz;
  nblocks[pool]--;
  if (pool < POOL_TAINT_BASE)
    internal_store_free(b, func, linenumber);
  else
    {
#ifndef COMPILE_UTILITY
    DEBUG(D_memory)
      debug_printf("---Unmap %6p %-20s %4d\n", b, func, linenumber);
#endif
    munmap(b, b->length + ALIGNED_SIZEOF_STOREBLOCK);
    }
  }

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
    count + oldmalloc - pool_malloc,
    func, linenumber, pool_malloc);
#endif  /* COMPILE_UTILITY */
}


rmark
store_reset_3(rmark r, int pool, const char *func, int linenumber)
{
void ** ptr = r;

if (pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
if (!r)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called with bad mark: %s %d\n", func, linenumber);

internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
internal_store_reset(ptr, pool, func, linenumber);
return NULL;
}
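
/* Mark/reset usage sketch (illustrative; store_mark()/store_reset() are
assumed to be the macro front-ends for the functions above):

  rmark m = store_mark();
  uschar * tmp = store_get(256, FALSE);   // scratch in the current pool
  ...work with tmp...
  m = store_reset(m);                     // winds back both twin pools
*/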



/* Free tail-end unused allocation. This lets us allocate a big chunk
early, for cases when we only discover later how much was really needed.

Can be called with a value from store_get(), or an offset after such. Only
the tainted or untainted pool that serviced the store_get() will be affected.

This is mostly a cut-down version of internal_store_reset().
XXX needs rationalising
*/

void
store_release_above_3(void *ptr, const char *func, int linenumber)
{
/* Search all pools' "current" blocks. If it isn't one of those,
ignore it (it usually will be). */

for (int pool = 0; pool < nelem(current_block); pool++)
  {
  storeblock * b = current_block[pool];
  char * bc;
  int count, newlength;

  if (!b)
    continue;

  bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
  if (CS ptr < bc || CS ptr > bc + b->length)
    continue;

  /* Last store operation was not a get */

  store_last_get[pool] = NULL;

  /* Back up, rounding to the alignment if necessary. When testing, flatten
  the released memory. */

  newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
  if (debug_store)
    {
    assert_no_variables(ptr, newlength, func, linenumber);
    if (f.running_in_test_harness)
      {
      (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
      memset(ptr, 0xF0, newlength);
      }
    }
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
  next_yield[pool] = CS ptr + (newlength % alignment);
  count = yield_length[pool];
  count = (yield_length[pool] = newlength - (newlength % alignment)) - count;

  /* Cut out the debugging stuff for utilities, but stop picky compilers from
  giving warnings. */

#ifdef COMPILE_UTILITY
  func = func;
  linenumber = linenumber;
#else
  DEBUG(D_memory)
    debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
      func, linenumber, pool_malloc);
#endif
  return;
  }
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("non-last memory release try: %s %d\n", func, linenumber);
#endif
}
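
/* Illustrative use of the tail-release (macro front-end assumed; BIG and
fill_in() are hypothetical):

  uschar * buf = store_get(BIG, FALSE);   // pessimistic allocation
  int used = fill_in(buf);                // discover the real need
  store_release_above(buf + used);        // hand back the unused tail
*/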



rmark
store_mark_3(const char *func, int linenumber)
{
void ** p;

if (store_pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);

/* Stash a mark for the tainted-twin release, in the untainted twin. Return
a cookie (actually the address in the untainted pool) to the caller.
Reset uses the cookie to recover the tainted mark, winds back the tainted
pool with it, and winds back the untainted pool with the cookie. */

p = store_get_3(sizeof(void *), FALSE, func, linenumber);
*p = store_get_3(0, TRUE, func, linenumber);
return p;
}
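
/* Cookie layout sketch (illustrative): the rmark returned points into the
untainted pool, at a pointer-sized cell holding the matching tainted-pool
mark:

  untainted pool: ... [ void * cell ] <-- rmark r   (*r is the tainted mark)
  tainted pool:   ... <-- *r  (a zero-length get marking the tainted top)
*/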




/************************************************
*             Release store                     *
************************************************/

/* This function checks that the pointer it is given is the first thing in a
block, and if so, releases that block.

Arguments:
  block       block of store to consider
  pool        pool the block belongs to
  func        function from which called
  linenumber  line number in source file

Returns:      nothing
*/

static void
store_release_3(void * block, int pool, const char * func, int linenumber)
059ec3d9 645{
059ec3d9
PH
646/* It will never be the first block, so no need to check that. */
647
f3ebb786 648for (storeblock * b = chainbase[pool]; b; b = b->next)
059ec3d9 649 {
459fca58
JH
650 storeblock * bb = b->next;
651 if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
059ec3d9 652 {
f3ebb786 653 int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
059ec3d9 654 b->next = bb->next;
f3ebb786
JH
655 nbytes[pool] -= siz;
656 pool_malloc -= siz;
657 nblocks[pool]--;
059ec3d9
PH
658
659 /* Cut out the debugging stuff for utilities, but stop picky compilers
660 from giving warnings. */
661
459fca58 662#ifdef COMPILE_UTILITY
f3ebb786 663 func = func;
059ec3d9 664 linenumber = linenumber;
459fca58 665#else
059ec3d9 666 DEBUG(D_memory)
f3ebb786
JH
667 debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
668 linenumber, pool_malloc);
459fca58 669
8768d548 670 if (f.running_in_test_harness)
059ec3d9 671 memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
459fca58 672#endif /* COMPILE_UTILITY */
059ec3d9
PH
673
674 free(bb);
675 return;
676 }
677 }
678}


/************************************************
*             Move store                        *
************************************************/

/* Allocate a new block big enough to expand to the given size and
copy the current data into it. Free the old one if possible.

This function is specifically provided for use when reading very
long strings, e.g. header lines. When the string gets longer than a
complete block, it gets copied to a new block. It is helpful to free
the old block iff the previous copy of the string is at its start,
and therefore the only thing in it. Otherwise, for very long strings,
dead store can pile up somewhat disastrously. This function checks that
the pointer it is given is the first thing in a block, and that nothing
has been allocated since. If so, releases that block.

Arguments:
  block       the current block of store
  tainted     class of memory required
  newsize     requested size for the new block
  len         number of bytes of data to copy across
  func        function from which called
  linenumber  line number in source file

Returns:      new location of data
*/

void *
store_newblock_3(void * block, BOOL tainted, int newsize, int len,
  const char * func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
BOOL release_ok = !tainted && store_last_get[pool] == block;
uschar * newtext;

if (is_tainted(block) != tainted)
  die_tainted(US"store_newblock", CUS func, linenumber);

newtext = store_get(newsize, tainted);
memcpy(newtext, block, len);
if (release_ok) store_release_3(block, pool, func, linenumber);
return (void *)newtext;
}




/******************************************************************************/
static void *
store_alloc_tail(void * yield, int size, const char * func, int line,
  const uschar * type)
{
if ((nonpool_malloc += size) > max_nonpool_malloc)
  max_nonpool_malloc = nonpool_malloc;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func; line = line; type = type;
#else

/* If running in test harness, spend time making sure all the new store
is not filled with zeros so as to catch problems. */

if (f.running_in_test_harness)
  memset(yield, 0xF0, (size_t)size);
DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n",
  type, yield, size, func, line, pool_malloc, nonpool_malloc);
#endif  /* COMPILE_UTILITY */

return yield;
}

/*************************************************
*                 Mmap store                     *
*************************************************/

static void *
store_mmap(int size, const char * func, int line)
{
void * yield, * top;

if (size < 16) size = 16;

/* mmap() reports failure as MAP_FAILED, not NULL */

if ((yield = mmap(NULL, (size_t)size,
      PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)) == MAP_FAILED)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
    "called from line %d of %s", size, line, func);

if (yield < tainted_base) tainted_base = yield;
if ((top = yield + size) > tainted_top) tainted_top = top;

return store_alloc_tail(yield, size, func, line, US"Mmap");
}

/*************************************************
*                Malloc store                    *
*************************************************/

/* Running out of store is a total disaster for exim. Some malloc functions
do not run happily on very small sizes, nor do they document this fact. This
function is called via the macro store_malloc().

Arguments:
  size        amount of store wanted
  func        function from which called
  linenumber  line number in source file

Returns:      pointer to gotten store (panic on failure)
*/

static void *
internal_store_malloc(int size, const char *func, int linenumber)
{
void * yield;

if (size < 16) size = 16;

if (!(yield = malloc((size_t)size)))
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
    "called from line %d in %s", size, linenumber, func);

return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
}

void *
store_malloc_3(int size, const char *func, int linenumber)
{
if (n_nonpool_blocks++ > max_nonpool_blocks)
  max_nonpool_blocks = n_nonpool_blocks;
return internal_store_malloc(size, func, linenumber);
}


/************************************************
*             Free store                        *
************************************************/

/* This function is called by the macro store_free().

Arguments:
  block       block of store to free
  func        function from which called
  linenumber  line number in source file

Returns:      nothing
*/

static void
internal_store_free(void *block, const char *func, int linenumber)
{
#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
#endif  /* COMPILE_UTILITY */
free(block);
}

void
store_free_3(void *block, const char *func, int linenumber)
{
n_nonpool_blocks--;
internal_store_free(block, func, linenumber);
}

/******************************************************************************/
/* Stats output on process exit */
void
store_exit(void)
{
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  {
  debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
    (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
  debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
  for (int i = 0; i < NPOOLS; i++)
    debug_printf("----Exit pool %d max: %3d kB in %d blocks\t%s %s\n",
      i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);
  }
#endif
}

/* End of store.c */