Fix Solaris build (pt.2)
[exim.git] / src / src / store.c
/*************************************************
*     Exim - an Internet mail transport agent    *
*************************************************/

/* Copyright (c) University of Cambridge 1995 - 2018 */
/* Copyright (c) The Exim maintainers 2019 */
/* See the file NOTICE for conditions of use and distribution. */

/* Exim gets and frees all its store through these functions. In the original
implementation there was a lot of mallocing and freeing of small bits of store.
The philosophy has now changed to a scheme which includes the concept of
"stacking pools" of store. For the short-lived processes, there isn't any real
need to do any garbage collection, but the stack concept allows quick resetting
in places where this seems sensible.

Obviously the long-running processes (the daemon, the queue runner, and eximon)
must take care not to eat store.

The following different types of store are recognized:

. Long-lived, large blocks: This is implemented by retaining the original
  malloc/free functions, and it is used for permanent working buffers and for
  getting blocks to cut up for the other types.

. Long-lived, small blocks: This is used for blocks that have to survive until
  the process exits. It is implemented as a stacking pool (POOL_PERM). This is
  functionally the same as store_malloc(), except that the store can't be
  freed, but I expect it to be more efficient for handling small blocks.

. Short-lived, short blocks: Most of the dynamic store falls into this
  category. It is implemented as a stacking pool (POOL_MAIN) which is reset
  after accepting a message when multiple messages are received by a single
  process. Resetting happens at some other times as well, usually fairly
  locally after some specific processing that needs working store.

. There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
  This means it can be freed when search_tidyup() is called to close down all
  the lookup caching.

. Orthogonal to the three pool types, there are two classes of memory: untainted
  and tainted. The latter is used for values derived from untrusted input, and
  the string-expansion mechanism refuses to operate on such values (obviously,
  it can expand an untainted value to return a tainted result). The classes
  are implemented by duplicating the three pool types. Pool resets are requested
  against the untainted sibling and apply to both siblings.

  Only memory blocks requested for tainted use are regarded as tainted; anything
  else (including stack auto variables) is untainted. Care is needed when coding
  to not copy untrusted data into untainted memory, as downstream taint-checks
  would be avoided.

  Internally we currently use malloc for untainted pools and mmap for tainted
  pools. The disparity is for speed of testing the taintedness of pointers:
  Linux appears to use distinct non-overlapping address ranges for mmap vs.
  everything else, so only two pointer-compares suffice for the test. Other
  OSes cannot use that optimisation, and a lengthier test against the limits
  of the tainted-pool allocations has to be done.

  Intermediate layers (eg. the string functions) can test for taint, and use this
  for ensuring that results have proper state. For example the
  string_vformat_trc() routine supporting the string_sprintf() interface will
  recopy a string being built into a tainted allocation if it meets a %s for a
  tainted argument. Any intermediate-layer function that (can) return a new
  allocation should behave this way, returning a tainted result if any tainted
  content is used. Intermediate-layer functions (eg. Ustrncpy) that modify
  existing allocations fail if tainted data is written into an untainted area.
  Users of functions that modify existing allocations should check if a tainted
  source and an untainted destination is used, and fail instead (sprintf() being
  the classic case).
*/
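
/* Illustrative sketch (not part of the build): typical use of the pool
interface described above, via the public macros that wrap the *_3()
functions defined below. Exact call sites in Exim vary.

     rmark reset_point = store_mark();        // mark both twin pools
     uschar * buf = store_get(1024, FALSE);   // untainted working space
     uschar * line = store_get(len, TRUE);    // tainted: derived from input
     // ... use buf and line ...
     reset_point = store_reset(reset_point);  // winds back both twins
*/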


#include "exim.h"
/* keep config.h before memcheck.h, for NVALGRIND */
#include "config.h"

#include <sys/mman.h>
#include "memcheck.h"


/* We need to know how to align blocks of data for general use. I'm not sure
how to get an alignment factor in general. In the current world, a value of 8
is probably right, and this is sizeof(double) on some systems and sizeof(void
*) on others, so take the larger of those. Since everything in this expression
is a constant, the compiler should optimize it to a simple constant wherever it
appears (I checked that gcc does do this). */

#define alignment \
  (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))

/* store_reset() will not free the following block if the last used block has
less than this much left in it. */

#define STOREPOOL_MIN_SIZE 256

/* Structure describing the beginning of each big block. */

typedef struct storeblock {
  struct storeblock *next;
  size_t length;
} storeblock;

/* Just in case we find ourselves on a system where the structure above has a
length that is not a multiple of the alignment, set up a macro for the padded
length. */

#define ALIGNED_SIZEOF_STOREBLOCK \
  (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)

/* Size of block to get from malloc to carve up into smaller ones. This
must be a multiple of the alignment. We assume that 8192 is going to be
suitably aligned. */

#define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)
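
/* Worked example (assuming a typical LP64 build): sizeof(storeblock) is 16
(an 8-byte pointer plus an 8-byte size_t), already a multiple of the 8-byte
alignment, so ALIGNED_SIZEOF_STOREBLOCK is 16 and STORE_BLOCK_SIZE is 8176.
A fresh block then requests 8176 + 16 = 8192 bytes from malloc or mmap,
keeping the underlying allocation at a round 8 kB. */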

/* Variables holding data for the local pools of store. The current pool number
is held in store_pool, which is global so that it can be changed from outside.
Setting the initial length values to -1 forces a malloc for the first call,
even if the length is zero (which is used for getting a point to reset to). */

int store_pool = POOL_MAIN;

#define NPOOLS 6
static storeblock *chainbase[NPOOLS];
static storeblock *current_block[NPOOLS];
static void *next_yield[NPOOLS];
static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };

/* The limits of the tainted pools. Tracking these on new allocations enables
a fast is_tainted implementation. We assume the kernel only allocates mmaps using
one side or the other of data+heap, not both. */

void * tainted_base = (void *)-1;
void * tainted_top = (void *)0;
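
/* Sketch of the fast-path test these bounds enable (illustrative; the real
is_tainted() macro lives in a header and is only usable when TAINT_CHECK_FAST
is defined). With all tainted blocks mmap()ed into one contiguous address
region, two pointer compares suffice:

     #define is_tainted(p) \
       ((void *)(p) >= tainted_base && (void *)(p) < tainted_top)

Platforms that intermix mmap and malloc addresses must instead use the
per-block scan in is_tainted_fn() below. */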

/* pool_malloc holds the amount of memory used by the store pools; this goes up
and down as store is reset or released. nonpool_malloc is the total got by
malloc from other calls; this doesn't go down because it is just freed by
pointer. */

static int pool_malloc;
static int nonpool_malloc;

/* This variable is set by store_get() to its yield, and by store_reset() to
NULL. This enables string_cat() to optimize its store handling for very long
strings. That's why the variable is global. */

void *store_last_get[NPOOLS];

/* These are purely for stats-gathering */

static int nbytes[NPOOLS];        /* current bytes allocated */
static int maxbytes[NPOOLS];      /* max number reached */
static int nblocks[NPOOLS];       /* current number of blocks allocated */
static int maxblocks[NPOOLS];
static int n_nonpool_blocks;      /* current number of direct store_malloc() blocks */
static int max_nonpool_blocks;
static int max_pool_malloc;       /* max value for pool_malloc */
static int max_nonpool_malloc;    /* max value for nonpool_malloc */


#ifndef COMPILE_UTILITY
static const uschar * pooluse[NPOOLS] = {
[POOL_MAIN] = US"main",
[POOL_PERM] = US"perm",
[POOL_SEARCH] = US"search",
[POOL_TAINT_MAIN] = US"main",
[POOL_TAINT_PERM] = US"perm",
[POOL_TAINT_SEARCH] = US"search",
};
static const uschar * poolclass[NPOOLS] = {
[POOL_MAIN] = US"untainted",
[POOL_PERM] = US"untainted",
[POOL_SEARCH] = US"untainted",
[POOL_TAINT_MAIN] = US"tainted",
[POOL_TAINT_PERM] = US"tainted",
[POOL_TAINT_SEARCH] = US"tainted",
};
#endif


static void * store_mmap(int, const char *, int);
static void * internal_store_malloc(int, const char *, int);
static void internal_untainted_free(void *, const char *, int linenumber);
static void internal_tainted_free(storeblock *, const char *, int linenumber);

/******************************************************************************/

#ifndef TAINT_CHECK_FAST
/* Test if a pointer refers to tainted memory.

This is the slower version of the check, for use when the platform intermixes
malloc and mmap area addresses. Test against the current block of each tainted
pool first, then against all blocks of all tainted pools.

Return: TRUE iff tainted
*/

BOOL
is_tainted_fn(const void * p)
{
storeblock * b;
int pool;

for (pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
  if ((b = current_block[pool]))
    {
    char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS p >= bc && CS p <= bc + b->length) return TRUE;
    }

for (pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
  for (b = chainbase[pool]; b; b = b->next)
    {
    char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS p >= bc && CS p <= bc + b->length) return TRUE;
    }
return FALSE;
}
#endif


void
die_tainted(const uschar * msg, const uschar * func, int line)
{
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
  msg, func, line);
}


/*************************************************
*       Get a block from the current pool        *
*************************************************/

/* Running out of store is a total disaster. This function is called via the
macro store_get(). It passes back a block of store within the current big
block, getting a new one if necessary. The address is saved in
store_last_get for the pool.

Arguments:
  size        amount wanted, bytes
  tainted     class: set to true for untrusted data (eg. from smtp input)
  func        function from which called
  linenumber  line number in source file

Returns:    pointer to store (panic on malloc failure)
*/

void *
store_get_3(int size, BOOL tainted, const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;

/* Round up the size to a multiple of the alignment. Although this looks a
messy statement, because "alignment" is a constant expression, the compiler can
do a reasonable job of optimizing, especially if the value of "alignment" is a
power of two. I checked this with -O2, and gcc did very well, compiling it to 4
instructions on a Sparc (alignment = 8). */

if (size % alignment != 0) size += alignment - (size % alignment);
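
/* Example: with alignment 8, a request for 13 bytes becomes 13 + (8 - 13%8)
= 16, while a size that is already a multiple of 8 is left unchanged. */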

/* If there isn't room in the current block, get a new one. The minimum
size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
these functions are mostly called for small amounts of store. */

if (size > yield_length[pool])
  {
  int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
  int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
  storeblock * newblock;

  /* Sometimes store_reset() may leave a block for us; check if we can use it */

  if (  (newblock = current_block[pool])
     && (newblock = newblock->next)
     && newblock->length < length
     )
    {
    /* Give up on this block, because it's too small */
    nblocks[pool]--;
    if (pool < POOL_TAINT_BASE)
      internal_untainted_free(newblock, func, linenumber);
    else
      internal_tainted_free(newblock, func, linenumber);
    newblock = NULL;
    }

  /* If there was no free block, get a new one */

  if (!newblock)
    {
    if ((nbytes[pool] += mlength) > maxbytes[pool])
      maxbytes[pool] = nbytes[pool];
    if ((pool_malloc += mlength) > max_pool_malloc)	/* Used in pools */
      max_pool_malloc = pool_malloc;
    nonpool_malloc -= mlength;			/* Exclude from overall total */
    if (++nblocks[pool] > maxblocks[pool])
      maxblocks[pool] = nblocks[pool];

    newblock = tainted
      ? store_mmap(mlength, func, linenumber)
      : internal_store_malloc(mlength, func, linenumber);
    newblock->next = NULL;
    newblock->length = length;

    if (!chainbase[pool])
      chainbase[pool] = newblock;
    else
      current_block[pool]->next = newblock;
    }

  current_block[pool] = newblock;
  yield_length[pool] = newblock->length;
  next_yield[pool] =
    (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
  (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
  }

/* There's (now) enough room in the current block; the yield is the next
pointer. */

store_last_get[pool] = next_yield[pool];

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
    store_last_get[pool], size, func, linenumber);
#endif /* COMPILE_UTILITY */

(void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);
/* Update next pointer and number of bytes left in the current block. */

next_yield[pool] = (void *)(CS next_yield[pool] + size);
yield_length[pool] -= size;
return store_last_get[pool];
}



/*************************************************
*       Get a block from the PERM pool           *
*************************************************/

/* This is just a convenience function, useful when just a single block is to
be obtained.

Arguments:
  size        amount wanted
  func        function from which called
  linenumber  line number in source file

Returns:    pointer to store (panic on malloc failure)
*/

void *
store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
{
void *yield;
int old_pool = store_pool;
store_pool = POOL_PERM;
yield = store_get_3(size, tainted, func, linenumber);
store_pool = old_pool;
return yield;
}



/*************************************************
*      Extend a block if it is at the top        *
*************************************************/

/* While reading strings of unknown length, it is often the case that the
string is being read into the block at the top of the stack. If it needs to be
extended, it is more efficient just to extend within the top block rather than
allocate a new block and then have to copy the data. This function is provided
for the use of string_cat(), but of course can be used elsewhere too.
The block itself is not expanded; only the top allocation from it.

Arguments:
  ptr         pointer to store block
  tainted     expected taint class of the block
  oldsize     current size of the block, as requested by user
  newsize     new size required
  func        function from which called
  linenumber  line number in source file

Returns:    TRUE if the block is at the top of the stack and has been
            extended; FALSE if it isn't at the top of the stack, or cannot
            be extended
*/

BOOL
store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
  const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
int inc = newsize - oldsize;
int rounded_oldsize = oldsize;

/* Check that the block being extended was already of the required taint
status; refuse to extend if not. */

if (is_tainted(ptr) != tainted)
  return FALSE;

if (rounded_oldsize % alignment != 0)
  rounded_oldsize += alignment - (rounded_oldsize % alignment);

if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
    inc > yield_length[pool] + rounded_oldsize - oldsize)
  return FALSE;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
    func, linenumber);
#endif /* COMPILE_UTILITY */

if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
next_yield[pool] = CS ptr + newsize;
yield_length[pool] -= newsize - rounded_oldsize;
(void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
return TRUE;
}
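
/* Illustrative sketch (not part of the build): a string builder growing its
top-of-pool allocation in place where possible, copying only when it must.
The wrapper macros store_extend()/store_newblock() are assumed here; real
callers include the string_cat() family.

     int oldsize = 64, newsize = 128;
     BOOL tainted = is_tainted(src);            // src: some source data
     uschar * s = store_get(oldsize, tainted);
     // ... s fills up ...
     if (!store_extend(s, tainted, oldsize, newsize))
       s = store_newblock(s, tainted, newsize, oldsize);
*/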




/*************************************************
*    Back up to a previous point on the stack    *
*************************************************/

/* This function resets the next pointer, freeing any subsequent whole blocks
that are now unused. Call with a cookie obtained from store_mark() only; do
not call with a pointer returned by store_get(). Both the untainted and tainted
pools corresponding to store_pool are reset.

Arguments:
  r           place to back up to
  func        function from which called
  linenumber  line number in source file

Returns:    nothing
*/

static void
internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
{
storeblock * bb;
storeblock * b = current_block[pool];
char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
int newlength, count;
#ifndef COMPILE_UTILITY
int oldmalloc = pool_malloc;
#endif

/* Last store operation was not a get */

store_last_get[pool] = NULL;

/* See if the place is in the current block - as it often will be. Otherwise,
search for the block in which it lies. */

if (CS ptr < bc || CS ptr > bc + b->length)
  {
  for (b = chainbase[pool]; b; b = b->next)
    {
    bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS ptr >= bc && CS ptr <= bc + b->length) break;
    }
  if (!b)
    log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
      "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
  }

/* Back up, rounding to the alignment if necessary. When testing, flatten
the released memory. */

newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
if (debug_store)
  {
  assert_no_variables(ptr, newlength, func, linenumber);
  if (f.running_in_test_harness)
    {
    (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
    memset(ptr, 0xF0, newlength);
    }
  }
#endif
(void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
next_yield[pool] = CS ptr + (newlength % alignment);
count = yield_length[pool];
count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
current_block[pool] = b;

/* Free any subsequent block. Do NOT free the first successor, if our
current block has less than 256 bytes left. This should prevent us from
flapping memory. However, keep this block only when it has the default size. */

if (  yield_length[pool] < STOREPOOL_MIN_SIZE
   && b->next
   && b->next->length == STORE_BLOCK_SIZE)
  {
  b = b->next;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
    b->length - ALIGNED_SIZEOF_STOREBLOCK);
  }

bb = b->next;
b->next = NULL;

while ((b = bb))
  {
  int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  bb = bb->next;
  nbytes[pool] -= siz;
  pool_malloc -= siz;
  nblocks[pool]--;
  if (pool < POOL_TAINT_BASE)
    internal_untainted_free(b, func, linenumber);
  else
    internal_tainted_free(b, func, linenumber);
  }

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
    count + oldmalloc - pool_malloc,
    func, linenumber, pool_malloc);
#endif /* COMPILE_UTILITY */
}


rmark
store_reset_3(rmark r, int pool, const char *func, int linenumber)
{
void ** ptr = r;

if (pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
if (!r)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called with bad mark: %s %d\n", func, linenumber);

internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
internal_store_reset(ptr, pool, func, linenumber);
return NULL;
}



/* Free tail-end unused allocation. This lets us allocate a big chunk
early, for cases when we only discover later how much was really needed.

Can be called with a value from store_get(), or an offset after such. Only
the tainted or untainted pool that serviced the store_get() will be affected.

This is mostly a cut-down version of internal_store_reset().
XXX needs rationalising
*/

void
store_release_above_3(void *ptr, const char *func, int linenumber)
{
/* Search all pools' "current" blocks. If it isn't one of those,
ignore it (it usually will be). */

for (int pool = 0; pool < nelem(current_block); pool++)
  {
  storeblock * b = current_block[pool];
  char * bc;
  int count, newlength;

  if (!b)
    continue;

  bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
  if (CS ptr < bc || CS ptr > bc + b->length)
    continue;

  /* Last store operation was not a get */

  store_last_get[pool] = NULL;

  /* Back up, rounding to the alignment if necessary. When testing, flatten
  the released memory. */

  newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
  if (debug_store)
    {
    assert_no_variables(ptr, newlength, func, linenumber);
    if (f.running_in_test_harness)
      {
      (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
      memset(ptr, 0xF0, newlength);
      }
    }
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
  next_yield[pool] = CS ptr + (newlength % alignment);
  count = yield_length[pool];
  count = (yield_length[pool] = newlength - (newlength % alignment)) - count;

  /* Cut out the debugging stuff for utilities, but stop picky compilers from
  giving warnings. */

#ifdef COMPILE_UTILITY
  func = func;
  linenumber = linenumber;
#else
  DEBUG(D_memory)
    debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
      func, linenumber, pool_malloc);
#endif
  return;
  }
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("non-last memory release try: %s %d\n", func, linenumber);
#endif
}



rmark
store_mark_3(const char *func, int linenumber)
{
void ** p;

if (store_pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);

/* Stash a mark for the tainted-twin release, in the untainted twin. Return
a cookie (actually the address in the untainted pool) to the caller.
Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
and winds back the untainted pool with the cookie. */

p = store_get_3(sizeof(void *), FALSE, func, linenumber);
*p = store_get_3(0, TRUE, func, linenumber);
return p;
}
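
/* The cookie mechanics, concretely: after store_mark() on (say) POOL_MAIN,
the returned p points at a void* slot in the untainted pool whose value is
the current top of POOL_TAINT_MAIN. store_reset(p) then rewinds the tainted
twin to *p before rewinding the untainted pool to p itself, so the slot
holding the tainted mark is reclaimed along with everything after it. */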




/************************************************
*             Release store                     *
************************************************/

/* This function checks that the pointer it is given is the first thing in a
block, and if so, releases that block.

Arguments:
  block       block of store to consider
  func        function from which called
  linenumber  line number in source file

Returns:    nothing
*/

static void
store_release_3(void * block, int pool, const char * func, int linenumber)
{
/* It will never be the first block, so no need to check that. */

for (storeblock * b = chainbase[pool]; b; b = b->next)
  {
  storeblock * bb = b->next;
  if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
    {
    int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
    b->next = bb->next;
    nbytes[pool] -= siz;
    pool_malloc -= siz;
    nblocks[pool]--;

    /* Cut out the debugging stuff for utilities, but stop picky compilers
    from giving warnings. */

#ifdef COMPILE_UTILITY
    func = func;
    linenumber = linenumber;
#else
    DEBUG(D_memory)
      debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
        linenumber, pool_malloc);

    if (f.running_in_test_harness)
      memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
#endif /* COMPILE_UTILITY */

    free(bb);
    return;
    }
  }
}


/************************************************
*             Move store                        *
************************************************/

/* Allocate a new block big enough to expand to the given size and
copy the current data into it. Free the old one if possible.

This function is specifically provided for use when reading very
long strings, e.g. header lines. When the string gets longer than a
complete block, it gets copied to a new block. It is helpful to free
the old block iff the previous copy of the string is at its start,
and therefore the only thing in it. Otherwise, for very long strings,
dead store can pile up somewhat disastrously. This function checks that
the pointer it is given is the first thing in a block, and that nothing
has been allocated since. If so, releases that block.

Arguments:
  block
  newsize
  len

Returns:    new location of data
*/

void *
store_newblock_3(void * block, BOOL tainted, int newsize, int len,
  const char * func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
BOOL release_ok = !tainted && store_last_get[pool] == block;
uschar * newtext;

#ifndef MACRO_PREDEF
if (is_tainted(block) != tainted)
  die_tainted(US"store_newblock", CUS func, linenumber);
#endif

newtext = store_get(newsize, tainted);
memcpy(newtext, block, len);
if (release_ok) store_release_3(block, pool, func, linenumber);
return (void *)newtext;
}
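
/* Illustrative sketch (not part of the build): growing a long header line.
Assuming the buffer was the most recent store_get() from its pool, the old
block is released rather than left as dead store (store_newblock() is the
wrapper macro for the function above):

     int size = 256, len = 0;
     uschar * hdr = store_get(size, TRUE);   // tainted: message data
     // ... append bytes, tracking len ...
     if (len == size)                        // outgrew the allocation
       { hdr = store_newblock(hdr, TRUE, size * 2, len); size *= 2; }
*/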




/******************************************************************************/
static void *
store_alloc_tail(void * yield, int size, const char * func, int line,
  const uschar * type)
{
if ((nonpool_malloc += size) > max_nonpool_malloc)
  max_nonpool_malloc = nonpool_malloc;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func; line = line; type = type;
#else

/* If running in test harness, spend time making sure all the new store
is not filled with zeros so as to catch problems. */

if (f.running_in_test_harness)
  memset(yield, 0xF0, (size_t)size);
DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n",
  type, yield, size, func, line, pool_malloc, nonpool_malloc);
#endif /* COMPILE_UTILITY */

return yield;
}

/*************************************************
*                Mmap store                      *
*************************************************/

static void *
store_mmap(int size, const char * func, int line)
{
void * yield, * top;

if (size < 16) size = 16;

/* mmap() signals failure by returning MAP_FAILED, not NULL, so test for that
rather than for a null pointer. */

if ((yield = mmap(NULL, (size_t)size,
      PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)) == MAP_FAILED)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
    "called from line %d of %s", size, line, func);

if (yield < tainted_base) tainted_base = yield;
if ((top = US yield + size) > tainted_top) tainted_top = top;

return store_alloc_tail(yield, size, func, line, US"Mmap");
}

/*************************************************
*                Malloc store                    *
*************************************************/

/* Running out of store is a total disaster for exim. Some malloc functions
do not run happily on very small sizes, nor do they document this fact. This
function is called via the macro store_malloc().

Arguments:
  size        amount of store wanted
  func        function from which called
  linenumber  line number in source file

Returns:    pointer to gotten store (panic on failure)
*/

static void *
internal_store_malloc(int size, const char *func, int linenumber)
{
void * yield;

if (size < 16) size = 16;

if (!(yield = malloc((size_t)size)))
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
    "called from line %d in %s", size, linenumber, func);

return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
}

void *
store_malloc_3(int size, const char *func, int linenumber)
{
if (n_nonpool_blocks++ > max_nonpool_blocks)
  max_nonpool_blocks = n_nonpool_blocks;
return internal_store_malloc(size, func, linenumber);
}


/************************************************
*             Free store                        *
************************************************/

/* This function is called by the macro store_free().

Arguments:
  block       block of store to free
  func        function from which called
  linenumber  line number in source file

Returns:    nothing
*/

static void
internal_untainted_free(void * block, const char * func, int linenumber)
{
#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
#endif /* COMPILE_UTILITY */
free(block);
}

void
store_free_3(void * block, const char * func, int linenumber)
{
n_nonpool_blocks--;
internal_untainted_free(block, func, linenumber);
}

/******************************************************************************/
static void
internal_tainted_free(storeblock * block, const char * func, int linenumber)
{
#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---Unmap %6p %-20s %4d\n", block, func, linenumber);
#endif
munmap((void *)block, block->length + ALIGNED_SIZEOF_STOREBLOCK);
}

/******************************************************************************/
/* Stats output on process exit */
void
store_exit(void)
{
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  {
  debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
    (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
  debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
  for (int i = 0; i < NPOOLS; i++)
    debug_printf("----Exit pool %d max: %3d kB in %d blocks\t%s %s\n",
      i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);
  }
#endif
}

/* End of store.c */