/*
  This file is part of libmicrohttpd
  Copyright (C) 2007--2024 Daniel Pittman and Christian Grothoff
  Copyright (C) 2014--2024 Evgeny Grin (Karlson2k)

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

/**
 * @file memorypool.c
 * @brief memory pool; mostly used for efficient (de)allocation
 *        for each connection and bounding memory use for each request
 * @author Daniel Pittman
 * @author Christian Grothoff
 * @author Karlson2k (Evgeny Grin)
 */
#include "memorypool.h"
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif /* HAVE_STDLIB_H */
#include <string.h>
#include <stdint.h>
#include "mhd_assert.h"
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef HAVE_SYSCONF
#include <unistd.h>
#if defined(_SC_PAGE_SIZE)
#define MHD_SC_PAGESIZE _SC_PAGE_SIZE
#elif defined(_SC_PAGESIZE)
#define MHD_SC_PAGESIZE _SC_PAGESIZE
#endif /* _SC_PAGESIZE */
#endif /* HAVE_SYSCONF */
#include "mhd_limits.h" /* for SIZE_MAX, PAGESIZE / PAGE_SIZE */

#if defined(MHD_USE_PAGESIZE_MACRO) || defined(MHD_USE_PAGE_SIZE_MACRO)
#ifndef HAVE_SYSCONF /* Avoid duplicate include */
#include <unistd.h>
#endif /* HAVE_SYSCONF */
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif /* HAVE_SYS_PARAM_H */
#endif /* MHD_USE_PAGESIZE_MACRO || MHD_USE_PAGE_SIZE_MACRO */
58
62#define _MHD_FALLBACK_PAGE_SIZE (4096)
63
64#if defined(MHD_USE_PAGESIZE_MACRO)
65#define MHD_DEF_PAGE_SIZE_ PAGESIZE
66#elif defined(MHD_USE_PAGE_SIZE_MACRO)
67#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
68#else /* ! PAGESIZE */
69#define MHD_DEF_PAGE_SIZE_ _MHD_FALLBACK_PAGE_SIZE
70#endif /* ! PAGESIZE */
71

#ifdef MHD_ASAN_POISON_ACTIVE
#include <sanitizer/asan_interface.h>
#endif /* MHD_ASAN_POISON_ACTIVE */

/* define MAP_ANONYMOUS for Mac OS X */
#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(_WIN32)
#define MAP_FAILED NULL
#elif ! defined(MAP_FAILED)
#define MAP_FAILED ((void*) -1)
#endif

/**
 * Align all allocations to this value.
 */
#define ALIGN_SIZE (2 * sizeof(void*))

/**
 * Round up 'n' to a multiple of ALIGN_SIZE.
 */
#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
                           / (ALIGN_SIZE) * (ALIGN_SIZE))

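/*
 * Worked example (illustrative, not part of the original source): with
 * 8-byte pointers ALIGN_SIZE is 16, so sizes round up to the next
 * multiple of 16:
 *
 *   ROUND_TO_ALIGN (1)  == 16
 *   ROUND_TO_ALIGN (16) == 16
 *   ROUND_TO_ALIGN (17) == 32
 */
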
#ifndef MHD_ASAN_POISON_ACTIVE
#define _MHD_NOSANITIZE_PTRS /**/
#define _MHD_RED_ZONE_SIZE (0)
#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) ROUND_TO_ALIGN(n)
#define _MHD_POISON_MEMORY(pointer, size) (void)0
#define _MHD_UNPOISON_MEMORY(pointer, size) (void)0

/**
 * Boolean 'true' if the first pointer is less than or equal to the second
 */
#define mp_ptr_le_(p1,p2) \
  (((const uint8_t*)(p1)) <= ((const uint8_t*)(p2)))

/**
 * The difference in bytes between the positions of the first and
 * the second pointers
 */
#define mp_ptr_diff_(p1,p2) \
  ((size_t)(((const uint8_t*)(p1)) - ((const uint8_t*)(p2))))
#else /* MHD_ASAN_POISON_ACTIVE */
#define _MHD_RED_ZONE_SIZE (ALIGN_SIZE)
#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) (ROUND_TO_ALIGN(n) + _MHD_RED_ZONE_SIZE)
#define _MHD_POISON_MEMORY(pointer, size) \
  ASAN_POISON_MEMORY_REGION ((pointer), (size))
#define _MHD_UNPOISON_MEMORY(pointer, size) \
  ASAN_UNPOISON_MEMORY_REGION ((pointer), (size))
#if defined(FUNC_PTRCOMPARE_CAST_WORKAROUND_WORKS)
/**
 * Boolean 'true' if the first pointer is less than or equal to the second
 */
#define mp_ptr_le_(p1,p2) \
  (((uintptr_t)((const void*)(p1))) <= ((uintptr_t)((const void*)(p2))))
/**
 * The difference in bytes between the positions of
 * the first and the second pointers
 */
#define mp_ptr_diff_(p1,p2) \
  ((size_t)(((uintptr_t)((const uint8_t*)(p1))) - \
            ((uintptr_t)((const uint8_t*)(p2)))))
#elif defined(FUNC_ATTR_PTRCOMPARE_WORKS) && \
  defined(FUNC_ATTR_PTRSUBTRACT_WORKS)
#ifdef _DEBUG
/**
 * Boolean 'true' if the first pointer is less than or equal to the second
 */
__attribute__((no_sanitize ("pointer-compare"))) static bool
mp_ptr_le_ (const void *p1, const void *p2)
{
  return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
}


#endif /* _DEBUG */


/**
 * The difference in bytes between the positions of
 * the first and the second pointers
 */
__attribute__((no_sanitize ("pointer-subtract"))) static size_t
mp_ptr_diff_ (const void *p1, const void *p2)
{
  return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
}


#elif defined(FUNC_ATTR_NOSANITIZE_WORKS)
#ifdef _DEBUG
/**
 * Boolean 'true' if the first pointer is less than or equal to the second
 */
__attribute__((no_sanitize ("address"))) static bool
mp_ptr_le_ (const void *p1, const void *p2)
{
  return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
}


#endif /* _DEBUG */

/**
 * The difference in bytes between the positions of
 * the first and the second pointers
 */
__attribute__((no_sanitize ("address"))) static size_t
mp_ptr_diff_ (const void *p1, const void *p2)
{
  return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
}


#else /* ! FUNC_ATTR_NOSANITIZE_WORKS */
#error User-poisoning cannot be used
#endif /* ! FUNC_ATTR_NOSANITIZE_WORKS */
#endif /* MHD_ASAN_POISON_ACTIVE */
192
196static size_t MHD_sys_page_size_ = (size_t)
197#if defined(MHD_USE_PAGESIZE_MACRO_STATIC)
198 PAGESIZE;
199#elif defined(MHD_USE_PAGE_SIZE_MACRO_STATIC)
200 PAGE_SIZE;
201#else /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
202 _MHD_FALLBACK_PAGE_SIZE; /* Default fallback value */
203#endif /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
204
/**
 * Initialise values for memory pools.
 */
void
MHD_init_mem_pools_ (void)
{
#ifdef MHD_SC_PAGESIZE
  long result;
  result = sysconf (MHD_SC_PAGESIZE);
  if (-1 != result)
    MHD_sys_page_size_ = (size_t) result;
  else
    MHD_sys_page_size_ = (size_t) MHD_DEF_PAGE_SIZE_;
#elif defined(_WIN32)
  SYSTEM_INFO si;
  GetSystemInfo (&si);
  MHD_sys_page_size_ = (size_t) si.dwPageSize;
#else
  MHD_sys_page_size_ = (size_t) MHD_DEF_PAGE_SIZE_;
#endif /* _WIN32 */
  mhd_assert (0 != MHD_sys_page_size_);
}


/**
 * Handle for a memory pool.
 * Pools are not reentrant and must not be used
 * by multiple threads concurrently.
 */
struct MemoryPool
{

  /**
   * Pointer to the pool's memory
   */
  uint8_t *memory;

  /**
   * Size of the pool.
   */
  size_t size;

  /**
   * Offset of the first unallocated byte.
   */
  size_t pos;

  /**
   * Offset of the last unallocated byte.
   */
  size_t end;

  /**
   * 'false' if the pool was allocated with malloc(),
   * 'true' if it was allocated with mmap() / VirtualAlloc().
   */
  bool is_mmap;
};

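/*
 * Illustrative layout (not from the original source): "normal"
 * allocations advance 'pos' from the start of the buffer, while
 * "from the end" allocations pull 'end' downward, so the free space
 * is always the single gap between the two offsets:
 *
 *   memory                                            memory + size
 *   |--- normal blocks --->|      free      |<--- end blocks ---|
 *                          ^ pos            ^ end
 */
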

/**
 * Create a memory pool.
 *
 * @param max maximum size of the pool
 * @return NULL on error
 */
struct MemoryPool *
MHD_pool_create (size_t max)
{
  struct MemoryPool *pool;
  size_t alloc_size;

  mhd_assert (max > 0);
  alloc_size = 0;
  pool = malloc (sizeof (struct MemoryPool));
  if (NULL == pool)
    return NULL;
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  if ( (max <= 32 * 1024) ||
       (max < MHD_sys_page_size_ * 4 / 3) )
  {
    pool->memory = MAP_FAILED;
  }
  else
  {
    /* Round up allocation to page granularity. */
    alloc_size = max + MHD_sys_page_size_ - 1;
    alloc_size -= alloc_size % MHD_sys_page_size_;
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    pool->memory = mmap (NULL,
                         alloc_size,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS,
                         -1,
                         0);
#elif defined(_WIN32)
    pool->memory = VirtualAlloc (NULL,
                                 alloc_size,
                                 MEM_COMMIT | MEM_RESERVE,
                                 PAGE_READWRITE);
#endif /* _WIN32 */
  }
#else /* ! _WIN32 && ! MAP_ANONYMOUS */
  pool->memory = MAP_FAILED;
#endif /* ! _WIN32 && ! MAP_ANONYMOUS */
  if (MAP_FAILED == pool->memory)
  {
    alloc_size = ROUND_TO_ALIGN (max);
    pool->memory = malloc (alloc_size);
    if (NULL == pool->memory)
    {
      free (pool);
      return NULL;
    }
    pool->is_mmap = false;
  }
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  else
  {
    pool->is_mmap = true;
  }
#endif /* _WIN32 || MAP_ANONYMOUS */
  mhd_assert (0 == (((uintptr_t) pool->memory) % ALIGN_SIZE));
  pool->pos = 0;
  pool->end = alloc_size;
  pool->size = alloc_size;
  mhd_assert (0 < alloc_size);
  _MHD_POISON_MEMORY (pool->memory, pool->size);
  return pool;
}

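/*
 * Usage sketch (illustrative, not part of the original source): pools
 * back per-connection state in MHD; a pool is created once and later
 * reclaimed as a whole.  Pools larger than ~32 KiB and larger than 4/3
 * of a system page are backed by mmap()/VirtualAlloc(), smaller pools
 * by plain malloc():
 *
 *   struct MemoryPool *pool = MHD_pool_create (64 * 1024);
 *   if (NULL == pool)
 *     return; // out of memory
 *   ... allocate from 'pool' ...
 *   MHD_pool_destroy (pool);
 */
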

/**
 * Destroy a memory pool.
 *
 * @param pool memory pool to destroy
 */
void
MHD_pool_destroy (struct MemoryPool *pool)
{
  if (NULL == pool)
    return;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
  _MHD_UNPOISON_MEMORY (pool->memory, pool->size);
  if (! pool->is_mmap)
    free (pool->memory);
  else
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    munmap (pool->memory,
            pool->size);
#elif defined(_WIN32)
    VirtualFree (pool->memory,
                 0,
                 MEM_RELEASE);
#else
    abort ();
#endif
  free (pool);
}


/**
 * Check how much memory is left in the @a pool.
 *
 * @param pool pool to check
 * @return number of bytes still available in @a pool
 */
size_t
MHD_pool_get_free (struct MemoryPool *pool)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
#ifdef MHD_ASAN_POISON_ACTIVE
  if ((pool->end - pool->pos) <= _MHD_RED_ZONE_SIZE)
    return 0;
#endif /* MHD_ASAN_POISON_ACTIVE */
  return (pool->end - pool->pos) - _MHD_RED_ZONE_SIZE;
}
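
/*
 * Illustrative note (not from the original source): since 'pos' and
 * 'end' are both kept aligned, an allocation of exactly the returned
 * size still fits:
 *
 *   size_t avail = MHD_pool_get_free (pool);
 *   if (0 != avail)
 *     ... MHD_pool_allocate (pool, avail, false) will succeed ...
 */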


/**
 * Allocate @a size bytes from the pool.
 *
 * @param pool memory pool to use for the operation
 * @param size number of bytes to allocate
 * @param from_end allocate from the end of the pool (set to 'true');
 *        use this for small, persistent allocations that
 *        will never be reallocated
 * @return NULL if the pool cannot support @a size more bytes
 */
void *
MHD_pool_allocate (struct MemoryPool *pool,
                   size_t size,
                   bool from_end)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
    return NULL; /* size too close to SIZE_MAX */
  if (asize > pool->end - pool->pos)
    return NULL;
  if (from_end)
  {
    ret = &pool->memory[pool->end - asize];
    pool->end -= asize;
  }
  else
  {
    ret = &pool->memory[pool->pos];
    pool->pos += asize;
  }
  _MHD_UNPOISON_MEMORY (ret, size);
  return ret;
}

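/*
 * Illustrative sketch (not from the original source): 'from_end'
 * selects which side of the free gap the block is carved from; blocks
 * taken "from the end" are meant for persistent data that is never
 * reallocated:
 *
 *   char *line = MHD_pool_allocate (pool, 128, false); // advances 'pos'
 *   char *note = MHD_pool_allocate (pool, 32, true);   // lowers 'end'
 *   if ( (NULL == line) || (NULL == note) )
 *     ... pool exhausted ...
 */
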

/**
 * Check whether an allocated block is resizable in-place.
 * If the block is not resizable in-place, it can still be shrunk, but
 * the freed memory will not be reused until the pool is reset.
 *
 * @param pool the memory pool to use
 * @param block the pointer to the allocated block to check
 * @param block_size the size of the allocated @a block
 * @return true if the block can be resized in-place,
 *         false otherwise
 */
bool
MHD_pool_is_resizable_inplace (struct MemoryPool *pool,
                               void *block,
                               size_t block_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (block != NULL || block_size == 0);
  mhd_assert (pool->size >= block_size);
  if (NULL != block)
  {
    const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
    mhd_assert (mp_ptr_le_ (pool->memory, block));
    mhd_assert (pool->size >= block_offset);
    mhd_assert (pool->size >= block_offset + block_size);
    return (pool->pos ==
            ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size));
  }
  return false; /* Unallocated blocks cannot be resized in-place */
}

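/*
 * Illustrative sketch (not from the original source): only the block
 * that ends exactly at 'pos', i.e. the most recently allocated
 * "normal" block, can grow without copying:
 *
 *   void *a = MHD_pool_allocate (pool, 100, false);
 *   void *b = MHD_pool_allocate (pool, 50, false);
 *   MHD_pool_is_resizable_inplace (pool, a, 100); // false, 'b' follows
 *   MHD_pool_is_resizable_inplace (pool, b, 50);  // true, last block
 */
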

/**
 * Try to allocate @a size bytes of memory from the @a pool.
 *
 * If the allocation fails, @a required_bytes is updated with the
 * number of bytes that would have to be freed in the @a pool for the
 * allocation to succeed.
 *
 * @param pool memory pool to use for the operation
 * @param size the size of the memory area to allocate, in bytes
 * @param[out] required_bytes set to zero on success; on failure set to
 *             the amount of space that needs to be freed, or SIZE_MAX
 *             if @a size itself is too close to SIZE_MAX.
 *             Cannot be NULL.
 * @return the pointer to the allocated memory area on success,
 *         NULL if the pool does not have enough space
 */
void *
MHD_pool_try_alloc (struct MemoryPool *pool,
                    size_t size,
                    size_t *required_bytes)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
  { /* size is too close to SIZE_MAX, very unlikely */
    *required_bytes = SIZE_MAX;
    return NULL;
  }
  if (asize > pool->end - pool->pos)
  {
    mhd_assert ((pool->end - pool->pos) == \
                ROUND_TO_ALIGN (pool->end - pool->pos));
    if (asize <= pool->end)
      *required_bytes = asize - (pool->end - pool->pos);
    else
      *required_bytes = SIZE_MAX;
    return NULL;
  }
  *required_bytes = 0;
  ret = &pool->memory[pool->end - asize];
  pool->end -= asize;
  _MHD_UNPOISON_MEMORY (ret, size);
  return ret;
}

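/*
 * Illustrative sketch (not from the original source): probe for space
 * and learn how much is missing when the allocation cannot be served:
 *
 *   size_t missing;
 *   void *buf = MHD_pool_try_alloc (pool, 1024, &missing);
 *   if (NULL == buf)
 *     ... the pool is short by 'missing' bytes ...
 */
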

/**
 * Reallocate a block of memory obtained from the pool.
 * This is particularly efficient when growing or shrinking the block
 * that was last (re)allocated.  If the given block is not the most
 * recently (re)allocated block, the memory of the previous allocation
 * may not be released until the pool is destroyed or reset.
 *
 * @param pool memory pool to use for the operation
 * @param old the existing block
 * @param old_size the size of the existing block
 * @param new_size the new size of the block
 * @return new address of the block, or
 *         NULL if the pool cannot support @a new_size bytes
 *         (@a old continues to be valid for @a old_size)
 */
void *
MHD_pool_reallocate (struct MemoryPool *pool,
                     void *old,
                     size_t old_size,
                     size_t new_size)
{
  size_t asize;
  uint8_t *new_blc;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (old != NULL || old_size == 0);
  mhd_assert (pool->size >= old_size);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
  mhd_assert (NULL == __asan_region_is_poisoned (old, old_size));
#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */

  if (NULL != old)
  { /* Have previously allocated data */
    const size_t old_offset = mp_ptr_diff_ (old, pool->memory);
    const bool shrinking = (old_size > new_size);

    mhd_assert (mp_ptr_le_ (pool->memory, old));
    /* (pool->memory + pool->size >= (uint8_t*) old + old_size) */
    mhd_assert ((pool->size - _MHD_RED_ZONE_SIZE) >= (old_offset + old_size));
    /* Blocks "from the end" must not be reallocated */
    /* (old_size == 0 || pool->memory + pool->pos > (uint8_t*) old) */
    mhd_assert ((old_size == 0) || \
                (pool->pos > old_offset));
    mhd_assert ((old_size == 0) || \
                ((pool->end - _MHD_RED_ZONE_SIZE) >= (old_offset + old_size)));
    /* Try resizing in-place */
    if (shrinking)
    { /* Shrinking in-place, zero-out freed part */
      memset ((uint8_t *) old + new_size, 0, old_size - new_size);
      _MHD_POISON_MEMORY ((uint8_t *) old + new_size, old_size - new_size);
    }
    if (pool->pos ==
        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size))
    { /* "old" block is the last allocated block */
      const size_t new_apos =
        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size);
      if (! shrinking)
      { /* Grow in-place, check for enough space. */
        if ( (new_apos > pool->end) ||
             (new_apos < pool->pos) ) /* Value wrap */
          return NULL; /* No space */
      }
      /* Resized in-place */
      pool->pos = new_apos;
      _MHD_UNPOISON_MEMORY (old, new_size);
      return old;
    }
    if (shrinking)
      return old; /* Resized in-place, freed part remains allocated */
  }
  /* Need to allocate new block */
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  if ( ( (0 == asize) &&
         (0 != new_size) ) || /* Value wrap, too large new_size. */
       (asize > pool->end - pool->pos) ) /* Not enough space */
    return NULL;

  new_blc = pool->memory + pool->pos;
  pool->pos += asize;

  _MHD_UNPOISON_MEMORY (new_blc, new_size);
  if (0 != old_size)
  {
    /* Move data to new block, old block remains allocated */
    memcpy (new_blc, old, old_size);
    /* Zero-out old block */
    memset (old, 0, old_size);
    _MHD_POISON_MEMORY (old, old_size);
  }
  return new_blc;
}

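/*
 * Illustrative sketch (not from the original source): growing the last
 * allocated block just moves 'pos'; growing any other block copies the
 * data into fresh pool space and leaves the old region zeroed:
 *
 *   char *buf = MHD_pool_allocate (pool, 256, false);
 *   char *nbuf = MHD_pool_reallocate (pool, buf, 256, 512);
 *   if (NULL == nbuf)
 *     ... 'buf' is still valid for its original 256 bytes ...
 */
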

/**
 * Deallocate a block of memory obtained from the pool.
 *
 * If the given block is not the most recently (re)allocated block, the
 * memory is not actually made available for reuse until the pool is
 * destroyed or reset.
 *
 * @param pool memory pool to use for the operation
 * @param block the allocated block, NULL is tolerated
 * @param block_size the size of the allocated @a block
 */
void
MHD_pool_deallocate (struct MemoryPool *pool,
                     void *block,
                     size_t block_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (block != NULL || block_size == 0);
  mhd_assert (pool->size >= block_size);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));

  if (NULL != block)
  { /* Have previously allocated data */
    const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
    mhd_assert (mp_ptr_le_ (pool->memory, block));
    mhd_assert (block_offset <= pool->size);
    mhd_assert ((block_offset != pool->pos) || (block_size == 0));
    /* Zero-out deallocated region */
    if (0 != block_size)
    {
      memset (block, 0, block_size);
      _MHD_POISON_MEMORY (block, block_size);
    }
#if ! defined(MHD_FAVOR_SMALL_CODE) && ! defined(MHD_ASAN_POISON_ACTIVE)
    else
      return; /* Zero size, no need to do anything */
#endif /* ! MHD_FAVOR_SMALL_CODE && ! MHD_ASAN_POISON_ACTIVE */
    if (block_offset <= pool->pos)
    {
      /* "Normal" block, not allocated "from the end". */
      const size_t alg_end =
        ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
      mhd_assert (alg_end <= pool->pos);
      if (alg_end == pool->pos)
      {
        /* The last allocated block, return deallocated block to the pool */
        size_t alg_start = ROUND_TO_ALIGN (block_offset);
        mhd_assert (alg_start >= block_offset);
#if defined(MHD_ASAN_POISON_ACTIVE)
        if (alg_start != block_offset)
        {
          _MHD_POISON_MEMORY (pool->memory + block_offset, \
                              alg_start - block_offset);
        }
        else if (0 != alg_start)
        {
          bool need_red_zone_before;
          mhd_assert (_MHD_RED_ZONE_SIZE <= alg_start);
#if defined(HAVE___ASAN_REGION_IS_POISONED)
          need_red_zone_before =
            (NULL == __asan_region_is_poisoned (pool->memory
                                                + alg_start
                                                - _MHD_RED_ZONE_SIZE,
                                                _MHD_RED_ZONE_SIZE));
#elif defined(HAVE___ASAN_ADDRESS_IS_POISONED)
          need_red_zone_before =
            (0 == __asan_address_is_poisoned (pool->memory + alg_start - 1));
#else  /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
          need_red_zone_before = true; /* Unknown, assume new red zone needed */
#endif /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
          if (need_red_zone_before)
          {
            _MHD_POISON_MEMORY (pool->memory + alg_start, _MHD_RED_ZONE_SIZE);
            alg_start += _MHD_RED_ZONE_SIZE;
          }
        }
#endif /* MHD_ASAN_POISON_ACTIVE */
        mhd_assert (alg_start <= pool->pos);
        mhd_assert (alg_start == ROUND_TO_ALIGN (alg_start));
        pool->pos = alg_start;
      }
    }
    else
    {
      /* Allocated "from the end" block. */
      /* The size and the pointers of such block should not be manipulated by
         MHD code (block split is disallowed). */
      mhd_assert (block_offset >= pool->end);
      mhd_assert (ROUND_TO_ALIGN (block_offset) == block_offset);
      if (block_offset == pool->end)
      {
        /* The last allocated block, return deallocated block to the pool */
        const size_t alg_end =
          ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
        pool->end = alg_end;
      }
    }
  }
}

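/*
 * Illustrative sketch (not from the original source): deallocation is
 * effectively LIFO; only a block adjacent to the free gap actually
 * shrinks 'pos' (or grows 'end'), anything else is just zeroed out:
 *
 *   void *a = MHD_pool_allocate (pool, 64, false);
 *   void *b = MHD_pool_allocate (pool, 64, false);
 *   MHD_pool_deallocate (pool, b, 64); // last block: 'pos' moves back
 *   MHD_pool_deallocate (pool, a, 64); // now last: reclaimed as well
 */
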

/**
 * Reset the memory pool: destroy all allocations except the given
 * @a keep area, which is moved to the beginning of the pool if needed.
 *
 * @param pool memory pool to use for the operation
 * @param keep pointer to the entry to keep (may be NULL)
 * @param copy_bytes how many bytes need to be kept at this address
 * @param new_size how large the resulting allocation should be
 *        (must be larger than or equal to @a copy_bytes)
 * @return the new address of @a keep (if it had to change)
 */
void *
MHD_pool_reset (struct MemoryPool *pool,
                void *keep,
                size_t copy_bytes,
                size_t new_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (copy_bytes <= new_size);
  mhd_assert (copy_bytes <= pool->size);
  mhd_assert (keep != NULL || copy_bytes == 0);
  mhd_assert (keep == NULL || mp_ptr_le_ (pool->memory, keep));
  /* (keep == NULL || pool->memory + pool->size >= (uint8_t*) keep + copy_bytes) */
  mhd_assert ((keep == NULL) || \
              (pool->size >= mp_ptr_diff_ (keep, pool->memory) + copy_bytes));
#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
  mhd_assert (NULL == __asan_region_is_poisoned (keep, copy_bytes));
#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
  _MHD_UNPOISON_MEMORY (pool->memory, new_size);
  if ( (NULL != keep) &&
       (keep != pool->memory) )
  {
    if (0 != copy_bytes)
      memmove (pool->memory,
               keep,
               copy_bytes);
  }
  /* technically not needed, but safer to zero out */
  if (pool->size > copy_bytes)
  {
    size_t to_zero;

    to_zero = pool->size - copy_bytes;
    _MHD_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero);
#ifdef _WIN32
    if (pool->is_mmap)
    {
      size_t to_recommit;
      uint8_t *recommit_addr;
      /* Round down to page size */
      to_recommit = to_zero - to_zero % MHD_sys_page_size_;
      recommit_addr = pool->memory + pool->size - to_recommit;

      /* De-committing and re-committing again clear memory and make
       * pages free / available for other needs until accessed. */
      if (VirtualFree (recommit_addr,
                       to_recommit,
                       MEM_DECOMMIT))
      {
        to_zero -= to_recommit;

        if (recommit_addr != VirtualAlloc (recommit_addr,
                                           to_recommit,
                                           MEM_COMMIT,
                                           PAGE_READWRITE))
          abort (); /* Serious error, must never happen */
      }
    }
#endif /* _WIN32 */
    memset (&pool->memory[copy_bytes],
            0,
            to_zero);
  }
  pool->pos = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  pool->end = pool->size;
  _MHD_POISON_MEMORY (((uint8_t *) pool->memory) + new_size, \
                      pool->size - new_size);
  return pool->memory;
}

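/*
 * Illustrative sketch (not from the original source; 'read_buf',
 * 'used' and 'buf_size' are hypothetical): between requests on a
 * connection the pool is wiped, while one region survives and is
 * moved to the front of the pool:
 *
 *   void *kept = MHD_pool_reset (pool, read_buf, used, buf_size);
 *   ... 'kept' now points at the start of the pool's memory ...
 */
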

/* end of memorypool.c */