1/*
2   +----------------------------------------------------------------------+
3   | Zend Engine                                                          |
4   +----------------------------------------------------------------------+
5   | Copyright (c) 1998-2015 Zend Technologies Ltd. (http://www.zend.com) |
6   +----------------------------------------------------------------------+
7   | This source file is subject to version 2.00 of the Zend license,     |
8   | that is bundled with this package in the file LICENSE, and is        |
9   | available through the world-wide-web at the following url:           |
10   | http://www.zend.com/license/2_00.txt.                                |
11   | If you did not receive a copy of the Zend license and are unable to  |
12   | obtain it through the world-wide-web, please send a note to          |
13   | license@zend.com so we can mail you a copy immediately.              |
14   +----------------------------------------------------------------------+
15   | Authors: Andi Gutmans <andi@zend.com>                                |
16   |          Zeev Suraski <zeev@zend.com>                                |
17   |          Dmitry Stogov <dmitry@zend.com>                             |
18   +----------------------------------------------------------------------+
19*/
20
21/* $Id$ */
22
/*
 * zend_alloc is designed to be a modern, CPU-cache-friendly memory manager
 * for PHP. Most ideas are taken from the jemalloc and tcmalloc implementations.
 *
 * All allocations are split into 3 categories:
 *
 * Huge  - the size is greater than the CHUNK size (~2M by default); the
 *         allocation is performed using mmap(). The result is aligned on a
 *         2M boundary.
 *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
 *         are always aligned on a page boundary.
 *
 * Small - less than 3/4 of the page size. Small sizes are rounded up to the
 *         nearest greater predefined small size (there are 30 predefined
 *         sizes: 8, 16, 24, 32, ... 3072). Small blocks are allocated from
 *         RUNs. Each RUN is allocated as a single page or a few consecutive
 *         pages. Allocation inside RUNs is implemented using a linked list
 *         of free elements. The result is aligned to 8 bytes.
 *
 * zend_alloc allocates memory from the OS in CHUNKs. These CHUNKs and huge
 * memory blocks are always aligned on a CHUNK boundary, so it is easy to
 * determine the CHUNK owning a certain pointer. Regular CHUNKs reserve a
 * single page at the start for special purposes: it contains a bitset of
 * free pages, a few bitsets for available runs of the predefined small
 * sizes, a map of pages that keeps information about the usage of each page
 * in this CHUNK, etc.
 *
 * zend_alloc provides the familiar emalloc/efree/erealloc API, but in
 * addition it provides specialized and optimized routines to allocate blocks
 * of predefined sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(),
 * etc.). The library uses C preprocessor tricks that substitute calls to
 * emalloc() with more specialized routines when the requested size is known.
 */
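/*
 * For illustration (not part of the original comment), assuming the default
 * 2MB chunk / 4KB page configuration in a non-debug build, the three
 * categories route like this:
 *
 *   emalloc(100)           -> "small": rounded up to a predefined bin size
 *   emalloc(100*1024)      -> "large": a run of 25 pages (102400/4096) in a chunk
 *   emalloc(3*1024*1024)   -> "huge":  its own chunk-aligned mmap() mapping
 */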
55
56#include "zend.h"
57#include "zend_alloc.h"
58#include "zend_globals.h"
59#include "zend_operators.h"
60#include "zend_multiply.h"
61
62#ifdef HAVE_SIGNAL_H
63# include <signal.h>
64#endif
65#ifdef HAVE_UNISTD_H
66# include <unistd.h>
67#endif
68
69#ifdef ZEND_WIN32
70# include <wincrypt.h>
71# include <process.h>
72#endif
73
74#include <stdio.h>
75#include <stdlib.h>
76#include <string.h>
77
78#include <sys/types.h>
79#include <sys/stat.h>
80#if HAVE_LIMITS_H
81#include <limits.h>
82#endif
83#include <fcntl.h>
84#include <errno.h>
85
86#ifndef _WIN32
87# ifdef HAVE_MREMAP
88#  ifndef _GNU_SOURCE
89#   define _GNU_SOURCE
90#  endif
91#  ifndef __USE_GNU
92#   define __USE_GNU
93#  endif
94# endif
95# include <sys/mman.h>
96# ifndef MAP_ANON
97#  ifdef MAP_ANONYMOUS
98#   define MAP_ANON MAP_ANONYMOUS
99#  endif
100# endif
101# ifndef MREMAP_MAYMOVE
102#  define MREMAP_MAYMOVE 0
103# endif
104# ifndef MAP_FAILED
105#  define MAP_FAILED ((void*)-1)
106# endif
107# ifndef MAP_POPULATE
108#  define MAP_POPULATE 0
109# endif
#  if defined(_SC_PAGESIZE) || defined(_SC_PAGE_SIZE)
111#    define REAL_PAGE_SIZE _real_page_size
112static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
113#  endif
114#endif
115
116#ifndef REAL_PAGE_SIZE
117# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
118#endif
119
120#ifndef ZEND_MM_STAT
121# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
122#endif
123#ifndef ZEND_MM_LIMIT
124# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
125#endif
126#ifndef ZEND_MM_CUSTOM
127# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
128                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
129#endif
130#ifndef ZEND_MM_STORAGE
131# define ZEND_MM_STORAGE 1 /* support for custom memory storage              */
132#endif
133#ifndef ZEND_MM_ERROR
134# define ZEND_MM_ERROR 1   /* report system errors                           */
135#endif
136
137#ifndef ZEND_MM_CHECK
138# define ZEND_MM_CHECK(condition, message)  do { \
139        if (UNEXPECTED(!(condition))) { \
140            zend_mm_panic(message); \
141        } \
142    } while (0)
143#endif
144
145typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
146typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */
147
148#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
149    (((size_t)(size)) & ((alignment) - 1))
150#define ZEND_MM_ALIGNED_BASE(size, alignment) \
151    (((size_t)(size)) & ~((alignment) - 1))
152#define ZEND_MM_ALIGNED_SIZE_EX(size, alignment) \
153    (((size_t)(size) + ((alignment) - 1)) & ~((alignment) - 1))
154#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
155    (((size_t)(size) + ((alignment) - 1)) / (alignment))
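
/*
 * Worked examples (illustrative only) of the alignment helpers above, for an
 * 8-byte alignment and for the 4096-byte page size; all of them require
 * "alignment" to be a power of two:
 *
 *   ZEND_MM_ALIGNED_OFFSET(13, 8)   == 5   (13 & 7)
 *   ZEND_MM_ALIGNED_BASE(13, 8)     == 8   (13 & ~7)
 *   ZEND_MM_ALIGNED_SIZE_EX(13, 8)  == 16  ((13 + 7) & ~7)
 *   ZEND_MM_SIZE_TO_NUM(9000, 4096) == 3   (a 9000-byte block needs 3 pages)
 */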
156
157#define ZEND_MM_BITSET_LEN      (sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
158#define ZEND_MM_PAGE_MAP_LEN    (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
159
160typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */
161
162#define ZEND_MM_IS_FRUN                  0x00000000
163#define ZEND_MM_IS_LRUN                  0x40000000
164#define ZEND_MM_IS_SRUN                  0x80000000
165
166#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
167#define ZEND_MM_LRUN_PAGES_OFFSET        0
168
169#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
170#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0
171
172#define ZEND_MM_SRUN_FREE_COUNTER_MASK   0x01ff0000
173#define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16
174
175#define ZEND_MM_NRUN_OFFSET_MASK         0x01ff0000
176#define ZEND_MM_NRUN_OFFSET_OFFSET       16
177
178#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
179#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
180#define ZEND_MM_SRUN_FREE_COUNTER(info)  (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
181#define ZEND_MM_NRUN_OFFSET(info)        (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)
182
183#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
184#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
185#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
186#define ZEND_MM_SRUN_EX(bin_num, count)  (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
#define ZEND_MM_NRUN(bin_num, offset)    (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))
188
189#define ZEND_MM_BINS 30
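
/*
 * Example encodings produced by the macros above (illustrative only):
 *
 *   ZEND_MM_LRUN(3)        == 0x40000003  - large run of 3 pages
 *   ZEND_MM_SRUN(5)        == 0x80000005  - first page of a bin for bin #5
 *   ZEND_MM_SRUN_EX(5, 10) == 0x800a0005  - bin #5 with a free counter of 10
 *   ZEND_MM_NRUN(5, 2)     == 0xc0020005  - 3rd page (offset 2) of bin #5
 *
 * The accessor macros recover the fields, e.g. ZEND_MM_LRUN_PAGES(0x40000003)
 * is 3 and ZEND_MM_SRUN_FREE_COUNTER(0x800a0005) is 10.
 */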
190
191typedef struct  _zend_mm_page      zend_mm_page;
192typedef struct  _zend_mm_bin       zend_mm_bin;
193typedef struct  _zend_mm_free_slot zend_mm_free_slot;
194typedef struct  _zend_mm_chunk     zend_mm_chunk;
195typedef struct  _zend_mm_huge_list zend_mm_huge_list;
196
197#ifdef _WIN64
198# define PTR_FMT "0x%0.16I64x"
199#elif SIZEOF_LONG == 8
200# define PTR_FMT "0x%0.16lx"
201#else
202# define PTR_FMT "0x%0.8lx"
203#endif
204
/*
 * Memory is retrieved from the OS in chunks of a fixed size (2MB).
 * Inside a chunk, memory is managed in pages of a fixed size (4096 bytes),
 * so each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
 *
 * free_pages - current number of free pages in this chunk
 *
 * free_tail  - number of continuous free pages at the end of the chunk
 *
 * free_map   - bitset (a bit for each page). The bit is set if the corresponding
 *              page is allocated. The allocator for "large" sizes may easily find
 *              a free page (or a continuous run of pages) by searching for zero
 *              bits.
 *
 * map        - contains service information for each page (32 bits for each
 *              page).
 *    usage:
 *              (2 bits)
 *              FRUN - free page,
 *              LRUN - first page of a "large" allocation
 *              SRUN - first page of a bin used for "small" allocations
 *
 *    lrun_pages:
 *              (10 bits) number of allocated pages
 *
 *    srun_bin_num:
 *              (5 bits) bin number (e.g. 0 for sizes 0-8, 1 for 9-16,
 *               2 for 17-24, 3 for 25-32, etc.), see zend_alloc_sizes.h
 */
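
/*
 * Because chunks are CHUNK-aligned, the owning chunk and the page number of
 * any small/large pointer can be recovered with plain masking, the pattern
 * used throughout this file:
 *
 *   chunk    = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
 *   page_num = (int)(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE);
 *
 * A zero offset means the pointer is the start of a chunk, i.e. a huge block.
 */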
236
237struct _zend_mm_heap {
238#if ZEND_MM_CUSTOM
239    int                use_custom_heap;
240#endif
241#if ZEND_MM_STORAGE
242    zend_mm_storage   *storage;
243#endif
244#if ZEND_MM_STAT
245    size_t             size;                    /* current memory usage */
246    size_t             peak;                    /* peak memory usage */
247#endif
248    zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
249#if ZEND_MM_STAT || ZEND_MM_LIMIT
250    size_t             real_size;               /* current size of allocated pages */
251#endif
252#if ZEND_MM_STAT
253    size_t             real_peak;               /* peak size of allocated pages */
254#endif
255#if ZEND_MM_LIMIT
256    size_t             limit;                   /* memory limit */
257    int                overflow;                /* memory overflow flag */
258#endif
259
260    zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */
261
262    zend_mm_chunk     *main_chunk;
263    zend_mm_chunk     *cached_chunks;           /* list of unused chunks */
    int                chunks_count;            /* number of allocated chunks */
265    int                peak_chunks_count;       /* peak number of allocated chunks for current request */
266    int                cached_chunks_count;     /* number of cached chunks */
267    double             avg_chunks_count;        /* average number of chunks allocated per request */
268#if ZEND_MM_CUSTOM
269    union {
270        struct {
271            void      *(*_malloc)(size_t);
272            void       (*_free)(void*);
273            void      *(*_realloc)(void*, size_t);
274        } std;
275        struct {
276            void      *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
277            void       (*_free)(void*  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
278            void      *(*_realloc)(void*, size_t  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
279        } debug;
280    } custom_heap;
281#endif
282};
283
284struct _zend_mm_chunk {
285    zend_mm_heap      *heap;
286    zend_mm_chunk     *next;
287    zend_mm_chunk     *prev;
288    int                free_pages;              /* number of free pages */
289    int                free_tail;               /* number of free pages at the end of chunk */
290    int                num;
291    char               reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)];
292    zend_mm_heap       heap_slot;               /* used only in main chunk */
293    zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
294    zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
295};
296
297struct _zend_mm_page {
298    char               bytes[ZEND_MM_PAGE_SIZE];
299};
300
/*
 * A bin is one or a few contiguous pages (up to 8) used for allocations of
 * a particular "small" size.
 */
305struct _zend_mm_bin {
306    char               bytes[ZEND_MM_PAGE_SIZE * 8];
307};
308
309struct _zend_mm_free_slot {
310    zend_mm_free_slot *next_free_slot;
311};
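
/*
 * Illustration of the small-allocation bookkeeping (numbers taken from the
 * default size table in zend_alloc_sizes.h; they may differ if that table is
 * changed): bin #0 serves 8-byte elements and occupies a single 4KB page
 * holding 512 elements. Free elements are kept in an intrusive singly linked
 * list: each free element stores the address of the next free element in its
 * first machine word (see zend_mm_free_slot), so the free list needs no
 * extra memory.
 */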
312
313struct _zend_mm_huge_list {
314    void              *ptr;
315    size_t             size;
316    zend_mm_huge_list *next;
317#if ZEND_DEBUG
318    zend_mm_debug_info dbg;
319#endif
320};
321
322#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
323    ((void*)(((zend_mm_page*)(chunk)) + (page_num)))
324
325#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
326static const unsigned int bin_data_size[] = {
327  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
328};
329
330#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
331static const int bin_elements[] = {
332  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
333};
334
335#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
336static const int bin_pages[] = {
337  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
338};
339
340#if ZEND_DEBUG
341ZEND_COLD void zend_debug_alloc_output(char *format, ...)
342{
343    char output_buf[256];
344    va_list args;
345
346    va_start(args, format);
347    vsprintf(output_buf, format, args);
348    va_end(args);
349
350#ifdef ZEND_WIN32
351    OutputDebugString(output_buf);
352#else
353    fprintf(stderr, "%s", output_buf);
354#endif
355}
356#endif
357
358static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
359{
360    fprintf(stderr, "%s\n", message);
361/* See http://support.microsoft.com/kb/190351 */
362#ifdef ZEND_WIN32
363    fflush(stderr);
364#endif
365#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
366    kill(getpid(), SIGSEGV);
367#endif
368    exit(1);
369}
370
371static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
372    const char *format,
373    size_t limit,
374#if ZEND_DEBUG
375    const char *filename,
376    uint lineno,
377#endif
378    size_t size)
379{
380
381    heap->overflow = 1;
382    zend_try {
383        zend_error_noreturn(E_ERROR,
384            format,
385            limit,
386#if ZEND_DEBUG
387            filename,
388            lineno,
389#endif
390            size);
391    } zend_catch {
392    }  zend_end_try();
393    heap->overflow = 0;
394    zend_bailout();
395    exit(1);
396}
397
398#ifdef _WIN32
399void
400stderr_last_error(char *msg)
401{
402    LPSTR buf = NULL;
403    DWORD err = GetLastError();
404
405    if (!FormatMessage(
406            FORMAT_MESSAGE_ALLOCATE_BUFFER |
407            FORMAT_MESSAGE_FROM_SYSTEM |
408            FORMAT_MESSAGE_IGNORE_INSERTS,
409            NULL,
410            err,
411            MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
412            (LPSTR)&buf,
413        0, NULL)) {
414        fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
415    }
416    else {
417        fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
418    }
419}
420#endif
421
422/*****************/
423/* OS Allocation */
424/*****************/
425
426static void *zend_mm_mmap_fixed(void *addr, size_t size)
427{
428#ifdef _WIN32
429    return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
430#else
431    /* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
432    void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);
433
434    if (ptr == MAP_FAILED) {
435#if ZEND_MM_ERROR
436        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
437#endif
438        return NULL;
439    } else if (ptr != addr) {
440        if (munmap(ptr, size) != 0) {
441#if ZEND_MM_ERROR
442            fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
443#endif
444        }
445        return NULL;
446    }
447    return ptr;
448#endif
449}
450
451static void *zend_mm_mmap(size_t size)
452{
453#ifdef _WIN32
454    void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
455
456    if (ptr == NULL) {
457#if ZEND_MM_ERROR
458        stderr_last_error("VirtualAlloc() failed");
459#endif
460        return NULL;
461    }
462    return ptr;
463#else
464    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);
465
466    if (ptr == MAP_FAILED) {
467#if ZEND_MM_ERROR
468        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
469#endif
470        return NULL;
471    }
472    return ptr;
473#endif
474}
475
476static void zend_mm_munmap(void *addr, size_t size)
477{
478#ifdef _WIN32
479    if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
480#if ZEND_MM_ERROR
481        stderr_last_error("VirtualFree() failed");
482#endif
483    }
484#else
485    if (munmap(addr, size) != 0) {
486#if ZEND_MM_ERROR
487        fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
488#endif
489    }
490#endif
491}
492
493/***********/
494/* Bitmask */
495/***********/
496
497/* number of trailing set (1) bits */
498static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
499{
500#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG
501    return __builtin_ctzl(~bitset);
502#elif defined(__GNUC__) || __has_builtin(__builtin_ctzll)
503    return __builtin_ctzll(~bitset);
504#elif defined(_WIN32)
505    unsigned long index;
506
507#if defined(_WIN64)
508    if (!BitScanForward64(&index, ~bitset)) {
509#else
510    if (!BitScanForward(&index, ~bitset)) {
511#endif
512        /* undefined behavior */
513        return 32;
514    }
515
516    return (int)index;
517#else
518    int n;
519
520    if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;
521
522    n = 0;
523#if SIZEOF_ZEND_LONG == 8
524    if (sizeof(zend_mm_bitset) == 8) {
525        if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
526    }
527#endif
528    if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
529    if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
530    if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
531    if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
532    return n + (bitset & 1);
533#endif
534}
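
/*
 * Example (illustrative): zend_mm_bitset_nts(0x0f) == 4, because the word has
 * four trailing 1 bits (it is computed as ctz(~bitset)). In terms of the free
 * map this means pages 0-3 covered by this word are allocated and page 4 is
 * the first free one.
 */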
535
/* number of trailing zero bits (0x01 -> 0; 0x40 -> 6; 0x00 -> LEN) */
537static zend_always_inline int zend_mm_bitset_ntz(zend_mm_bitset bitset)
538{
539#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG
540    return __builtin_ctzl(bitset);
541#elif defined(__GNUC__) || __has_builtin(__builtin_ctzll)
542    return __builtin_ctzll(bitset);
543#elif defined(_WIN32)
544    unsigned long index;
545
546#if defined(_WIN64)
547    if (!BitScanForward64(&index, bitset)) {
548#else
549    if (!BitScanForward(&index, bitset)) {
550#endif
551        /* undefined behavior */
552        return 32;
553    }
554
555    return (int)index;
556#else
557    int n;
558
559    if (bitset == (zend_mm_bitset)0) return ZEND_MM_BITSET_LEN;
560
561    n = 1;
562#if SIZEOF_ZEND_LONG == 8
563    if (sizeof(zend_mm_bitset) == 8) {
564        if ((bitset & 0xffffffff) == 0) {n += 32; bitset = bitset >> Z_UL(32);}
565    }
566#endif
567    if ((bitset & 0x0000ffff) == 0) {n += 16; bitset = bitset >> 16;}
568    if ((bitset & 0x000000ff) == 0) {n +=  8; bitset = bitset >>  8;}
569    if ((bitset & 0x0000000f) == 0) {n +=  4; bitset = bitset >>  4;}
570    if ((bitset & 0x00000003) == 0) {n +=  2; bitset = bitset >>  2;}
571    return n - (bitset & 1);
572#endif
573}
574
575static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
576{
577    int i = 0;
578
579    do {
580        zend_mm_bitset tmp = bitset[i];
581        if (tmp != (zend_mm_bitset)-1) {
582            return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
583        }
584        i++;
585    } while (i < size);
586    return -1;
587}
588
589static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
590{
591    int i = 0;
592
593    do {
594        zend_mm_bitset tmp = bitset[i];
595        if (tmp != 0) {
596            return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_ntz(tmp);
597        }
598        i++;
599    } while (i < size);
600    return -1;
601}
602
603static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
604{
605    int i = 0;
606
607    do {
608        zend_mm_bitset tmp = bitset[i];
609        if (tmp != (zend_mm_bitset)-1) {
610            int n = zend_mm_bitset_nts(tmp);
611            bitset[i] |= Z_UL(1) << n;
612            return i * ZEND_MM_BITSET_LEN + n;
613        }
614        i++;
615    } while (i < size);
616    return -1;
617}
618
619static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
620{
621    return (bitset[bit / ZEND_MM_BITSET_LEN] & (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0;
622}
623
624static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
625{
626    bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
627}
628
629static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
630{
631    bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
632}
633
634static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
635{
636    if (len == 1) {
637        zend_mm_bitset_set_bit(bitset, start);
638    } else {
639        int pos = start / ZEND_MM_BITSET_LEN;
640        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
641        int bit = start & (ZEND_MM_BITSET_LEN - 1);
642        zend_mm_bitset tmp;
643
644        if (pos != end) {
645            /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
646            tmp = (zend_mm_bitset)-1 << bit;
647            bitset[pos++] |= tmp;
648            while (pos != end) {
649                /* set all bits */
650                bitset[pos++] = (zend_mm_bitset)-1;
651            }
652            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
653            /* set bits from "0" to "end" */
654            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
655            bitset[pos] |= tmp;
656        } else {
657            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
658            /* set bits from "bit" to "end" */
659            tmp = (zend_mm_bitset)-1 << bit;
660            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
661            bitset[pos] |= tmp;
662        }
663    }
664}
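
/*
 * Example (illustrative) with 64-bit words: zend_mm_bitset_set_range(map, 62, 4)
 * touches two words: map[0] |= ((zend_mm_bitset)-1 << 62) sets bits 62-63, and
 * map[1] |= ((zend_mm_bitset)-1 >> 62) sets bits 0-1.
 */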
665
666static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
667{
668    if (len == 1) {
669        zend_mm_bitset_reset_bit(bitset, start);
670    } else {
671        int pos = start / ZEND_MM_BITSET_LEN;
672        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
673        int bit = start & (ZEND_MM_BITSET_LEN - 1);
674        zend_mm_bitset tmp;
675
676        if (pos != end) {
677            /* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
678            tmp = ~((Z_L(1) << bit) - 1);
679            bitset[pos++] &= ~tmp;
680            while (pos != end) {
                /* reset all bits */
682                bitset[pos++] = 0;
683            }
684            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
685            /* reset bits from "0" to "end" */
686            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
687            bitset[pos] &= ~tmp;
688        } else {
689            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
690            /* reset bits from "bit" to "end" */
691            tmp = (zend_mm_bitset)-1 << bit;
692            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
693            bitset[pos] &= ~tmp;
694        }
695    }
696}
697
698static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
699{
700    if (len == 1) {
701        return !zend_mm_bitset_is_set(bitset, start);
702    } else {
703        int pos = start / ZEND_MM_BITSET_LEN;
704        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
705        int bit = start & (ZEND_MM_BITSET_LEN - 1);
706        zend_mm_bitset tmp;
707
708        if (pos != end) {
            /* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
710            tmp = (zend_mm_bitset)-1 << bit;
711            if ((bitset[pos++] & tmp) != 0) {
712                return 0;
713            }
714            while (pos != end) {
                /* check whole words */
716                if (bitset[pos++] != 0) {
717                    return 0;
718                }
719            }
720            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check bits from "0" to "end" */
722            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
723            return (bitset[pos] & tmp) == 0;
724        } else {
725            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check bits from "bit" to "end" */
727            tmp = (zend_mm_bitset)-1 << bit;
728            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
729            return (bitset[pos] & tmp) == 0;
730        }
731    }
732}
733
734/**********/
735/* Chunks */
736/**********/
737
738static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
739{
740    void *ptr = zend_mm_mmap(size);
741
742    if (ptr == NULL) {
743        return NULL;
744    } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
745#ifdef MADV_HUGEPAGE
746        madvise(ptr, size, MADV_HUGEPAGE);
747#endif
748        return ptr;
749    } else {
750        size_t offset;
751
752        /* chunk has to be aligned */
753        zend_mm_munmap(ptr, size);
754        ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
755#ifdef _WIN32
756        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
757        zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
758        ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
759        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
760        if (offset != 0) {
761            zend_mm_munmap(ptr, size);
762            return NULL;
763        }
764        return ptr;
765#else
766        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
767        if (offset != 0) {
768            offset = alignment - offset;
769            zend_mm_munmap(ptr, offset);
770            ptr = (char*)ptr + offset;
771            alignment -= offset;
772        }
773        if (alignment > REAL_PAGE_SIZE) {
774            zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
775        }
776# ifdef MADV_HUGEPAGE
777        madvise(ptr, size, MADV_HUGEPAGE);
778# endif
779#endif
780        return ptr;
781    }
782}
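
/*
 * Alignment trick above, traced with hypothetical numbers (non-Windows path):
 * to get a 2MB-aligned 2MB chunk, the first mmap() may return an unaligned
 * address, so it is unmapped and 2MB + 2MB - page_size bytes are mapped
 * instead. If that mapping starts at an offset of 0x180000 into a 2MB block,
 * the leading 0x80000 bytes and the trailing 0x17f000 bytes are munmap()ed,
 * leaving exactly one aligned 2MB region.
 */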
783
784static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
785{
786#if ZEND_MM_STORAGE
787    if (UNEXPECTED(heap->storage)) {
788        void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
789        ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (zend_uintptr_t)ptr);
790        return ptr;
791    }
792#endif
793    return zend_mm_chunk_alloc_int(size, alignment);
794}
795
796static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
797{
798#if ZEND_MM_STORAGE
799    if (UNEXPECTED(heap->storage)) {
800        heap->storage->handlers.chunk_free(heap->storage, addr, size);
801        return;
802    }
803#endif
804    zend_mm_munmap(addr, size);
805}
806
807static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
808{
809#if ZEND_MM_STORAGE
810    if (UNEXPECTED(heap->storage)) {
811        if (heap->storage->handlers.chunk_truncate) {
812            return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
813        } else {
814            return 0;
815        }
816    }
817#endif
818#ifndef _WIN32
819    zend_mm_munmap((char*)addr + new_size, old_size - new_size);
820    return 1;
821#else
822    return 0;
823#endif
824}
825
826static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
827{
828#if ZEND_MM_STORAGE
829    if (UNEXPECTED(heap->storage)) {
830        if (heap->storage->handlers.chunk_extend) {
831            return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
832        } else {
833            return 0;
834        }
835    }
836#endif
837#ifndef _WIN32
838    return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
839#else
840    return 0;
841#endif
842}
843
844static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
845{
846    chunk->heap = heap;
847    chunk->next = heap->main_chunk;
848    chunk->prev = heap->main_chunk->prev;
849    chunk->prev->next = chunk;
850    chunk->next->prev = chunk;
851    /* mark first pages as allocated */
852    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
853    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    /* younger chunks get a bigger number */
855    chunk->num = chunk->prev->num + 1;
856    /* mark first pages as allocated */
857    chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
858    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
859}
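
/*
 * With the default ZEND_MM_FIRST_PAGE of 1 (an assumption of this example),
 * a freshly initialized chunk has free_pages == 511, free_map[0] == 0x1
 * (only the header page is marked as used) and map[0] == ZEND_MM_LRUN(1),
 * i.e. the header is recorded as a one-page "large" run.
 */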
860
861/***********************/
862/* Huge Runs (forward) */
863/***********************/
864
865static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
866static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
867static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
868
869#if ZEND_DEBUG
870static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
871#else
872static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
873#endif
874
875/**************/
876/* Large Runs */
877/**************/
878
879#if ZEND_DEBUG
880static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
881#else
882static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
883#endif
884{
885    zend_mm_chunk *chunk = heap->main_chunk;
886    int page_num, len;
887
888    while (1) {
889        if (UNEXPECTED(chunk->free_pages < pages_count)) {
890            goto not_found;
891#if 0
892        } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
893            if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
894                goto not_found;
895            } else {
896                page_num = chunk->free_tail;
897                goto found;
898            }
899        } else if (0) {
900            /* First-Fit Search */
901            int free_tail = chunk->free_tail;
902            zend_mm_bitset *bitset = chunk->free_map;
903            zend_mm_bitset tmp = *(bitset++);
904            int i = 0;
905
906            while (1) {
907                /* skip allocated blocks */
908                while (tmp == (zend_mm_bitset)-1) {
909                    i += ZEND_MM_BITSET_LEN;
910                    if (i == ZEND_MM_PAGES) {
911                        goto not_found;
912                    }
913                    tmp = *(bitset++);
914                }
915                /* find first 0 bit */
916                page_num = i + zend_mm_bitset_nts(tmp);
917                /* reset bits from 0 to "bit" */
918                tmp &= tmp + 1;
919                /* skip free blocks */
920                while (tmp == 0) {
921                    i += ZEND_MM_BITSET_LEN;
922                    len = i - page_num;
923                    if (len >= pages_count) {
924                        goto found;
925                    } else if (i >= free_tail) {
926                        goto not_found;
927                    }
928                    tmp = *(bitset++);
929                }
930                /* find first 1 bit */
931                len = (i + zend_mm_bitset_ntz(tmp)) - page_num;
932                if (len >= pages_count) {
933                    goto found;
934                }
935                /* set bits from 0 to "bit" */
936                tmp |= tmp - 1;
937            }
938#endif
939        } else {
940            /* Best-Fit Search */
941            int best = -1;
942            int best_len = ZEND_MM_PAGES;
943            int free_tail = chunk->free_tail;
944            zend_mm_bitset *bitset = chunk->free_map;
945            zend_mm_bitset tmp = *(bitset++);
946            int i = 0;
947
948            while (1) {
949                /* skip allocated blocks */
950                while (tmp == (zend_mm_bitset)-1) {
951                    i += ZEND_MM_BITSET_LEN;
952                    if (i == ZEND_MM_PAGES) {
953                        if (best > 0) {
954                            page_num = best;
955                            goto found;
956                        } else {
957                            goto not_found;
958                        }
959                    }
960                    tmp = *(bitset++);
961                }
962                /* find first 0 bit */
963                page_num = i + zend_mm_bitset_nts(tmp);
964                /* reset bits from 0 to "bit" */
965                tmp &= tmp + 1;
966                /* skip free blocks */
967                while (tmp == 0) {
968                    i += ZEND_MM_BITSET_LEN;
969                    if (i >= free_tail) {
970                        len = ZEND_MM_PAGES - page_num;
971                        if (len >= pages_count && len < best_len) {
972                            chunk->free_tail = page_num + pages_count;
973                            goto found;
974                        } else {
975                            /* set accurate value */
976                            chunk->free_tail = page_num;
977                            if (best > 0) {
978                                page_num = best;
979                                goto found;
980                            } else {
981                                goto not_found;
982                            }
983                        }
984                    }
985                    tmp = *(bitset++);
986                }
987                /* find first 1 bit */
988                len = i + zend_mm_bitset_ntz(tmp) - page_num;
989                if (len >= pages_count) {
990                    if (len == pages_count) {
991                        goto found;
992                    } else if (len < best_len) {
993                        best_len = len;
994                        best = page_num;
995                    }
996                }
997                /* set bits from 0 to "bit" */
998                tmp |= tmp - 1;
999            }
1000        }
1001
1002not_found:
1003        if (chunk->next == heap->main_chunk) {
1004get_chunk:
1005            if (heap->cached_chunks) {
1006                heap->cached_chunks_count--;
1007                chunk = heap->cached_chunks;
1008                heap->cached_chunks = chunk->next;
1009            } else {
1010#if ZEND_MM_LIMIT
1011                if (UNEXPECTED(heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit)) {
1012                    if (zend_mm_gc(heap)) {
1013                        goto get_chunk;
1014                    } else if (heap->overflow == 0) {
1015#if ZEND_DEBUG
1016                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1017#else
1018                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
1019#endif
1020                        return NULL;
1021                    }
1022                }
1023#endif
1024                chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1025                if (UNEXPECTED(chunk == NULL)) {
1026                    /* insufficient memory */
1027                    if (zend_mm_gc(heap) &&
1028                        (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
1029                        /* pass */
1030                    } else {
1031#if !ZEND_MM_LIMIT
1032                        zend_mm_safe_error(heap, "Out of memory");
1033#elif ZEND_DEBUG
1034                        zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1035#else
1036                        zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
1037#endif
1038                        return NULL;
1039                    }
1040                }
1041#if ZEND_MM_STAT
1042                do {
1043                    size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
1044                    size_t peak = MAX(heap->real_peak, size);
1045                    heap->real_size = size;
1046                    heap->real_peak = peak;
1047                } while (0);
1048#elif ZEND_MM_LIMIT
1049                heap->real_size += ZEND_MM_CHUNK_SIZE;
1050
1051#endif
1052            }
1053            heap->chunks_count++;
1054            if (heap->chunks_count > heap->peak_chunks_count) {
1055                heap->peak_chunks_count = heap->chunks_count;
1056            }
1057            zend_mm_chunk_init(heap, chunk);
1058            page_num = ZEND_MM_FIRST_PAGE;
1059            len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1060            goto found;
1061        } else {
1062            chunk = chunk->next;
1063        }
1064    }
1065
1066found:
1067    /* mark run as allocated */
1068    chunk->free_pages -= pages_count;
1069    zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
1070    chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
1071    if (page_num == chunk->free_tail) {
1072        chunk->free_tail = page_num + pages_count;
1073    }
1074    return ZEND_MM_PAGE_ADDR(chunk, page_num);
1075}
1076
1077static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1078{
1079    int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
1080#if ZEND_DEBUG
1081    void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1082#else
1083    void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1084#endif
1085#if ZEND_MM_STAT
1086    do {
1087        size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
1088        size_t peak = MAX(heap->peak, size);
1089        heap->size = size;
1090        heap->peak = peak;
1091    } while (0);
1092#endif
1093    return ptr;
1094}
1095
1096static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
1097{
1098    chunk->next->prev = chunk->prev;
1099    chunk->prev->next = chunk->next;
1100    heap->chunks_count--;
1101    if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1) {
1102        /* delay deletion */
1103        heap->cached_chunks_count++;
1104        chunk->next = heap->cached_chunks;
1105        heap->cached_chunks = chunk;
1106    } else {
1107#if ZEND_MM_STAT || ZEND_MM_LIMIT
1108        heap->real_size -= ZEND_MM_CHUNK_SIZE;
1109#endif
1110        if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
1111            zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
1112        } else {
1113//TODO: select the best chunk to delete???
1114            chunk->next = heap->cached_chunks->next;
1115            zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
1116            heap->cached_chunks = chunk;
1117        }
1118    }
1119}
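
/*
 * Chunk caching policy, by example: if avg_chunks_count is 4.0 and
 * chunks_count + cached_chunks_count is currently 3, the freed chunk is kept
 * on the cached_chunks list for reuse within the request. Otherwise one chunk
 * is returned to the OS: the freed chunk itself if it is younger (has a
 * bigger num) than the cached head, or the cached head otherwise, so the
 * older chunk stays cached.
 */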
1120
1121static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count, int free_chunk)
1122{
1123    chunk->free_pages += pages_count;
1124    zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1125    chunk->map[page_num] = 0;
1126    if (chunk->free_tail == page_num + pages_count) {
        /* this value may not be accurate */
1128        chunk->free_tail = page_num;
1129    }
1130    if (free_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1131        zend_mm_delete_chunk(heap, chunk);
1132    }
1133}
1134
1135static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1136{
1137    zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
1138}
1139
1140static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1141{
1142#if ZEND_MM_STAT
1143    heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
1144#endif
1145    zend_mm_free_pages(heap, chunk, page_num, pages_count);
1146}
1147
1148/**************/
1149/* Small Runs */
1150/**************/
1151
/* highest set bit number, counted from 1 (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8, etc.) */
1153static zend_always_inline int zend_mm_small_size_to_bit(int size)
1154{
1155#if defined(__GNUC__) || __has_builtin(__builtin_clz)
1156    return (__builtin_clz(size) ^ 0x1f) + 1;
1157#elif defined(_WIN32)
1158    unsigned long index;
1159
1160    if (!BitScanReverse(&index, (unsigned long)size)) {
1161        /* undefined behavior */
1162        return 64;
1163    }
1164
1165    return (((31 - (int)index) ^ 0x1f) + 1);
1166#else
1167    int n = 16;
1168    if (size <= 0x00ff) {n -= 8; size = size << 8;}
1169    if (size <= 0x0fff) {n -= 4; size = size << 4;}
1170    if (size <= 0x3fff) {n -= 2; size = size << 2;}
1171    if (size <= 0x7fff) {n -= 1;}
1172    return n;
1173#endif
1174}
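
/*
 * Example (illustrative): zend_mm_small_size_to_bit(100) == 7, because the
 * highest set bit of 100 (0b1100100) is bit 6 and bits are counted from 1
 * here: (__builtin_clz(100) ^ 0x1f) + 1 == (25 ^ 31) + 1 == 7.
 */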
1175
1176#ifndef MAX
1177# define MAX(a, b) (((a) > (b)) ? (a) : (b))
1178#endif
1179
1180#ifndef MIN
1181# define MIN(a, b) (((a) < (b)) ? (a) : (b))
1182#endif
1183
1184static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1185{
1186#if 0
1187    int n;
1188                            /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
1189    static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
1190    static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};
1191
1192    if (UNEXPECTED(size <= 2)) return 0;
1193    n = zend_mm_small_size_to_bit(size - 1);
1194    return ((size-1) >> f1[n]) + f2[n];
1195#else
1196    int t1, t2, t3;
1197
1198    if (UNEXPECTED(size <= 8)) return 0;
1199    t1 = (int)(size - 1);
1200    t2 = zend_mm_small_size_to_bit(t1);
1201    t3 = t2 - 6;
1202    t3 = (t3 < 0) ? 0 : t3;
1203    t2 = t3 + 3;
1204    t1 = t1 >> t2;
1205    t3 = t3 << 2;
1206    return t1 + t3;
1207#endif
1208}
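
/*
 * Example (illustrative) for the branch compiled above, size == 100:
 *   t1 = 99, t2 = zend_mm_small_size_to_bit(99) = 7, t3 = max(7 - 6, 0) = 1,
 *   t2 = 1 + 3 = 4, t1 = 99 >> 4 = 6, t3 = 1 << 2 = 4  ->  bin 6 + 4 = 10.
 * With the default size table in zend_alloc_sizes.h, bin #10 holds 112-byte
 * elements, so a 100-byte request is rounded up to 112 bytes. Requests of
 * 8 bytes or less always map to bin #0.
 */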
1209
1210#define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)
1211
1212static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1213{
1214    zend_mm_chunk *chunk;
1215    int page_num;
1216    zend_mm_bin *bin;
1217    zend_mm_free_slot *p, *end;
1218
1219#if ZEND_DEBUG
1220    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1221#else
1222    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1223#endif
1224    if (UNEXPECTED(bin == NULL)) {
1225        /* insufficient memory */
1226        return NULL;
1227    }
1228
1229    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1230    page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
1231    chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1232    if (bin_pages[bin_num] > 1) {
1233        int i = 1;
1234        do {
1235            chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
1236            i++;
1237        } while (i < bin_pages[bin_num]);
1238    }
1239
1240    /* create a linked list of elements from 1 to last */
1241    end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1242    heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1243    do {
        p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1245#if ZEND_DEBUG
1246        do {
1247            zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1248            dbg->size = 0;
1249        } while (0);
1250#endif
1251        p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1252    } while (p != end);
1253
1254    /* terminate list using NULL */
1255    p->next_free_slot = NULL;
1256#if ZEND_DEBUG
1257        do {
1258            zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1259            dbg->size = 0;
1260        } while (0);
1261#endif
1262
1263    /* return first element */
1264    return (char*)bin;
1265}
1266
1267static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1268{
1269#if ZEND_MM_STAT
1270    do {
1271        size_t size = heap->size + bin_data_size[bin_num];
1272        size_t peak = MAX(heap->peak, size);
1273        heap->size = size;
1274        heap->peak = peak;
1275    } while (0);
1276#endif
1277
1278    if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
1279        zend_mm_free_slot *p = heap->free_slot[bin_num];
1280        heap->free_slot[bin_num] = p->next_free_slot;
1281        return (void*)p;
1282    } else {
1283        return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1284    }
1285}
1286
1287static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1288{
1289    zend_mm_free_slot *p;
1290
1291#if ZEND_MM_STAT
1292    heap->size -= bin_data_size[bin_num];
1293#endif
1294
1295#if ZEND_DEBUG
1296    do {
1297        zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1298        dbg->size = 0;
1299    } while (0);
1300#endif
1301
1302    p = (zend_mm_free_slot*)ptr;
1303    p->next_free_slot = heap->free_slot[bin_num];
1304    heap->free_slot[bin_num] = p;
1305}
1306
1307/********/
1308/* Heap */
1309/********/
1310
1311#if ZEND_DEBUG
1312static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
1313{
1314    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1315    zend_mm_chunk *chunk;
1316    int page_num;
1317    zend_mm_page_info info;
1318
1319    ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
1320    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1321    page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1322    info = chunk->map[page_num];
1323    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1324    if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1325        int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1326        return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1327    } else /* if (info & ZEND_MM_IS_LRUN) */ {
1328        int pages_count = ZEND_MM_LRUN_PAGES(info);
1329
1330        return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1331    }
1332}
1333#endif
1334
1335static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1336{
1337    void *ptr;
1338#if ZEND_DEBUG
1339    size_t real_size = size;
1340    zend_mm_debug_info *dbg;
1341
1342    /* special handling for zero-size allocation */
1343    size = MAX(size, 1);
1344    size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1345#endif
1346    if (size <= ZEND_MM_MAX_SMALL_SIZE) {
1347        ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1348#if ZEND_DEBUG
1349        dbg = zend_mm_get_debug_info(heap, ptr);
1350        dbg->size = real_size;
1351        dbg->filename = __zend_filename;
1352        dbg->orig_filename = __zend_orig_filename;
1353        dbg->lineno = __zend_lineno;
1354        dbg->orig_lineno = __zend_orig_lineno;
1355#endif
1356        return ptr;
1357    } else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
1358        ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1359#if ZEND_DEBUG
1360        dbg = zend_mm_get_debug_info(heap, ptr);
1361        dbg->size = real_size;
1362        dbg->filename = __zend_filename;
1363        dbg->orig_filename = __zend_orig_filename;
1364        dbg->lineno = __zend_lineno;
1365        dbg->orig_lineno = __zend_orig_lineno;
1366#endif
1367        return ptr;
1368    } else {
1369#if ZEND_DEBUG
1370        size = real_size;
1371#endif
1372        return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1373    }
1374}
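
/*
 * Routing summary for zend_mm_alloc_heap() (threshold values assume the
 * default configuration): requests up to ZEND_MM_MAX_SMALL_SIZE (3072 bytes)
 * go to the small bins, requests up to ZEND_MM_MAX_LARGE_SIZE (one chunk
 * minus the reserved first page) become large page runs, and anything bigger
 * becomes a huge mmap()ed block. In debug builds the compared size already
 * includes the zend_mm_debug_info trailer added above.
 */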
1375
1376static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1377{
1378    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1379
1380    if (UNEXPECTED(page_offset == 0)) {
1381        if (ptr != NULL) {
1382            zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1383        }
1384    } else {
1385        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1386        int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1387        zend_mm_page_info info = chunk->map[page_num];
1388
1389        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1390        if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1391            zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
1392        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1393            int pages_count = ZEND_MM_LRUN_PAGES(info);
1394
1395            ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1396            zend_mm_free_large(heap, chunk, page_num, pages_count);
1397        }
1398    }
1399}
1400
1401static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1402{
1403    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1404
1405    if (UNEXPECTED(page_offset == 0)) {
1406        return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1407    } else {
1408        zend_mm_chunk *chunk;
1409#if 0 && ZEND_DEBUG
1410        zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
1411        return dbg->size;
1412#else
1413        int page_num;
1414        zend_mm_page_info info;
1415
1416        chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1417        page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1418        info = chunk->map[page_num];
1419        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1420        if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1421            return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1423            return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1424        }
1425#endif
1426    }
1427}
1428
1429static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1430{
1431    size_t page_offset;
1432    size_t old_size;
1433    size_t new_size;
1434    void *ret;
1435#if ZEND_DEBUG
1436    size_t real_size;
1437    zend_mm_debug_info *dbg;
1438#endif
1439
1440    page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1441    if (UNEXPECTED(page_offset == 0)) {
1442        if (UNEXPECTED(ptr == NULL)) {
1443            return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1444        }
1445        old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1446#if ZEND_DEBUG
1447        real_size = size;
1448        size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1449#endif
1450        if (size > ZEND_MM_MAX_LARGE_SIZE) {
1451#if ZEND_DEBUG
1452            size = real_size;
1453#endif
1454            new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
1455            if (new_size == old_size) {
1456#if ZEND_DEBUG
1457                zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1458#else
1459                zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1460#endif
1461                return ptr;
1462            } else if (new_size < old_size) {
                /* unmap the tail */
1464                if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
1465#if ZEND_MM_STAT || ZEND_MM_LIMIT
1466                    heap->real_size -= old_size - new_size;
1467#endif
1468#if ZEND_MM_STAT
1469                    heap->size -= old_size - new_size;
1470#endif
1471#if ZEND_DEBUG
1472                    zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1473#else
1474                    zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1475#endif
1476                    return ptr;
1477                }
1478            } else /* if (new_size > old_size) */ {
1479#if ZEND_MM_LIMIT
1480                if (UNEXPECTED(heap->real_size + (new_size - old_size) > heap->limit)) {
1481                    if (zend_mm_gc(heap) && heap->real_size + (new_size - old_size) <= heap->limit) {
1482                        /* pass */
1483                    } else if (heap->overflow == 0) {
1484#if ZEND_DEBUG
1485                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1486#else
1487                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1488#endif
1489                        return NULL;
1490                    }
1491                }
1492#endif
1493                /* try to map tail right after this block */
1494                if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
1495#if ZEND_MM_STAT || ZEND_MM_LIMIT
1496                    heap->real_size += new_size - old_size;
1497#endif
1498#if ZEND_MM_STAT
1499                    heap->size += new_size - old_size;
1500#endif
1501#if ZEND_DEBUG
1502                    zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1503#else
1504                    zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1505#endif
1506                    return ptr;
1507                }
1508            }
1509        }
1510    } else {
1511        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1512        int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1513        zend_mm_page_info info = chunk->map[page_num];
1514#if ZEND_DEBUG
1515        size_t real_size = size;
1516
1517        size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1518#endif
1519
1520        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1521        if (info & ZEND_MM_IS_SRUN) {
1522            int old_bin_num, bin_num;
1523
1524            old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1525            old_size = bin_data_size[old_bin_num];
1526            bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size);
1527            if (old_bin_num == bin_num) {
1528#if ZEND_DEBUG
1529                dbg = zend_mm_get_debug_info(heap, ptr);
1530                dbg->size = real_size;
1531                dbg->filename = __zend_filename;
1532                dbg->orig_filename = __zend_orig_filename;
1533                dbg->lineno = __zend_lineno;
1534                dbg->orig_lineno = __zend_orig_lineno;
1535#endif
1536                return ptr;
1537            }
1538        } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
1539            ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1540            old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1541            if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
1542                new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1543                if (new_size == old_size) {
1544#if ZEND_DEBUG
1545                    dbg = zend_mm_get_debug_info(heap, ptr);
1546                    dbg->size = real_size;
1547                    dbg->filename = __zend_filename;
1548                    dbg->orig_filename = __zend_orig_filename;
1549                    dbg->lineno = __zend_lineno;
1550                    dbg->orig_lineno = __zend_orig_lineno;
1551#endif
1552                    return ptr;
1553                } else if (new_size < old_size) {
1554                    /* free tail pages */
1555                    int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1556                    int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);
1557
1558#if ZEND_MM_STAT
1559                    heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
1560#endif
1561                    chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1562                    chunk->free_pages += rest_pages_count;
1563                    zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1564#if ZEND_DEBUG
1565                    dbg = zend_mm_get_debug_info(heap, ptr);
1566                    dbg->size = real_size;
1567                    dbg->filename = __zend_filename;
1568                    dbg->orig_filename = __zend_orig_filename;
1569                    dbg->lineno = __zend_lineno;
1570                    dbg->orig_lineno = __zend_orig_lineno;
1571#endif
1572                    return ptr;
1573                } else /* if (new_size > old_size) */ {
1574                    int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1575                    int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);
1576
1577                    /* try to allocate tail pages after this block */
1578                    if (page_num + new_pages_count <= ZEND_MM_PAGES &&
1579                        zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
1580#if ZEND_MM_STAT
1581                        do {
1582                            size_t size = heap->size + (new_size - old_size);
1583                            size_t peak = MAX(heap->peak, size);
1584                            heap->size = size;
1585                            heap->peak = peak;
1586                        } while (0);
1587#endif
1588                        chunk->free_pages -= new_pages_count - old_pages_count;
1589                        zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
1590                        chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1591#if ZEND_DEBUG
1592                        dbg = zend_mm_get_debug_info(heap, ptr);
1593                        dbg->size = real_size;
1594                        dbg->filename = __zend_filename;
1595                        dbg->orig_filename = __zend_orig_filename;
1596                        dbg->lineno = __zend_lineno;
1597                        dbg->orig_lineno = __zend_orig_lineno;
1598#endif
1599                        return ptr;
1600                    }
1601                }
1602            }
1603        }
1604#if ZEND_DEBUG
1605        size = real_size;
1606#endif
1607    }
1608
1609    /* Naive reallocation */
1610    ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1611    memcpy(ret, ptr, MIN(old_size, copy_size));
1612    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1613    return ret;
1614}
1615
1616/*********************/
1617/* Huge Runs (again) */
1618/*********************/
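/*
 * Huge allocations are tracked in a per-heap singly-linked list
 * (heap->huge_list). Each node records the mapped pointer, the real mapped
 * size and, in debug builds, the requested size and allocation site. Lookups
 * are linear, which is fine because huge blocks are comparatively rare.
 */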
1619
1620#if ZEND_DEBUG
1621static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1622#else
1623static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1624#endif
1625{
1626    zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1627    list->ptr = ptr;
1628    list->size = size;
1629    list->next = heap->huge_list;
1630#if ZEND_DEBUG
1631    list->dbg.size = dbg_size;
1632    list->dbg.filename = __zend_filename;
1633    list->dbg.orig_filename = __zend_orig_filename;
1634    list->dbg.lineno = __zend_lineno;
1635    list->dbg.orig_lineno = __zend_orig_lineno;
1636#endif
1637    heap->huge_list = list;
1638}
1639
1640static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1641{
1642    zend_mm_huge_list *prev = NULL;
1643    zend_mm_huge_list *list = heap->huge_list;
1644    while (list != NULL) {
1645        if (list->ptr == ptr) {
1646            size_t size;
1647
1648            if (prev) {
1649                prev->next = list->next;
1650            } else {
1651                heap->huge_list = list->next;
1652            }
1653            size = list->size;
1654            zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1655            return size;
1656        }
1657        prev = list;
1658        list = list->next;
1659    }
1660    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1661    return 0;
1662}
1663
1664static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1665{
1666    zend_mm_huge_list *list = heap->huge_list;
1667    while (list != NULL) {
1668        if (list->ptr == ptr) {
1669            return list->size;
1670        }
1671        list = list->next;
1672    }
1673    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1674    return 0;
1675}
1676
1677#if ZEND_DEBUG
1678static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1679#else
1680static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1681#endif
1682{
1683    zend_mm_huge_list *list = heap->huge_list;
1684    while (list != NULL) {
1685        if (list->ptr == ptr) {
1686            list->size = size;
1687#if ZEND_DEBUG
1688            list->dbg.size = dbg_size;
1689            list->dbg.filename = __zend_filename;
1690            list->dbg.orig_filename = __zend_orig_filename;
1691            list->dbg.lineno = __zend_lineno;
1692            list->dbg.orig_lineno = __zend_orig_lineno;
1693#endif
1694            return;
1695        }
1696        list = list->next;
1697    }
1698}
1699
1700static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1701{
1702    size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
1703    void *ptr;
1704
1705#if ZEND_MM_LIMIT
1706    if (UNEXPECTED(heap->real_size + new_size > heap->limit)) {
1707        if (zend_mm_gc(heap) && heap->real_size + new_size <= heap->limit) {
1708            /* pass */
1709        } else if (heap->overflow == 0) {
1710#if ZEND_DEBUG
1711            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1712#else
1713            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1714#endif
1715            return NULL;
1716        }
1717    }
1718#endif
1719    ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
1720    if (UNEXPECTED(ptr == NULL)) {
1721        /* insufficient memory */
1722        if (zend_mm_gc(heap) &&
1723            (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
1724            /* pass */
1725        } else {
1726#if !ZEND_MM_LIMIT
1727            zend_mm_safe_error(heap, "Out of memory");
1728#elif ZEND_DEBUG
1729            zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1730#else
1731            zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
1732#endif
1733            return NULL;
1734        }
1735    }
1736#if ZEND_DEBUG
1737    zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1738#else
1739    zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1740#endif
1741#if ZEND_MM_STAT
1742    do {
1743        size_t size = heap->real_size + new_size;
1744        size_t peak = MAX(heap->real_peak, size);
1745        heap->real_size = size;
1746        heap->real_peak = peak;
1747    } while (0);
1748    do {
1749        size_t size = heap->size + new_size;
1750        size_t peak = MAX(heap->peak, size);
1751        heap->size = size;
1752        heap->peak = peak;
1753    } while (0);
1754#elif ZEND_MM_LIMIT
1755    heap->real_size += new_size;
1756#endif
1757    return ptr;
1758}
1759
1760static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1761{
1762    size_t size;
1763
1764    ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
1765    size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1766    zend_mm_chunk_free(heap, ptr, size);
1767#if ZEND_MM_STAT || ZEND_MM_LIMIT
1768    heap->real_size -= size;
1769#endif
1770#if ZEND_MM_STAT
1771    heap->size -= size;
1772#endif
1773}
1774
1775/******************/
1776/* Initialization */
1777/******************/
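/*
 * zend_mm_init() obtains the very first chunk with zend_mm_chunk_alloc_int()
 * and places the heap itself into that chunk's heap_slot, so no bootstrap
 * allocation is needed. The chunk header (free_map, map, heap pointer, ...)
 * occupies the first ZEND_MM_FIRST_PAGE page(s), which is why free_pages
 * starts at ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE and map[0] is marked as an
 * LRUN covering the reserved page(s).
 */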
1778
1779static zend_mm_heap *zend_mm_init(void)
1780{
1781    zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1782    zend_mm_heap *heap;
1783
1784    if (UNEXPECTED(chunk == NULL)) {
1785#if ZEND_MM_ERROR
1786#ifdef _WIN32
1787        stderr_last_error("Can't initialize heap");
1788#else
1789        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
1790#endif
1791#endif
1792        return NULL;
1793    }
1794    heap = &chunk->heap_slot;
1795    chunk->heap = heap;
1796    chunk->next = chunk;
1797    chunk->prev = chunk;
1798    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1799    chunk->free_tail = ZEND_MM_FIRST_PAGE;
1800    chunk->num = 0;
1801    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
1802    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
1803    heap->main_chunk = chunk;
1804    heap->cached_chunks = NULL;
1805    heap->chunks_count = 1;
1806    heap->peak_chunks_count = 1;
1807    heap->cached_chunks_count = 0;
1808    heap->avg_chunks_count = 1.0;
1809#if ZEND_MM_STAT || ZEND_MM_LIMIT
1810    heap->real_size = ZEND_MM_CHUNK_SIZE;
1811#endif
1812#if ZEND_MM_STAT
1813    heap->real_peak = ZEND_MM_CHUNK_SIZE;
1814    heap->size = 0;
1815    heap->peak = 0;
1816#endif
1817#if ZEND_MM_LIMIT
1818    heap->limit = (Z_L(-1) >> Z_L(1));
1819    heap->overflow = 0;
1820#endif
1821#if ZEND_MM_CUSTOM
1822    heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
1823#endif
1824#if ZEND_MM_STORAGE
1825    heap->storage = NULL;
1826#endif
1827    heap->huge_list = NULL;
1828    return heap;
1829}
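
/*
 * zend_mm_gc() reclaims memory cached on the small-size free lists:
 * a first pass over each bin's free list counts free elements per run using
 * the SRUN free counter stored in the page map; runs in which every element
 * turned out to be free are then unlinked from the free list in a second
 * pass; finally the chunks are scanned, fully free runs are released and
 * chunks that became completely empty are deleted. The return value is the
 * number of bytes handed back to the page level.
 */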
1830
1831ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
1832{
1833    zend_mm_free_slot *p, **q;
1834    zend_mm_chunk *chunk;
1835    size_t page_offset;
1836    int page_num;
1837    zend_mm_page_info info;
1838    int i, has_free_pages, free_counter;
1839    size_t collected = 0;
1840
1841#if ZEND_MM_CUSTOM
1842    if (heap->use_custom_heap) {
1843        return 0;
1844    }
1845#endif
1846
1847    for (i = 0; i < ZEND_MM_BINS; i++) {
1848        has_free_pages = 0;
1849        p = heap->free_slot[i];
1850        while (p != NULL) {
1851            chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1852            ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1853            page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
1854            ZEND_ASSERT(page_offset != 0);
1855            page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1856            info = chunk->map[page_num];
1857            ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1858            if (info & ZEND_MM_IS_LRUN) {
1859                page_num -= ZEND_MM_NRUN_OFFSET(info);
1860                info = chunk->map[page_num];
1861                ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1862                ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
1863            }
1864            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
1865            free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
1866            if (free_counter == bin_elements[i]) {
1867                has_free_pages = 1;
1868            }
1869            chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
1870            p = p->next_free_slot;
1871        }
1872
1873        if (!has_free_pages) {
1874            continue;
1875        }
1876
1877        q = &heap->free_slot[i];
1878        p = *q;
1879        while (p != NULL) {
1880            chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1881            ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1882            page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
1883            ZEND_ASSERT(page_offset != 0);
1884            page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1885            info = chunk->map[page_num];
1886            ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1887            if (info & ZEND_MM_IS_LRUN) {
1888                page_num -= ZEND_MM_NRUN_OFFSET(info);
1889                info = chunk->map[page_num];
1890                ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1891                ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
1892            }
1893            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
1894            if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
1895                /* remove from cache */
1896                p = p->next_free_slot;
1897                *q = p;
1898            } else {
1899                q = &p->next_free_slot;
1900                p = *q;
1901            }
1902        }
1903    }
1904
1905    chunk = heap->main_chunk;
1906    do {
1907        i = ZEND_MM_FIRST_PAGE;
1908        while (i < chunk->free_tail) {
1909            if (zend_mm_bitset_is_set(chunk->free_map, i)) {
1910                info = chunk->map[i];
1911                if (info & ZEND_MM_IS_SRUN) {
1912                    int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1913                    int pages_count = bin_pages[bin_num];
1914
1915                    if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
1916                        /* all elements are free */
1917                        zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
1918                        collected += pages_count;
1919                    } else {
1920                        /* reset counter */
1921                        chunk->map[i] = ZEND_MM_SRUN(bin_num);
1922                    }
1923                    i += bin_pages[bin_num];
1924                } else /* if (info & ZEND_MM_IS_LRUN) */ {
1925                    i += ZEND_MM_LRUN_PAGES(info);
1926                }
1927            } else {
1928                i++;
1929            }
1930        }
1931        if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1932            zend_mm_chunk *next_chunk = chunk->next;
1933
1934            zend_mm_delete_chunk(heap, chunk);
1935            chunk = next_chunk;
1936        } else {
1937            chunk = chunk->next;
1938        }
1939    } while (chunk != heap->main_chunk);
1940
1941    return collected * ZEND_MM_PAGE_SIZE;
1942}
1943
1944#if ZEND_DEBUG
1945/******************/
1946/* Leak detection */
1947/******************/
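/*
 * At shutdown every block that is still marked as used is reported through
 * zend_message_dispatcher(). Leaks originating from the same allocation site
 * (filename/lineno) are folded into a single report with a repeat count;
 * zend_mm_find_leaks_small() and zend_mm_find_leaks() perform this folding
 * for small runs and large runs respectively.
 */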
1948
1949static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, int i, int j, zend_leak_info *leak)
1950{
1951    int empty = 1;
1952    zend_long count = 0;
1953    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1954    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1955
1956    while (j < bin_elements[bin_num]) {
1957        if (dbg->size != 0) {
1958            if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
1959                count++;
1960                dbg->size = 0;
1961                dbg->filename = NULL;
1962                dbg->lineno = 0;
1963            } else {
1964                empty = 0;
1965            }
1966        }
1967        j++;
1968        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
1969    }
1970    if (empty) {
1971        zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
1972    }
1973    return count;
1974}
1975
1976static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, int i, zend_leak_info *leak)
1977{
1978    zend_long count = 0;
1979
1980    do {
1981        while (i < p->free_tail) {
1982            if (zend_mm_bitset_is_set(p->free_map, i)) {
1983                if (p->map[i] & ZEND_MM_IS_SRUN) {
1984                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1985                    count += zend_mm_find_leaks_small(p, i, 0, leak);
1986                    i += bin_pages[bin_num];
1987                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
1988                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
1989                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1990
1991                    if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
1992                        count++;
1993                    }
1994                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);
1995                    i += pages_count;
1996                }
1997            } else {
1998                i++;
1999            }
2000        }
2001        p = p->next;
2002    } while (p != heap->main_chunk);
2003    return count;
2004}
2005
2006static void zend_mm_check_leaks(zend_mm_heap *heap)
2007{
2008    zend_mm_huge_list *list;
2009    zend_mm_chunk *p;
2010    zend_leak_info leak;
2011    zend_long repeated = 0;
2012    uint32_t total = 0;
2013    int i, j;
2014
2015    /* find leaked huge blocks and free them */
2016    list = heap->huge_list;
2017    while (list) {
2018        zend_mm_huge_list *q = list;
2019
2020        heap->huge_list = list->next;
2021
2022        leak.addr = list->ptr;
2023        leak.size = list->dbg.size;
2024        leak.filename = list->dbg.filename;
2025        leak.orig_filename = list->dbg.orig_filename;
2026        leak.lineno = list->dbg.lineno;
2027        leak.orig_lineno = list->dbg.orig_lineno;
2028
2029        zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2030        zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2031        /* TODO: repeated = zend_mm_find_leaks_huge(segment, p); */
2032        total += 1 + repeated;
2033        if (repeated) {
2034            zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
2035        }
2036
2037        list = list->next;
2038        zend_mm_chunk_free(heap, q->ptr, q->size);
2039        zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
2040    }
2041
2042    /* for each chunk */
2043    p = heap->main_chunk;
2044    do {
2045        i = ZEND_MM_FIRST_PAGE;
2046        while (i < p->free_tail) {
2047            if (zend_mm_bitset_is_set(p->free_map, i)) {
2048                if (p->map[i] & ZEND_MM_IS_SRUN) {
2049                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2050                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2051
2052                    j = 0;
2053                    while (j < bin_elements[bin_num]) {
2054                        if (dbg->size != 0) {
2055                            leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
2056                            leak.size = dbg->size;
2057                            leak.filename = dbg->filename;
2058                            leak.orig_filename = dbg->orig_filename;
2059                            leak.lineno = dbg->lineno;
2060                            leak.orig_lineno = dbg->orig_lineno;
2061
2062                            zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2063                            zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2064
2065                            dbg->size = 0;
2066                            dbg->filename = NULL;
2067                            dbg->lineno = 0;
2068
2069                            repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
2070                                       zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
2071                            total += 1 + repeated;
2072                            if (repeated) {
2073                                zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
2074                            }
2075                        }
2076                        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2077                        j++;
2078                    }
2079                    i += bin_pages[bin_num];
2080                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2081                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2082                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2083
2084                    leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
2085                    leak.size = dbg->size;
2086                    leak.filename = dbg->filename;
2087                    leak.orig_filename = dbg->orig_filename;
2088                    leak.lineno = dbg->lineno;
2089                    leak.orig_lineno = dbg->orig_lineno;
2090
2091                    zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2092                    zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2093
2094                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2095
2096                    repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
2097                    total += 1 + repeated;
2098                    if (repeated) {
2099                        zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
2100                    }
2101                    i += pages_count;
2102                }
2103            } else {
2104                i++;
2105            }
2106        }
2107        p = p->next;
2108    } while (p != heap->main_chunk);
2109    if (total) {
2110        zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
2111    }
2112}
2113#endif
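
/*
 * zend_mm_shutdown() has two modes: with full != 0 everything, including
 * cached chunks, is returned to the OS; otherwise the heap keeps its first
 * chunk plus a small cache of chunks sized by a decaying average of the
 * per-request peak (avg_chunks_count), so the next request can start without
 * immediately asking the OS for new chunks.
 */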
2114
2115void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
2116{
2117    zend_mm_chunk *p;
2118    zend_mm_huge_list *list;
2119
2120#if ZEND_MM_CUSTOM
2121    if (heap->use_custom_heap) {
2122        if (full) {
2123            if (ZEND_DEBUG && heap->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2124                heap->custom_heap.debug._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
2125            } else {
2126                heap->custom_heap.std._free(heap);
2127            }
2128        }
2129        return;
2130    }
2131#endif
2132
2133#if ZEND_DEBUG
2134    if (!silent) {
2135        zend_mm_check_leaks(heap);
2136    }
2137#endif
2138
2139    /* free huge blocks */
2140    list = heap->huge_list;
2141    while (list) {
2142        zend_mm_huge_list *q = list;
2143        list = list->next;
2144        zend_mm_chunk_free(heap, q->ptr, q->size);
2145    }
2146
2147    /* move all chunks except the first one into the cache */
2148    p = heap->main_chunk->next;
2149    while (p != heap->main_chunk) {
2150        zend_mm_chunk *q = p->next;
2151        p->next = heap->cached_chunks;
2152        heap->cached_chunks = p;
2153        p = q;
2154        heap->chunks_count--;
2155        heap->cached_chunks_count++;
2156    }
2157
2158    if (full) {
2159        /* free all cached chunks */
2160        while (heap->cached_chunks) {
2161            p = heap->cached_chunks;
2162            heap->cached_chunks = p->next;
2163            zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2164        }
2165        /* free the first chunk */
2166        zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
2167    } else {
2168        zend_mm_heap old_heap;
2169
2170        /* free some cached chunks to keep average count */
2171        heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
2172        while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
2173               heap->cached_chunks) {
2174            p = heap->cached_chunks;
2175            heap->cached_chunks = p->next;
2176            zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2177            heap->cached_chunks_count--;
2178        }
2179        /* clear cached chunks */
2180        p = heap->cached_chunks;
2181        while (p != NULL) {
2182            zend_mm_chunk *q = p->next;
2183            memset(p, 0, sizeof(zend_mm_chunk));
2184            p->next = q;
2185            p = q;
2186        }
2187
2188        /* reinitialize the first chunk and heap */
2189        old_heap = *heap;
2190        p = heap->main_chunk;
2191        memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE);
2192        *heap = old_heap;
2193        memset(heap->free_slot, 0, sizeof(heap->free_slot));
2194        heap->main_chunk = p;
2195        p->heap = &p->heap_slot;
2196        p->next = p;
2197        p->prev = p;
2198        p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2199        p->free_tail = ZEND_MM_FIRST_PAGE;
2200        p->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2201        p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2202        heap->chunks_count = 1;
2203        heap->peak_chunks_count = 1;
2204#if ZEND_MM_STAT || ZEND_MM_LIMIT
2205        heap->real_size = ZEND_MM_CHUNK_SIZE;
2206#endif
2207#if ZEND_MM_STAT
2208        heap->real_peak = ZEND_MM_CHUNK_SIZE;
2209        heap->size = heap->peak = 0;
2210#endif
2211    }
2212}
2213
2214/**************/
2215/* PUBLIC API */
2216/**************/
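/*
 * Rough usage sketch (illustration only, not compiled as part of this file).
 * The emalloc()/erealloc()/efree() macros from zend_alloc.h relay the
 * caller's file/line into the _emalloc()/_erealloc()/_efree() entry points
 * in debug builds:
 *
 *   char *buf = emalloc(len + 1);
 *   buf = erealloc(buf, 2 * len + 1);
 *   efree(buf);
 *
 * All of them operate on the thread's current heap, AG(mm_heap).
 */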
2217
2218ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2219{
2220    return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2221}
2222
2223ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2224{
2225    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2226}
2227
2228void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2229{
2230    return zend_mm_realloc_heap(heap, ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2231}
2232
2233void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2234{
2235    return zend_mm_realloc_heap(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2236}
2237
2238ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2239{
2240    return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2241}
2242
2243/**********************/
2244/* Allocation Manager */
2245/**********************/
2246
2247typedef struct _zend_alloc_globals {
2248    zend_mm_heap *mm_heap;
2249} zend_alloc_globals;
2250
2251#ifdef ZTS
2252static int alloc_globals_id;
2253# define AG(v) ZEND_TSRMG(alloc_globals_id, zend_alloc_globals *, v)
2254#else
2255# define AG(v) (alloc_globals.v)
2256static zend_alloc_globals alloc_globals;
2257#endif
2258
2259ZEND_API int is_zend_mm(void)
2260{
2261#if ZEND_MM_CUSTOM
2262    return !AG(mm_heap)->use_custom_heap;
2263#else
2264    return 1;
2265#endif
2266}
2267
2268#if !ZEND_DEBUG && !defined(_WIN32)
2269#undef _emalloc
2270
2271#if ZEND_MM_CUSTOM
2272# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
2273        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2274            if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
2275                return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2276            } else { \
2277                return AG(mm_heap)->custom_heap.std._malloc(size); \
2278            } \
2279        } \
2280    } while (0)
2281# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
2282        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2283            if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
2284                AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2285            } else { \
2286                AG(mm_heap)->custom_heap.std._free(ptr); \
2287            } \
2288            return; \
2289        } \
2290    } while (0)
2291#else
2292# define ZEND_MM_CUSTOM_ALLOCATOR(size)
2293# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
2294#endif
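
/*
 * _ZEND_BIN_ALLOCATOR below is instantiated once per predefined small bin
 * size by ZEND_MM_BINS_INFO, generating specialized entry points such as
 * _emalloc_8(), _emalloc_16(), ... that skip the generic size-to-bin lookup.
 */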
2295
2296# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
2297    ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
2298        ZEND_MM_CUSTOM_ALLOCATOR(_size); \
2299        return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2300    }
2301
2302ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
2303
2304ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2305{
2307    ZEND_MM_CUSTOM_ALLOCATOR(size);
2308    return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2309}
2310
2311ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
2312{
2314    ZEND_MM_CUSTOM_ALLOCATOR(size);
2315    return zend_mm_alloc_huge(AG(mm_heap), size);
2316}
2317
2318#if ZEND_DEBUG
2319# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
2320    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2321        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2322        { \
2323            size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
2324            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2325            int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
2326            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2327            ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2328            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2329            zend_mm_free_small(AG(mm_heap), ptr, _num); \
2330        } \
2331    }
2332#else
2333# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
2334    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2335        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2336        { \
2337            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2338            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2339            zend_mm_free_small(AG(mm_heap), ptr, _num); \
2340        } \
2341    }
2342#endif
2343
2344ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
2345
2346ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
2347{
2349    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2350    {
2351        size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
2352        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
2353        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
2354        int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;
2355
2356        ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
2357        ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2358        ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2359        zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
2360    }
2361}
2362
2363ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
2364{
2366    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2367    /* TODO: use size? */
2368    zend_mm_free_huge(AG(mm_heap), ptr);
2369}
2370#endif
2371
2372ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2373{
2375#if ZEND_MM_CUSTOM
2376    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2377        if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2378            return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2379        } else {
2380            return AG(mm_heap)->custom_heap.std._malloc(size);
2381        }
2382    }
2383#endif
2384    return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2385}
2386
2387ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2388{
2390#if ZEND_MM_CUSTOM
2391    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2392        if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2393            AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2394        } else {
2395            AG(mm_heap)->custom_heap.std._free(ptr);
2396        }
2397        return;
2398    }
2399#endif
2400    zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2401}
2402
2403ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2404{
2406    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2407        if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2408            return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2409        } else {
2410            return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
2411        }
2412    }
2413    return zend_mm_realloc_heap(AG(mm_heap), ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2414}
2415
2416ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2417{
2419    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2420        if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2421            return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2422        } else {
2423            return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
2424        }
2425    }
2426    return zend_mm_realloc_heap(AG(mm_heap), ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2427}
2428
2429ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2430{
2431    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2432        return 0;
2433    }
2434    return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2435}
2436
2437static zend_always_inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2438{
2439    int overflow;
2440    size_t ret = zend_safe_address(nmemb, size, offset, &overflow);
2441
2442    if (UNEXPECTED(overflow)) {
2443        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2444        return 0;
2445    }
2446    return ret;
2447}
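
/*
 * safe_address() computes nmemb * size + offset and turns an overflowing
 * request into a fatal error instead of a silently truncated allocation.
 * For example, on a 32-bit build _safe_emalloc(65536, 65537, 0) would need
 * more than SIZE_MAX bytes, so zend_error_noreturn() fires rather than a
 * short buffer being returned. The _safe_* wrappers below all go through it.
 */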
2448
2449
2450ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2451{
2452    return emalloc_rel(safe_address(nmemb, size, offset));
2453}
2454
2455ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
2456{
2457    return pemalloc(safe_address(nmemb, size, offset), 1);
2458}
2459
2460ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2461{
2462    return erealloc_rel(ptr, safe_address(nmemb, size, offset));
2463}
2464
2465ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
2466{
2467    return perealloc(ptr, safe_address(nmemb, size, offset), 1);
2468}
2469
2470
2471ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2472{
2473    void *p;
2474
2475    p = _safe_emalloc(nmemb, size, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2476    if (UNEXPECTED(p == NULL)) {
2477        return p;
2478    }
2479    memset(p, 0, size * nmemb);
2480    return p;
2481}
2482
2483ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2484{
2485    size_t length;
2486    char *p;
2487
2488    length = strlen(s);
2489    if (UNEXPECTED(length + 1 == 0)) {
2490        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", (size_t)1, length, (size_t)1);
2491    }
2492    p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2493    if (UNEXPECTED(p == NULL)) {
2494        return p;
2495    }
2496    memcpy(p, s, length+1);
2497    return p;
2498}
2499
2500ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2501{
2502    char *p;
2503
2504    if (UNEXPECTED(length + 1 == 0)) {
2505        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", (size_t)1, length, (size_t)1);
2506    }
2507    p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2508    if (UNEXPECTED(p == NULL)) {
2509        return p;
2510    }
2511    memcpy(p, s, length);
2512    p[length] = 0;
2513    return p;
2514}
2515
2516
2517ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2518{
2519    char *p;
2520
2521    if (UNEXPECTED(length + 1 == 0)) {
2522        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", (size_t)1, length, (size_t)1);
2523    }
2524    p = (char *) malloc(length + 1);
2525    if (UNEXPECTED(p == NULL)) {
2526        return p;
2527    }
2528    if (EXPECTED(length)) {
2529        memcpy(p, s, length);
2530    }
2531    p[length] = 0;
2532    return p;
2533}
2534
2535
2536ZEND_API int zend_set_memory_limit(size_t memory_limit)
2537{
2538#if ZEND_MM_LIMIT
2539    AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
2540#endif
2541    return SUCCESS;
2542}
2543
2544ZEND_API size_t zend_memory_usage(int real_usage)
2545{
2546#if ZEND_MM_STAT
2547    if (real_usage) {
2548        return AG(mm_heap)->real_size;
2549    } else {
2550        return AG(mm_heap)->size;
2552    }
2553#endif
2554    return 0;
2555}
2556
2557ZEND_API size_t zend_memory_peak_usage(int real_usage)
2558{
2559#if ZEND_MM_STAT
2560    if (real_usage) {
2561        return AG(mm_heap)->real_peak;
2562    } else {
2563        return AG(mm_heap)->peak;
2564    }
2565#endif
2566    return 0;
2567}
2568
2569ZEND_API void shutdown_memory_manager(int silent, int full_shutdown)
2570{
2571    zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
2572}
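
/*
 * Setting the environment variable USE_ZEND_ALLOC=0 makes the constructor
 * below bypass the Zend allocator: a minimal heap is created whose custom
 * handlers point straight at the system malloc()/free()/realloc(), which is
 * useful when running external memory debuggers.
 */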
2573
2574static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
2575{
2576#if ZEND_MM_CUSTOM
2577    char *tmp = getenv("USE_ZEND_ALLOC");
2578
2579    if (tmp && !zend_atoi(tmp, 0)) {
2580        alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
2581        memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap));
2582        alloc_globals->mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
2583        alloc_globals->mm_heap->custom_heap.std._malloc = malloc;
2584        alloc_globals->mm_heap->custom_heap.std._free = free;
2585        alloc_globals->mm_heap->custom_heap.std._realloc = realloc;
2586        return;
2587    }
2588#endif
2589    ZEND_TSRMLS_CACHE_UPDATE();
2590    alloc_globals->mm_heap = zend_mm_init();
2591}
2592
2593#ifdef ZTS
2594static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
2595{
2596    zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
2597}
2598#endif
2599
2600ZEND_API void start_memory_manager(void)
2601{
2602#ifdef ZTS
2603    ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
2604#else
2605    alloc_globals_ctor(&alloc_globals);
2606#endif
2607#ifndef _WIN32
2608#  if defined(_SC_PAGESIZE)
2609    REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
2610#  elif defined(_SC_PAGE_SIZE)
2611    REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
2612#  endif
2613#endif
2614}
2615
2616ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
2617{
2618    zend_mm_heap *old_heap;
2619
2620    old_heap = AG(mm_heap);
2621    AG(mm_heap) = (zend_mm_heap*)new_heap;
2622    return (zend_mm_heap*)old_heap;
2623}
2624
2625ZEND_API zend_mm_heap *zend_mm_get_heap(void)
2626{
2627    return AG(mm_heap);
2628}
2629
2630ZEND_API int zend_mm_is_custom_heap(zend_mm_heap *new_heap)
2631{
2632#if ZEND_MM_CUSTOM
2633    return AG(mm_heap)->use_custom_heap;
2634#else
2635    return 0;
2636#endif
2637}
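
/*
 * zend_mm_set_custom_handlers() reroutes all allocations on the given heap
 * through user-supplied functions. A rough sketch (assuming a build with
 * ZEND_MM_CUSTOM enabled, which is the default):
 *
 *   zend_mm_heap *heap = zend_mm_get_heap();
 *   zend_mm_set_custom_handlers(heap, malloc, free, realloc);
 *
 * After this, is_zend_mm() reports 0 and _emalloc()/_efree()/_erealloc()
 * delegate to the installed handlers.
 */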
2638
2639ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
2640                                          void* (*_malloc)(size_t),
2641                                          void  (*_free)(void*),
2642                                          void* (*_realloc)(void*, size_t))
2643{
2644#if ZEND_MM_CUSTOM
2645    zend_mm_heap *_heap = (zend_mm_heap*)heap;
2646
2647    _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
2648    _heap->custom_heap.std._malloc = _malloc;
2649    _heap->custom_heap.std._free = _free;
2650    _heap->custom_heap.std._realloc = _realloc;
2651#endif
2652}
2653
2654ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
2655                                          void* (**_malloc)(size_t),
2656                                          void  (**_free)(void*),
2657                                          void* (**_realloc)(void*, size_t))
2658{
2659#if ZEND_MM_CUSTOM
2660    zend_mm_heap *_heap = (zend_mm_heap*)heap;
2661
2662    if (heap->use_custom_heap) {
2663        *_malloc = _heap->custom_heap.std._malloc;
2664        *_free = _heap->custom_heap.std._free;
2665        *_realloc = _heap->custom_heap.std._realloc;
2666    } else {
2667        *_malloc = NULL;
2668        *_free = NULL;
2669        *_realloc = NULL;
2670    }
2671#else
2672    *_malloc = NULL;
2673    *_free = NULL;
2674    *_realloc = NULL;
2675#endif
2676}
2677
2678#if ZEND_DEBUG
2679ZEND_API void zend_mm_set_custom_debug_handlers(zend_mm_heap *heap,
2680                                          void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
2681                                          void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
2682                                          void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
2683{
2684#if ZEND_MM_CUSTOM
2685    zend_mm_heap *_heap = (zend_mm_heap*)heap;
2686
2687    _heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_DEBUG;
2688    _heap->custom_heap.debug._malloc = _malloc;
2689    _heap->custom_heap.debug._free = _free;
2690    _heap->custom_heap.debug._realloc = _realloc;
2691#endif
2692}
2693#endif
2694
2695ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
2696{
2697#if ZEND_MM_STORAGE
2698    return heap->storage;
2699#else
2700    return NULL;
2701#endif
2702}
2703
2704ZEND_API zend_mm_heap *zend_mm_startup(void)
2705{
2706    return zend_mm_init();
2707}
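
/*
 * zend_mm_startup_ex() builds a heap whose chunks come from caller-supplied
 * handlers (handlers->chunk_alloc / handlers->chunk_free are used directly
 * below), so the whole heap can live in a non-default backing store. The
 * zend_mm_storage record, together with a copy of the caller's data, is
 * allocated from the freshly created heap itself and attached as
 * heap->storage.
 */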
2708
2709ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
2710{
2711#if ZEND_MM_STORAGE
2712    zend_mm_storage tmp_storage, *storage;
2713    zend_mm_chunk *chunk;
2714    zend_mm_heap *heap;
2715
2716    memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
2717    tmp_storage.data = data;
2718    chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
2719    if (UNEXPECTED(chunk == NULL)) {
2720#if ZEND_MM_ERROR
2721#ifdef _WIN32
2722        stderr_last_error("Can't initialize heap");
2723#else
2724        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
2725#endif
2726#endif
2727        return NULL;
2728    }
2729    heap = &chunk->heap_slot;
2730    chunk->heap = heap;
2731    chunk->next = chunk;
2732    chunk->prev = chunk;
2733    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2734    chunk->free_tail = ZEND_MM_FIRST_PAGE;
2735    chunk->num = 0;
2736    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2737    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2738    heap->main_chunk = chunk;
2739    heap->cached_chunks = NULL;
2740    heap->chunks_count = 1;
2741    heap->peak_chunks_count = 1;
2742    heap->cached_chunks_count = 0;
2743    heap->avg_chunks_count = 1.0;
2744#if ZEND_MM_STAT || ZEND_MM_LIMIT
2745    heap->real_size = ZEND_MM_CHUNK_SIZE;
2746#endif
2747#if ZEND_MM_STAT
2748    heap->real_peak = ZEND_MM_CHUNK_SIZE;
2749    heap->size = 0;
2750    heap->peak = 0;
2751#endif
2752#if ZEND_MM_LIMIT
2753    heap->limit = (Z_L(-1) >> Z_L(1));
2754    heap->overflow = 0;
2755#endif
2756#if ZEND_MM_CUSTOM
2757    heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
2758#endif
2759    heap->storage = &tmp_storage;
2760    heap->huge_list = NULL;
2761    memset(heap->free_slot, 0, sizeof(heap->free_slot));
2762    storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
2763    if (!storage) {
2764        handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
2765#if ZEND_MM_ERROR
2766#ifdef _WIN32
2767        stderr_last_error("Can't initialize heap");
2768#else
2769        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
2770#endif
2771#endif
2772        return NULL;
2773    }
2774    memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
2775    if (data) {
2776        storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
2777        memcpy(storage->data, data, data_size);
2778    }
2779    heap->storage = storage;
2780    return heap;
2781#else
2782    return NULL;
2783#endif
2784}
2785
2786/*
2787 * Local variables:
2788 * tab-width: 4
2789 * c-basic-offset: 4
2790 * indent-tabs-mode: t
2791 * End:
2792 */
2793