/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) 1998-2015 Zend Technologies Ltd. (http://www.zend.com) |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | license@zend.com so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Andi Gutmans <andi@zend.com>                                |
   |          Zeev Suraski <zeev@zend.com>                                |
   |          Dmitry Stogov <dmitry@zend.com>                             |
   +----------------------------------------------------------------------+
*/

/* $Id$ */

/*
 * zend_alloc is designed to be a modern CPU cache friendly memory manager
 * for PHP. Most ideas are taken from the jemalloc and tcmalloc implementations.
 *
 * All allocations are split into 3 categories:
 *
 * Huge  - the size is greater than the CHUNK size (~2M by default), allocation
 *         is performed using mmap(). The result is aligned on a 2M boundary.
 *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
 *         are always aligned on a page boundary.
 *
 * Small - less than 3/4 of the page size. Small sizes are rounded up to the
 *         nearest predefined small size (there are 30 predefined sizes:
 *         8, 16, 24, 32, ... 3072). Small blocks are allocated from
 *         RUNs. Each RUN is allocated as a single page or a few contiguous
 *         pages. Allocation inside RUNs is implemented using a linked list
 *         of free elements. The result is aligned to 8 bytes.
 *
 * zend_alloc allocates memory from the OS in CHUNKs. These CHUNKs and huge
 * memory blocks are always aligned to a CHUNK boundary, so it is easy to
 * determine the CHUNK owning a given pointer (see the illustrative sketch
 * below). Regular CHUNKs reserve a single page at the start for special
 * purposes. It contains a bitset of free pages, a few bitsets for available
 * runs of predefined small sizes, a map of pages that keeps information
 * about the usage of each page in this CHUNK, etc.
 *
 * zend_alloc provides the familiar emalloc/efree/erealloc API, but in addition
 * it provides specialized and optimized routines to allocate blocks of
 * predefined sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc.)
 * The library uses C preprocessor tricks that substitute calls to emalloc()
 * with more specialized routines when the requested size is known.
 */
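
/*
 * Illustrative sketch (kept under "#if 0", not compiled): because regular
 * CHUNKs and huge blocks are both aligned to the CHUNK size, the chunk that
 * owns an arbitrary emalloc'd pointer can be recovered with plain mask
 * arithmetic, which is what zend_mm_free_heap() and zend_mm_size() do below
 * using ZEND_MM_ALIGNED_BASE()/ZEND_MM_ALIGNED_OFFSET(). The function name
 * here is only an example, not part of the allocator.
 */
#if 0
static zend_mm_chunk *example_owning_chunk(void *ptr)
{
    /* round the address down to the 2M boundary that starts the chunk */
    zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);

    /* an offset of 0 means the pointer itself is chunk-aligned, i.e. a
     * "huge" allocation that is not backed by a chunk header at all */
    if (ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0) {
        return NULL;
    }
    return chunk;
}
#endif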

#include "zend.h"
#include "zend_alloc.h"
#include "zend_globals.h"
#include "zend_operators.h"
#include "zend_multiply.h"

#ifdef HAVE_SIGNAL_H
# include <signal.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif

#ifdef ZEND_WIN32
# include <wincrypt.h>
# include <process.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <sys/types.h>
#include <sys/stat.h>
#if HAVE_LIMITS_H
#include <limits.h>
#endif
#include <fcntl.h>
#include <errno.h>

#ifndef _WIN32
# ifdef HAVE_MREMAP
#  ifndef _GNU_SOURCE
#   define _GNU_SOURCE
#  endif
#  ifndef __USE_GNU
#   define __USE_GNU
#  endif
# endif
# include <sys/mman.h>
# ifndef MAP_ANON
#  ifdef MAP_ANONYMOUS
#   define MAP_ANON MAP_ANONYMOUS
#  endif
# endif
# ifndef MREMAP_MAYMOVE
#  define MREMAP_MAYMOVE 0
# endif
# ifndef MAP_FAILED
#  define MAP_FAILED ((void*)-1)
# endif
# ifndef MAP_POPULATE
#  define MAP_POPULATE 0
# endif
#  if defined(_SC_PAGESIZE) || defined(_SC_PAGE_SIZE)
#    define REAL_PAGE_SIZE _real_page_size
static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
#  endif
#endif

#ifndef REAL_PAGE_SIZE
# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
#endif

#ifndef ZEND_MM_STAT
# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
#endif
#ifndef ZEND_MM_LIMIT
# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
#endif
#ifndef ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
#endif
#ifndef ZEND_MM_STORAGE
# define ZEND_MM_STORAGE 1 /* support for custom memory storage              */
#endif
#ifndef ZEND_MM_ERROR
# define ZEND_MM_ERROR 1   /* report system errors                           */
#endif

#ifndef ZEND_MM_CHECK
# define ZEND_MM_CHECK(condition, message)  do { \
        if (UNEXPECTED(!(condition))) { \
            zend_mm_panic(message); \
        } \
    } while (0)
#endif

typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */

#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
    (((size_t)(size)) & ((alignment) - 1))
#define ZEND_MM_ALIGNED_BASE(size, alignment) \
    (((size_t)(size)) & ~((alignment) - 1))
#define ZEND_MM_ALIGNED_SIZE_EX(size, alignment) \
    (((size_t)(size) + ((alignment) - 1)) & ~((alignment) - 1))
#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
    (((size_t)(size) + ((alignment) - 1)) / (alignment))

#define ZEND_MM_BITSET_LEN      (sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
#define ZEND_MM_PAGE_MAP_LEN    (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
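
/*
 * Worked example of the alignment helpers above (values assume the default
 * 4096-byte ZEND_MM_PAGE_SIZE; the alignment must be a power of two):
 *
 *   ZEND_MM_ALIGNED_OFFSET(0x2001234, 4096) -> 0x234      (offset inside the page)
 *   ZEND_MM_ALIGNED_BASE(0x2001234, 4096)   -> 0x2001000  (start of the page)
 *   ZEND_MM_ALIGNED_SIZE_EX(5000, 4096)     -> 8192       (rounded up to 2 pages)
 *   ZEND_MM_SIZE_TO_NUM(5000, 4096)         -> 2          (number of pages needed)
 *
 * The macros use plain mask/divide arithmetic and do not check for overflow.
 */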

typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */

#define ZEND_MM_IS_FRUN                  0x00000000
#define ZEND_MM_IS_LRUN                  0x40000000
#define ZEND_MM_IS_SRUN                  0x80000000

#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
#define ZEND_MM_LRUN_PAGES_OFFSET        0

#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0

#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)

#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
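
/*
 * Example of the page-info encoding above. A "large" run that occupies 3
 * pages is recorded in its first page as
 *
 *   info = ZEND_MM_LRUN(3);        -> 0x40000003
 *   info & ZEND_MM_IS_LRUN         -> non-zero (it is a large run)
 *   ZEND_MM_LRUN_PAGES(info)       -> 3
 *
 * and the first page of a small run backed by bin #5 as
 *
 *   info = ZEND_MM_SRUN(5);        -> 0x80000005
 *   ZEND_MM_SRUN_BIN_NUM(info)     -> 5
 */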

#define ZEND_MM_BINS 30

typedef struct  _zend_mm_page      zend_mm_page;
typedef struct  _zend_mm_bin       zend_mm_bin;
typedef struct  _zend_mm_free_slot zend_mm_free_slot;
typedef struct  _zend_mm_chunk     zend_mm_chunk;
typedef struct  _zend_mm_huge_list zend_mm_huge_list;

#ifdef _WIN64
# define PTR_FMT "0x%0.16I64x"
#elif SIZEOF_LONG == 8
# define PTR_FMT "0x%0.16lx"
#else
# define PTR_FMT "0x%0.8lx"
#endif

/*
 * Memory is retrieved from the OS in chunks of fixed size (2MB).
 * Inside a chunk it is managed in pages of fixed size (4096B),
 * so each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
 *
 * free_pages - current number of free pages in this chunk
 *
 * free_tail  - number of continuous free pages at the end of the chunk
 *
 * free_map   - bitset (a bit for each page). The bit is set if the corresponding
 *              page is allocated. The allocator for "large sizes" may easily
 *              find a free page (or a continuous run of pages) by searching
 *              for zero bits.
 *
 * map        - contains service information for each page (32 bits per page).
 *    usage:
 *              (2 bits)
 *              FRUN - free page,
 *              LRUN - first page of a "large" allocation
 *              SRUN - first page of a bin used for "small" allocation
 *
 *    lrun_pages:
 *              (10 bits) number of allocated pages
 *
 *    srun_bin_num:
 *              (5 bits) bin number (e.g. 0 for sizes up to 8, 1 for 9-16,
 *               2 for 17-24, 3 for 25-32, etc.), see zend_alloc_sizes.h
 */

struct _zend_mm_heap {
#if ZEND_MM_CUSTOM
    int                use_custom_heap;
#endif
#if ZEND_MM_STORAGE
    zend_mm_storage   *storage;
#endif
#if ZEND_MM_STAT
    size_t             size;                    /* current memory usage */
    size_t             peak;                    /* peak memory usage */
#endif
    zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
#if ZEND_MM_STAT || ZEND_MM_LIMIT
    size_t             real_size;               /* current size of allocated pages */
#endif
#if ZEND_MM_STAT
    size_t             real_peak;               /* peak size of allocated pages */
#endif
#if ZEND_MM_LIMIT
    size_t             limit;                   /* memory limit */
    int                overflow;                /* memory overflow flag */
#endif

    zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */

    zend_mm_chunk     *main_chunk;
    zend_mm_chunk     *cached_chunks;           /* list of unused chunks */
    int                chunks_count;            /* number of allocated chunks */
    int                peak_chunks_count;       /* peak number of allocated chunks for current request */
    int                cached_chunks_count;     /* number of cached chunks */
    double             avg_chunks_count;        /* average number of chunks allocated per request */
#if ZEND_MM_CUSTOM
    void              *(*_malloc)(size_t);
    void               (*_free)(void*);
    void              *(*_realloc)(void*, size_t);
#endif
};

struct _zend_mm_chunk {
    zend_mm_heap      *heap;
    zend_mm_chunk     *next;
    zend_mm_chunk     *prev;
    int                free_pages;              /* number of free pages */
    int                free_tail;               /* number of free pages at the end of chunk */
    int                num;
    char               reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)];
    zend_mm_heap       heap_slot;               /* used only in main chunk */
    zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
    zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
};

struct _zend_mm_page {
    char               bytes[ZEND_MM_PAGE_SIZE];
};

/*
 * A bin is one or more contiguous pages (up to 8) used for allocation of
 * a particular "small size".
 */
struct _zend_mm_bin {
    char               bytes[ZEND_MM_PAGE_SIZE * 8];
};

struct _zend_mm_free_slot {
    zend_mm_free_slot *next_free_slot;
};

struct _zend_mm_huge_list {
    void              *ptr;
    size_t             size;
    zend_mm_huge_list *next;
#if ZEND_DEBUG
    zend_mm_debug_info dbg;
#endif
};

#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
    ((void*)(((zend_mm_page*)(chunk)) + (page_num)))

#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
static const unsigned int bin_data_size[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
};

#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
static const int bin_elements[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
};

#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
static const int bin_pages[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
};

#if ZEND_DEBUG
void zend_debug_alloc_output(char *format, ...)
{
    char output_buf[256];
    va_list args;

    va_start(args, format);
    vsprintf(output_buf, format, args);
    va_end(args);

#ifdef ZEND_WIN32
    OutputDebugString(output_buf);
#else
    fprintf(stderr, "%s", output_buf);
#endif
}
#endif

static ZEND_NORETURN void zend_mm_panic(const char *message)
{
    fprintf(stderr, "%s\n", message);
/* See http://support.microsoft.com/kb/190351 */
#ifdef PHP_WIN32
    fflush(stderr);
#endif
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
    kill(getpid(), SIGSEGV);
#endif
    exit(1);
}

static ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
    const char *format,
    size_t limit,
#if ZEND_DEBUG
    const char *filename,
    uint lineno,
#endif
    size_t size)
{

    heap->overflow = 1;
    zend_try {
        zend_error_noreturn(E_ERROR,
            format,
            limit,
#if ZEND_DEBUG
            filename,
            lineno,
#endif
            size);
    } zend_catch {
    }  zend_end_try();
    heap->overflow = 0;
    zend_bailout();
    exit(1);
}

#ifdef _WIN32
void
stderr_last_error(char *msg)
{
    LPSTR buf = NULL;
    DWORD err = GetLastError();

    if (!FormatMessage(
            FORMAT_MESSAGE_ALLOCATE_BUFFER |
            FORMAT_MESSAGE_FROM_SYSTEM |
            FORMAT_MESSAGE_IGNORE_INSERTS,
            NULL,
            err,
            MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
            (LPSTR)&buf,
        0, NULL)) {
        fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
    }
    else {
        fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
    }
}
#endif

/*****************/
/* OS Allocation */
/*****************/

static void *zend_mm_mmap_fixed(void *addr, size_t size)
{
#ifdef _WIN32
    return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
    /* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
    void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);

    if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
        return NULL;
    } else if (ptr != addr) {
        if (munmap(ptr, size) != 0) {
#if ZEND_MM_ERROR
            fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
        }
        return NULL;
    }
    return ptr;
#endif
}

static void *zend_mm_mmap(size_t size)
{
#ifdef _WIN32
    void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

    if (ptr == NULL) {
#if ZEND_MM_ERROR
        stderr_last_error("VirtualAlloc() failed");
#endif
        return NULL;
    }
    return ptr;
#else
    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);

    if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
        return NULL;
    }
    return ptr;
#endif
}

static void zend_mm_munmap(void *addr, size_t size)
{
#ifdef _WIN32
    if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
#if ZEND_MM_ERROR
        stderr_last_error("VirtualFree() failed");
#endif
    }
#else
    if (munmap(addr, size) != 0) {
#if ZEND_MM_ERROR
        fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
    }
#endif
}

/***********/
/* Bitmask */
/***********/

/* number of trailing set (1) bits */
static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
{
#if defined(__GNUC__)
# if SIZEOF_ZEND_LONG == SIZEOF_LONG
    return __builtin_ctzl(~bitset);
# else
    return __builtin_ctzll(~bitset);
# endif
#elif defined(_WIN32)
    unsigned long index;

#if defined(_WIN64)
    if (!BitScanForward64(&index, ~bitset)) {
#else
    if (!BitScanForward(&index, ~bitset)) {
#endif
        /* undefined behavior */
        return 32;
    }

    return (int)index;
#else
    int n;

    if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;

    n = 0;
#if SIZEOF_ZEND_LONG == 8
    if (sizeof(zend_mm_bitset) == 8) {
        if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
    }
#endif
    if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
    if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
    if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
    if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
    return n + (bitset & 1);
#endif
}
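
/*
 * Example: zend_mm_bitset_nts() counts trailing 1 (allocated) bits, i.e. the
 * index of the first free page tracked by this word:
 *
 *   0x00000000 -> 0   (the very first page of this word is free)
 *   0x00000007 -> 3   (pages 0-2 are taken, page 3 is the first free one)
 *
 * Callers skip words that are completely full before calling it, so the
 * all-ones case only matters for the portable fallback above, which returns
 * ZEND_MM_BITSET_LEN; the GCC/MSVC branches simply use ctz/BitScanForward
 * on the inverted word.
 */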

/* number of trailing zero bits (0x01 -> 1; 0x40 -> 6; 0x00 -> LEN) */
static zend_always_inline int zend_mm_bitset_ntz(zend_mm_bitset bitset)
{
#if defined(__GNUC__)
# if SIZEOF_ZEND_LONG == SIZEOF_LONG
    return __builtin_ctzl(bitset);
# else
    return __builtin_ctzll(bitset);
# endif
#elif defined(_WIN32)
    unsigned long index;

#if defined(_WIN64)
    if (!BitScanForward64(&index, bitset)) {
#else
    if (!BitScanForward(&index, bitset)) {
#endif
        /* undefined behavior */
        return 32;
    }

    return (int)index;
#else
    int n;

    if (bitset == (zend_mm_bitset)0) return ZEND_MM_BITSET_LEN;

    n = 1;
#if SIZEOF_ZEND_LONG == 8
    if (sizeof(zend_mm_bitset) == 8) {
        if ((bitset & 0xffffffff) == 0) {n += 32; bitset = bitset >> Z_UL(32);}
    }
#endif
    if ((bitset & 0x0000ffff) == 0) {n += 16; bitset = bitset >> 16;}
    if ((bitset & 0x000000ff) == 0) {n +=  8; bitset = bitset >>  8;}
    if ((bitset & 0x0000000f) == 0) {n +=  4; bitset = bitset >>  4;}
    if ((bitset & 0x00000003) == 0) {n +=  2; bitset = bitset >>  2;}
    return n - (bitset & 1);
#endif
}

static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
{
    int i = 0;

    do {
        zend_mm_bitset tmp = bitset[i];
        if (tmp != (zend_mm_bitset)-1) {
            return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
        }
        i++;
    } while (i < size);
    return -1;
}

static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
{
    int i = 0;

    do {
        zend_mm_bitset tmp = bitset[i];
        if (tmp != 0) {
            return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_ntz(tmp);
        }
        i++;
    } while (i < size);
    return -1;
}

static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
{
    int i = 0;

    do {
        zend_mm_bitset tmp = bitset[i];
        if (tmp != (zend_mm_bitset)-1) {
            int n = zend_mm_bitset_nts(tmp);
            bitset[i] |= Z_UL(1) << n;
            return i * ZEND_MM_BITSET_LEN + n;
        }
        i++;
    } while (i < size);
    return -1;
}

static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
{
    return (bitset[bit / ZEND_MM_BITSET_LEN] & (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0;
}

static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
{
    bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
}

static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
{
    bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
}

static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
{
    if (len == 1) {
        zend_mm_bitset_set_bit(bitset, start);
    } else {
        int pos = start / ZEND_MM_BITSET_LEN;
        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
        int bit = start & (ZEND_MM_BITSET_LEN - 1);
        zend_mm_bitset tmp;

        if (pos != end) {
            /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
            tmp = (zend_mm_bitset)-1 << bit;
            bitset[pos++] |= tmp;
            while (pos != end) {
                /* set all bits */
                bitset[pos++] = (zend_mm_bitset)-1;
            }
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* set bits from "0" to "end" */
            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            bitset[pos] |= tmp;
        } else {
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* set bits from "bit" to "end" */
            tmp = (zend_mm_bitset)-1 << bit;
            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            bitset[pos] |= tmp;
        }
    }
}
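
/*
 * Mask construction example for zend_mm_bitset_set_range(), assuming 32-bit
 * bitset words for brevity. For start = 4, len = 6 the range stays inside
 * word 0 and bits 4..9 must be set:
 *
 *   tmp  = (zend_mm_bitset)-1 << 4;                  tmp == 0xfffffff0
 *   tmp &= (zend_mm_bitset)-1 >> ((32 - 1) - 9);     tmp == 0x000003f0
 *   bitset[0] |= tmp;                                bits 4..9 are now set
 *
 * A range that crosses a word boundary goes through the pos != end branch:
 * a partial mask for the first word, full words in between, and a partial
 * mask for the last word. reset_range() and is_free_range() below build the
 * same masks to clear and to test the bits.
 */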

static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
{
    if (len == 1) {
        zend_mm_bitset_reset_bit(bitset, start);
    } else {
        int pos = start / ZEND_MM_BITSET_LEN;
        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
        int bit = start & (ZEND_MM_BITSET_LEN - 1);
        zend_mm_bitset tmp;

        if (pos != end) {
            /* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
            tmp = ~((Z_L(1) << bit) - 1);
            bitset[pos++] &= ~tmp;
            while (pos != end) {
                /* reset all bits */
                bitset[pos++] = 0;
            }
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* reset bits from "0" to "end" */
            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            bitset[pos] &= ~tmp;
        } else {
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* reset bits from "bit" to "end" */
            tmp = (zend_mm_bitset)-1 << bit;
            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            bitset[pos] &= ~tmp;
        }
    }
}

static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
{
    if (len == 1) {
        return !zend_mm_bitset_is_set(bitset, start);
    } else {
        int pos = start / ZEND_MM_BITSET_LEN;
        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
        int bit = start & (ZEND_MM_BITSET_LEN - 1);
        zend_mm_bitset tmp;

        if (pos != end) {
            /* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
            tmp = (zend_mm_bitset)-1 << bit;
            if ((bitset[pos++] & tmp) != 0) {
                return 0;
            }
            while (pos != end) {
                /* check whole words */
                if (bitset[pos++] != 0) {
                    return 0;
                }
            }
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check bits from "0" to "end" */
            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            return (bitset[pos] & tmp) == 0;
        } else {
            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check bits from "bit" to "end" */
            tmp = (zend_mm_bitset)-1 << bit;
            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
            return (bitset[pos] & tmp) == 0;
        }
    }
}

/**********/
/* Chunks */
/**********/

static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
{
    void *ptr = zend_mm_mmap(size);

    if (ptr == NULL) {
        return NULL;
    } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
#ifdef MADV_HUGEPAGE
        madvise(ptr, size, MADV_HUGEPAGE);
#endif
        return ptr;
    } else {
        size_t offset;

        /* chunk has to be aligned */
        zend_mm_munmap(ptr, size);
        ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
#ifdef _WIN32
        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
        zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
        ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
        if (offset != 0) {
            zend_mm_munmap(ptr, size);
            return NULL;
        }
        return ptr;
#else
        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
        if (offset != 0) {
            offset = alignment - offset;
            zend_mm_munmap(ptr, offset);
            ptr = (char*)ptr + offset;
            alignment -= offset;
        }
        if (alignment > REAL_PAGE_SIZE) {
            zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
        }
# ifdef MADV_HUGEPAGE
        madvise(ptr, size, MADV_HUGEPAGE);
# endif
#endif
        return ptr;
    }
}
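
/*
 * Alignment strategy of zend_mm_chunk_alloc_int(), illustrated for the
 * default 2M chunk:
 *
 *   1. mmap(2M): if the kernel already returned a 2M-aligned address, done.
 *   2. Otherwise unmap it and mmap(2M + 2M - page_size) instead; a window of
 *      that length is guaranteed to contain exactly one 2M-aligned start
 *      that still leaves room for the full 2M.
 *   3. On POSIX, munmap() the misaligned head (alignment - offset bytes) and
 *      the unused tail, keeping only the aligned 2M in the middle.
 *      On Windows, the whole window is released and the aligned address is
 *      re-requested with zend_mm_mmap_fixed(); if that mapping does not land
 *      at the aligned address, the function gives up and returns NULL.
 */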

static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
{
#if ZEND_MM_STORAGE
    if (UNEXPECTED(heap->storage)) {
        void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
        ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (zend_uintptr_t)ptr);
        return ptr;
    }
#endif
    return zend_mm_chunk_alloc_int(size, alignment);
}

static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
{
#if ZEND_MM_STORAGE
    if (UNEXPECTED(heap->storage)) {
        heap->storage->handlers.chunk_free(heap->storage, addr, size);
        return;
    }
#endif
    zend_mm_munmap(addr, size);
}

static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
    if (UNEXPECTED(heap->storage)) {
        if (heap->storage->handlers.chunk_truncate) {
            return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
        } else {
            return 0;
        }
    }
#endif
#ifndef _WIN32
    zend_mm_munmap((char*)addr + new_size, old_size - new_size);
    return 1;
#else
    return 0;
#endif
}

static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
    if (UNEXPECTED(heap->storage)) {
        if (heap->storage->handlers.chunk_extend) {
            return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
        } else {
            return 0;
        }
    }
#endif
#ifndef _WIN32
    return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
#else
    return 0;
#endif
}

static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
    chunk->heap = heap;
    chunk->next = heap->main_chunk;
    chunk->prev = heap->main_chunk->prev;
    chunk->prev->next = chunk;
    chunk->next->prev = chunk;
    /* mark first pages as allocated */
    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    /* the younger chunks have a bigger number */
    chunk->num = chunk->prev->num + 1;
    /* mark the first pages as allocated in the free map as well */
    chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
}
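
/*
 * Resulting bookkeeping after zend_mm_chunk_init(), assuming the usual
 * ZEND_MM_FIRST_PAGE of 1 and 512 pages per 2M chunk: the header page is
 * simply recorded as an ordinary 1-page large run, so the page allocator
 * never has to special-case it.
 *
 *   free_map[0] == (1L << 1) - 1 == 0x1   (page 0 marked as used)
 *   map[0]      == ZEND_MM_LRUN(1)        (a 1-page "large" run)
 *   free_pages  == 512 - 1 == 511
 *   free_tail   == 1                      (pages 1..511 form a free tail)
 */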

/***********************/
/* Huge Runs (forward) */
/***********************/

static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);

#if ZEND_DEBUG
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#else
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#endif

/**************/
/* Large Runs */
/**************/

#if ZEND_DEBUG
static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
    zend_mm_chunk *chunk = heap->main_chunk;
    int page_num, len;

    while (1) {
        if (UNEXPECTED(chunk->free_pages < pages_count)) {
            goto not_found;
#if 0
        } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
            if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
                goto not_found;
            } else {
                page_num = chunk->free_tail;
                goto found;
            }
        } else if (0) {
            /* First-Fit Search */
            int free_tail = chunk->free_tail;
            zend_mm_bitset *bitset = chunk->free_map;
            zend_mm_bitset tmp = *(bitset++);
            int i = 0;

            while (1) {
                /* skip allocated blocks */
                while (tmp == (zend_mm_bitset)-1) {
                    i += ZEND_MM_BITSET_LEN;
                    if (i == ZEND_MM_PAGES) {
                        goto not_found;
                    }
                    tmp = *(bitset++);
                }
                /* find first 0 bit */
                page_num = i + zend_mm_bitset_nts(tmp);
                /* reset bits from 0 to "bit" */
                tmp &= tmp + 1;
                /* skip free blocks */
                while (tmp == 0) {
                    i += ZEND_MM_BITSET_LEN;
                    len = i - page_num;
                    if (len >= pages_count) {
                        goto found;
                    } else if (i >= free_tail) {
                        goto not_found;
                    }
                    tmp = *(bitset++);
                }
                /* find first 1 bit */
                len = (i + zend_mm_bitset_ntz(tmp)) - page_num;
                if (len >= pages_count) {
                    goto found;
                }
                /* set bits from 0 to "bit" */
                tmp |= tmp - 1;
            }
#endif
        } else {
            /* Best-Fit Search */
            int best = -1;
            int best_len = ZEND_MM_PAGES;
            int free_tail = chunk->free_tail;
            zend_mm_bitset *bitset = chunk->free_map;
            zend_mm_bitset tmp = *(bitset++);
            int i = 0;

            while (1) {
                /* skip allocated blocks */
                while (tmp == (zend_mm_bitset)-1) {
                    i += ZEND_MM_BITSET_LEN;
                    if (i == ZEND_MM_PAGES) {
                        if (best > 0) {
                            page_num = best;
                            goto found;
                        } else {
                            goto not_found;
                        }
                    }
                    tmp = *(bitset++);
                }
                /* find first 0 bit */
                page_num = i + zend_mm_bitset_nts(tmp);
                /* reset bits from 0 to "bit" */
                tmp &= tmp + 1;
                /* skip free blocks */
                while (tmp == 0) {
                    i += ZEND_MM_BITSET_LEN;
                    if (i >= free_tail) {
                        len = ZEND_MM_PAGES - page_num;
                        if (len >= pages_count && len < best_len) {
                            chunk->free_tail = page_num + pages_count;
                            goto found;
                        } else {
                            /* set accurate value */
                            chunk->free_tail = page_num;
                            if (best > 0) {
                                page_num = best;
                                goto found;
                            } else {
                                goto not_found;
                            }
                        }
                    }
                    tmp = *(bitset++);
                }
                /* find first 1 bit */
                len = i + zend_mm_bitset_ntz(tmp) - page_num;
                if (len >= pages_count) {
                    if (len == pages_count) {
                        goto found;
                    } else if (len < best_len) {
                        best_len = len;
                        best = page_num;
                    }
                }
                /* set bits from 0 to "bit" */
                tmp |= tmp - 1;
            }
        }

not_found:
        if (chunk->next == heap->main_chunk) {
            if (heap->cached_chunks) {
                heap->cached_chunks_count--;
                chunk = heap->cached_chunks;
                heap->cached_chunks = chunk->next;
            } else {
#if ZEND_MM_LIMIT
                if (heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit) {
                    if (heap->overflow == 0) {
#if ZEND_DEBUG
                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
#endif
                        return NULL;
                    }
                }
#endif
                chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
                if (UNEXPECTED(chunk == NULL)) {
                    /* insufficient memory */
#if !ZEND_MM_LIMIT
                    zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
                    zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
                    zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
#endif
                    return NULL;
                }
#if ZEND_MM_STAT
                do {
                    size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
                    size_t peak = MAX(heap->real_peak, size);
                    heap->real_size = size;
                    heap->real_peak = peak;
                } while (0);
#elif ZEND_MM_LIMIT
                heap->real_size += ZEND_MM_CHUNK_SIZE;

#endif
            }
            heap->chunks_count++;
            if (heap->chunks_count > heap->peak_chunks_count) {
                heap->peak_chunks_count = heap->chunks_count;
            }
            zend_mm_chunk_init(heap, chunk);
            page_num = ZEND_MM_FIRST_PAGE;
            len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
            goto found;
        } else {
            chunk = chunk->next;
        }
    }

found:
    /* mark run as allocated */
    chunk->free_pages -= pages_count;
    zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
    chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
    if (page_num == chunk->free_tail) {
        chunk->free_tail = page_num + pages_count;
    }
    return ZEND_MM_PAGE_ADDR(chunk, page_num);
}

static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
#if ZEND_DEBUG
    void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
    void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
    do {
        size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
        size_t peak = MAX(heap->peak, size);
        heap->size = size;
        heap->peak = peak;
    } while (0);
#endif
    return ptr;
}

static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
    chunk->free_pages += pages_count;
    zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
    chunk->map[page_num] = 0;
    if (chunk->free_tail == page_num + pages_count) {
        /* this setting may not be accurate */
        chunk->free_tail = page_num;
    }
    if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
        /* delete chunk */
        chunk->next->prev = chunk->prev;
        chunk->prev->next = chunk->next;
        heap->chunks_count--;
        if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1) {
            /* delay deletion */
            heap->cached_chunks_count++;
            chunk->next = heap->cached_chunks;
            heap->cached_chunks = chunk;
        } else {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
            heap->real_size -= ZEND_MM_CHUNK_SIZE;
#endif
            if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
                zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
            } else {
//TODO: select the best chunk to delete???
                chunk->next = heap->cached_chunks->next;
                zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
                heap->cached_chunks = chunk;
            }
        }
    }
}

static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
#if ZEND_MM_STAT
    heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
#endif
    zend_mm_free_pages(heap, chunk, page_num, pages_count);
}

/**************/
/* Small Runs */
/**************/

/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
static zend_always_inline int zend_mm_small_size_to_bit(int size)
{
#if defined(__GNUC__)
    return (__builtin_clz(size) ^ 0x1f) + 1;
#elif defined(_WIN32)
    unsigned long index;

    if (!BitScanReverse(&index, (unsigned long)size)) {
        /* undefined behavior */
        return 64;
    }

    return (((31 - (int)index) ^ 0x1f) + 1);
#else
    int n = 16;
    if (size <= 0x00ff) {n -= 8; size = size << 8;}
    if (size <= 0x0fff) {n -= 4; size = size << 4;}
    if (size <= 0x3fff) {n -= 2; size = size << 2;}
    if (size <= 0x7fff) {n -= 1;}
    return n;
#endif
}

#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif

static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
{
#if 0
    int n;
                            /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
    static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
    static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};

    if (UNEXPECTED(size <= 2)) return 0;
    n = zend_mm_small_size_to_bit(size - 1);
    return ((size-1) >> f1[n]) + f2[n];
#else
    int t1, t2, t3;

    if (UNEXPECTED(size <= 8)) return 0;
    t1 = (int)(size - 1);
    t2 = zend_mm_small_size_to_bit(t1);
    t3 = t2 - 6;
    t3 = (t3 < 0) ? 0 : t3;
    t2 = t3 + 3;
    t1 = t1 >> t2;
    t3 = t3 << 2;
    return t1 + t3;
#endif
}
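
/*
 * Worked example of the mapping above (the bin sizes come from
 * ZEND_MM_BINS_INFO / zend_alloc_sizes.h: 8, 16, 24, ..., 96, 112, 128, ...):
 *
 *   size = 100:  t1 = 99, highest bit = 7, t3 = 1, shift = 4
 *                99 >> 4 = 6, 6 + (1 << 2) = bin 10  -> 112-byte elements
 *   size = 9:    t1 = 8,  highest bit = 4, t3 = 0, shift = 3
 *                8 >> 3 = 1, bin 1                   -> 16-byte elements
 *
 * i.e. up to 64 bytes the bins grow in 8-byte steps, and every doubling of
 * the size range above that gets four additional bins.
 */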

#define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)

static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    zend_mm_chunk *chunk;
    int page_num;
    zend_mm_bin *bin;
    zend_mm_free_slot *p, *end;

#if ZEND_DEBUG
    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
    if (UNEXPECTED(bin == NULL)) {
        /* insufficient memory */
        return NULL;
    }

    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
    page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
    chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
    if (bin_pages[bin_num] > 1) {
        int i = 1;
        do {
            chunk->map[page_num+i] = ZEND_MM_SRUN(bin_num);
            i++;
        } while (i < bin_pages[bin_num]);
    }

    /* create a linked list of elements from 1 to last */
    end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
    heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
    do {
        p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
#if ZEND_DEBUG
        do {
            zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
            dbg->size = 0;
        } while (0);
#endif
        p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
    } while (p != end);

    /* terminate the list using NULL */
    p->next_free_slot = NULL;
#if ZEND_DEBUG
        do {
            zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
            dbg->size = 0;
        } while (0);
#endif

    /* return the first element */
    return (char*)bin;
}
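
/*
 * Shape of a freshly carved bin after zend_mm_alloc_small_slow(), taking
 * bin 0 (8-byte elements, one page, 512 elements per the bin tables above)
 * as an example: element 0 is handed to the caller right away, and elements
 * 1..511 are threaded into heap->free_slot[0] as a NULL-terminated singly
 * linked list:
 *
 *   free_slot[0] -> elem 1 -> elem 2 -> ... -> elem 511 -> NULL
 *
 * so the fast path in zend_mm_alloc_small() below is a single pointer pop,
 * and zend_mm_free_small() pushes blocks back onto the same list.
 */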

static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_STAT
    do {
        size_t size = heap->size + bin_data_size[bin_num];
        size_t peak = MAX(heap->peak, size);
        heap->size = size;
        heap->peak = peak;
    } while (0);
#endif

    if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
        zend_mm_free_slot *p = heap->free_slot[bin_num];
        heap->free_slot[bin_num] = p->next_free_slot;
        return (void*)p;
    } else {
        return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    }
}

static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
{
    zend_mm_free_slot *p;

#if ZEND_MM_STAT
    heap->size -= bin_data_size[bin_num];
#endif

#if ZEND_DEBUG
    do {
        zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
        dbg->size = 0;
    } while (0);
#endif

    p = (zend_mm_free_slot*)ptr;
    p->next_free_slot = heap->free_slot[bin_num];
    heap->free_slot[bin_num] = p;
}

/********/
/* Heap */
/********/

#if ZEND_DEBUG
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
{
    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
    zend_mm_chunk *chunk;
    int page_num;
    zend_mm_page_info info;

    ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
    page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
    info = chunk->map[page_num];
    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
    if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
        int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
        return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
    } else /* if (info & ZEND_MM_IS_LRUN) */ {
        int pages_count = ZEND_MM_LRUN_PAGES(info);

        return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
    }
}
#endif

static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    void *ptr;
#if ZEND_DEBUG
    size_t real_size = size;
    zend_mm_debug_info *dbg;

    /* special handling for zero-size allocation */
    size = MAX(size, 1);
    size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
    if (size <= ZEND_MM_MAX_SMALL_SIZE) {
        ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
        dbg = zend_mm_get_debug_info(heap, ptr);
        dbg->size = real_size;
        dbg->filename = __zend_filename;
        dbg->orig_filename = __zend_orig_filename;
        dbg->lineno = __zend_lineno;
        dbg->orig_lineno = __zend_orig_lineno;
#endif
        return ptr;
    } else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
        ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
        dbg = zend_mm_get_debug_info(heap, ptr);
        dbg->size = real_size;
        dbg->filename = __zend_filename;
        dbg->orig_filename = __zend_orig_filename;
        dbg->lineno = __zend_lineno;
        dbg->orig_lineno = __zend_orig_lineno;
#endif
        return ptr;
    } else {
#if ZEND_DEBUG
        size = real_size;
#endif
        return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    }
}
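
/*
 * Size-class dispatch performed by zend_mm_alloc_heap(), with the default
 * limits described in the overview comment at the top of this file (the
 * exact constants live in zend_alloc.h):
 *
 *   size <= ZEND_MM_MAX_SMALL_SIZE (3072)  -> bin allocator, 8-byte aligned
 *   size <= ZEND_MM_MAX_LARGE_SIZE (< 2M)  -> whole pages inside a chunk
 *   otherwise                              -> zend_mm_alloc_huge() / mmap()
 *
 * In debug builds the requested size is first padded by
 * sizeof(zend_mm_debug_info), so a request may be promoted to the next class.
 */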

static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

    if (UNEXPECTED(page_offset == 0)) {
        if (ptr != NULL) {
            zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
        }
    } else {
        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
        int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
        zend_mm_page_info info = chunk->map[page_num];

        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
        if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
            zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
        } else /* if (info & ZEND_MM_IS_LRUN) */ {
            int pages_count = ZEND_MM_LRUN_PAGES(info);

            ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
            zend_mm_free_large(heap, chunk, page_num, pages_count);
        }
    }
}

static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

    if (UNEXPECTED(page_offset == 0)) {
        return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    } else {
        zend_mm_chunk *chunk;
#if 0 && ZEND_DEBUG
        zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
        return dbg->size;
#else
        int page_num;
        zend_mm_page_info info;

        chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
        page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
        info = chunk->map[page_num];
        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
        if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
            return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
        } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
            return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
        }
#endif
    }
}

static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    size_t page_offset;
    size_t old_size;
    size_t new_size;
    void *ret;
#if ZEND_DEBUG
    size_t real_size;
    zend_mm_debug_info *dbg;
#endif

    page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
    if (UNEXPECTED(page_offset == 0)) {
        if (UNEXPECTED(ptr == NULL)) {
            return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
        }
        old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
        real_size = size;
        size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
        if (size > ZEND_MM_MAX_LARGE_SIZE) {
#if ZEND_DEBUG
            size = real_size;
#endif
            new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
            if (new_size == old_size) {
#if ZEND_DEBUG
                zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
                zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
                return ptr;
            } else if (new_size < old_size) {
                /* unmap tail */
                if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
                    heap->real_size -= old_size - new_size;
#endif
#if ZEND_MM_STAT
                    heap->size -= old_size - new_size;
#endif
#if ZEND_DEBUG
                    zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
                    zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
                    return ptr;
                }
            } else /* if (new_size > old_size) */ {
#if ZEND_MM_LIMIT
                if (heap->real_size + (new_size - old_size) > heap->limit) {
                    if (heap->overflow == 0) {
#if ZEND_DEBUG
                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
                        return NULL;
                    }
                }
#endif
                /* try to map tail right after this block */
                if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
                    heap->real_size += new_size - old_size;
#endif
#if ZEND_MM_STAT
                    heap->size += new_size - old_size;
#endif
#if ZEND_DEBUG
                    zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
                    zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
                    return ptr;
                }
            }
        }
    } else {
        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
        int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
        zend_mm_page_info info = chunk->map[page_num];
#if ZEND_DEBUG
        size_t real_size = size;

        size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif

        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
        if (info & ZEND_MM_IS_SRUN) {
            int old_bin_num, bin_num;

            old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
            old_size = bin_data_size[old_bin_num];
            bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size);
            if (old_bin_num == bin_num) {
#if ZEND_DEBUG
                dbg = zend_mm_get_debug_info(heap, ptr);
                dbg->size = real_size;
                dbg->filename = __zend_filename;
                dbg->orig_filename = __zend_orig_filename;
                dbg->lineno = __zend_lineno;
                dbg->orig_lineno = __zend_orig_lineno;
#endif
                return ptr;
            }
        } else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
            ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
            old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
            if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
                new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
                if (new_size == old_size) {
#if ZEND_DEBUG
                    dbg = zend_mm_get_debug_info(heap, ptr);
                    dbg->size = real_size;
                    dbg->filename = __zend_filename;
                    dbg->orig_filename = __zend_orig_filename;
                    dbg->lineno = __zend_lineno;
                    dbg->orig_lineno = __zend_orig_lineno;
#endif
                    return ptr;
                } else if (new_size < old_size) {
                    /* free tail pages */
                    int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
                    int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);

#if ZEND_MM_STAT
                    heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
#endif
                    chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
                    chunk->free_pages += rest_pages_count;
                    zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
#if ZEND_DEBUG
                    dbg = zend_mm_get_debug_info(heap, ptr);
                    dbg->size = real_size;
                    dbg->filename = __zend_filename;
                    dbg->orig_filename = __zend_orig_filename;
                    dbg->lineno = __zend_lineno;
                    dbg->orig_lineno = __zend_orig_lineno;
#endif
                    return ptr;
                } else /* if (new_size > old_size) */ {
                    int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
                    int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);

                    /* try to allocate tail pages after this block */
                    if (page_num + new_pages_count <= ZEND_MM_PAGES &&
                        zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
#if ZEND_MM_STAT
                        do {
                            size_t size = heap->size + (new_size - old_size);
                            size_t peak = MAX(heap->peak, size);
                            heap->size = size;
                            heap->peak = peak;
                        } while (0);
#endif
                        chunk->free_pages -= new_pages_count - old_pages_count;
                        zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
                        chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
#if ZEND_DEBUG
                        dbg = zend_mm_get_debug_info(heap, ptr);
                        dbg->size = real_size;
                        dbg->filename = __zend_filename;
                        dbg->orig_filename = __zend_orig_filename;
                        dbg->lineno = __zend_lineno;
                        dbg->orig_lineno = __zend_orig_lineno;
#endif
                        return ptr;
                    }
                }
            }
        }
#if ZEND_DEBUG
        size = real_size;
#endif
    }

    /* Naive reallocation */
    ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    memcpy(ret, ptr, MIN(old_size, copy_size));
    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    return ret;
}
1581
1582/*********************/
1583/* Huge Runs (again) */
1584/*********************/
1585
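/* Each huge allocation is tracked in a singly-linked list rooted at
 * heap->huge_list, so that its mapped size (and, in debug builds, the
 * allocation site) can be recovered later from the pointer alone. */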
#if ZEND_DEBUG
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
    zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    list->ptr = ptr;
    list->size = size;
    list->next = heap->huge_list;
#if ZEND_DEBUG
    list->dbg.size = dbg_size;
    list->dbg.filename = __zend_filename;
    list->dbg.orig_filename = __zend_orig_filename;
    list->dbg.lineno = __zend_lineno;
    list->dbg.orig_lineno = __zend_orig_lineno;
#endif
    heap->huge_list = list;
}

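/* Unlink the huge_list entry for "ptr" and return the mapped size recorded
 * for it. An unknown pointer is treated as heap corruption. */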
static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    zend_mm_huge_list *prev = NULL;
    zend_mm_huge_list *list = heap->huge_list;
    while (list != NULL) {
        if (list->ptr == ptr) {
            size_t size;

            if (prev) {
                prev->next = list->next;
            } else {
                heap->huge_list = list->next;
            }
            size = list->size;
            zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
            return size;
        }
        prev = list;
        list = list->next;
    }
    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
    return 0;
}

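/* Look up the mapped size recorded for a huge block without removing it
 * from huge_list. An unknown pointer is treated as heap corruption. */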
static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    zend_mm_huge_list *list = heap->huge_list;
    while (list != NULL) {
        if (list->ptr == ptr) {
            return list->size;
        }
        list = list->next;
    }
    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
    return 0;
}

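/* Update the size recorded for a huge block after it has been grown or
 * truncated in place (used by the huge-block realloc path above). */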
#if ZEND_DEBUG
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
    zend_mm_huge_list *list = heap->huge_list;
    while (list != NULL) {
        if (list->ptr == ptr) {
            list->size = size;
#if ZEND_DEBUG
            list->dbg.size = dbg_size;
            list->dbg.filename = __zend_filename;
            list->dbg.orig_filename = __zend_orig_filename;
            list->dbg.lineno = __zend_lineno;
            list->dbg.orig_lineno = __zend_orig_lineno;
#endif
            return;
        }
        list = list->next;
    }
}

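/* Allocate a huge block: round the request up to the real page size, map it
 * as its own chunk-aligned region, register it in huge_list and update the
 * memory-limit/statistics counters. */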
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
    void *ptr;

#if ZEND_MM_LIMIT
    if (heap->real_size + new_size > heap->limit) {
        if (heap->overflow == 0) {
#if ZEND_DEBUG
            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
            return NULL;
        }
    }
#endif
    ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
    if (UNEXPECTED(ptr == NULL)) {
        /* insufficient memory */
#if !ZEND_MM_LIMIT
        zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
        zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
        zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
#endif
        return NULL;
    }
#if ZEND_DEBUG
    zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
    zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
    do {
        size_t size = heap->real_size + new_size;
        size_t peak = MAX(heap->real_peak, size);
        heap->real_size = size;
        heap->real_peak = peak;
    } while (0);
    do {
        size_t size = heap->size + new_size;
        size_t peak = MAX(heap->peak, size);
        heap->size = size;
        heap->peak = peak;
    } while (0);
#elif ZEND_MM_LIMIT
    heap->real_size += new_size;
#endif
    return ptr;
}

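/* Free a huge block: the pointer must be chunk-aligned; its size is taken
 * from huge_list before the underlying mapping is released. */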
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    size_t size;

    ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
    size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    zend_mm_chunk_free(heap, ptr, size);
#if ZEND_MM_STAT || ZEND_MM_LIMIT
    heap->real_size -= size;
#endif
#if ZEND_MM_STAT
    heap->size -= size;
#endif
}

/******************/
/* Initialization */
/******************/

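/* Bootstrap the allocator: the very first chunk is mapped directly and its
 * reserved first page hosts the zend_mm_heap structure itself (heap_slot),
 * so the heap needs no separate allocation. */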
static zend_mm_heap *zend_mm_init(void)
{
    zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
    zend_mm_heap *heap;

    if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
        stderr_last_error("Can't initialize heap");
#else
        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
        return NULL;
    }
    heap = &chunk->heap_slot;
    chunk->heap = heap;
    chunk->next = chunk;
    chunk->prev = chunk;
    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    chunk->num = 0;
    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
    heap->main_chunk = chunk;
    heap->cached_chunks = NULL;
    heap->chunks_count = 1;
    heap->peak_chunks_count = 1;
    heap->cached_chunks_count = 0;
    heap->avg_chunks_count = 1.0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
    heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
    heap->real_peak = ZEND_MM_CHUNK_SIZE;
    heap->size = 0;
    heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
    heap->limit = (Z_L(-1) >> Z_L(1));
    heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
    heap->use_custom_heap = 0;
#endif
#if ZEND_MM_STORAGE
    heap->storage = NULL;
#endif
    heap->huge_list = NULL;
    return heap;
}

#if ZEND_DEBUG
/******************/
/* Leak detection */
/******************/

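/* Scan one small-size run starting at element "j" and count further blocks
 * leaked from the same file/line as "leak"; matching entries are cleared so
 * each leak site is reported only once. */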
static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, int i, int j, zend_leak_info *leak)
{
    int empty = 1;
    zend_long count = 0;
    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

    while (j < bin_elements[bin_num]) {
        if (dbg->size != 0) {
            if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
                count++;
                dbg->size = 0;
                dbg->filename = NULL;
                dbg->lineno = 0;
            } else {
                empty = 0;
            }
        }
        j++;
        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
    }
    if (empty) {
        zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
    }
    return count;
}

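/* Continue the leak scan from page "i" across the remaining pages and chunks,
 * counting additional blocks leaked from the same file/line as "leak". */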
static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, int i, zend_leak_info *leak)
{
    zend_long count = 0;

    do {
        while (i < p->free_tail) {
            if (zend_mm_bitset_is_set(p->free_map, i)) {
                if (p->map[i] & ZEND_MM_IS_SRUN) {
                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
                    count += zend_mm_find_leaks_small(p, i, 0, leak);
                    i += bin_pages[bin_num];
                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

                    if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
                        count++;
                    }
                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);
                    i += pages_count;
                }
            } else {
                i++;
            }
        }
        p = p->next;
    } while (p != heap->main_chunk);
    return count;
}

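/* Report all memory still allocated at shutdown: leaked huge blocks are
 * released directly, then every used page of every chunk is walked and each
 * leak (grouped by allocation site) is passed to the message dispatcher. */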
static void zend_mm_check_leaks(zend_mm_heap *heap)
{
    zend_mm_huge_list *list;
    zend_mm_chunk *p;
    zend_leak_info leak;
    zend_long repeated = 0;
    uint32_t total = 0;
    int i, j;

    /* find leaked huge blocks and free them */
    list = heap->huge_list;
    while (list) {
        zend_mm_huge_list *q = list;

        heap->huge_list = list->next;

        leak.addr = list->ptr;
        leak.size = list->dbg.size;
        leak.filename = list->dbg.filename;
        leak.orig_filename = list->dbg.orig_filename;
        leak.lineno = list->dbg.lineno;
        leak.orig_lineno = list->dbg.orig_lineno;

        zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
        zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
//???       repeated = zend_mm_find_leaks_huge(segment, p);
        total += 1 + repeated;
        if (repeated) {
            zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
        }

        list = list->next;
        zend_mm_chunk_free(heap, q->ptr, q->size);
        zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
    }

    /* for each chunk */
    p = heap->main_chunk;
    do {
        i = ZEND_MM_FIRST_PAGE;
        while (i < p->free_tail) {
            if (zend_mm_bitset_is_set(p->free_map, i)) {
                if (p->map[i] & ZEND_MM_IS_SRUN) {
                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

                    j = 0;
                    while (j < bin_elements[bin_num]) {
                        if (dbg->size != 0) {
                            leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
                            leak.size = dbg->size;
                            leak.filename = dbg->filename;
                            leak.orig_filename = dbg->orig_filename;
                            leak.lineno = dbg->lineno;
                            leak.orig_lineno = dbg->orig_lineno;

                            zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
                            zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

                            dbg->size = 0;
                            dbg->filename = NULL;
                            dbg->lineno = 0;

                            repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
                                       zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
                            total += 1 + repeated;
                            if (repeated) {
                                zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
                            }
                        }
                        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
                        j++;
                    }
                    i += bin_pages[bin_num];
                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

                    leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
                    leak.size = dbg->size;
                    leak.filename = dbg->filename;
                    leak.orig_filename = dbg->orig_filename;
                    leak.lineno = dbg->lineno;
                    leak.orig_lineno = dbg->orig_lineno;

                    zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
                    zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);

                    repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
                    total += 1 + repeated;
                    if (repeated) {
                        zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
                    }
                    i += pages_count;
                }
            } else {
                i++;
            }
        }
        p = p->next;
    } while (p != heap->main_chunk);
    if (total) {
        zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
    }
}
#endif

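/* Release the heap. With "full" set everything is returned to the system;
 * otherwise the first chunk is reinitialized for the next request and a
 * number of cached chunks close to the recent average is kept. In debug
 * builds leaks are reported first unless "silent" is set. */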
void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
{
    zend_mm_chunk *p;
    zend_mm_huge_list *list;

#if ZEND_MM_CUSTOM
    if (heap->use_custom_heap) {
        if (full) {
            heap->_free(heap);
        }
        return;
    }
#endif

#if ZEND_DEBUG
    if (!silent) {
        zend_mm_check_leaks(heap);
    }
#endif

    /* free huge blocks */
    list = heap->huge_list;
    while (list) {
        zend_mm_huge_list *q = list;
        list = list->next;
        zend_mm_chunk_free(heap, q->ptr, q->size);
    }

    /* move all chunks except the first one into the cache */
    p = heap->main_chunk->next;
    while (p != heap->main_chunk) {
        zend_mm_chunk *q = p->next;
        p->next = heap->cached_chunks;
        heap->cached_chunks = p;
        p = q;
        heap->chunks_count--;
        heap->cached_chunks_count++;
    }

    if (full) {
        /* free all cached chunks */
        while (heap->cached_chunks) {
            p = heap->cached_chunks;
            heap->cached_chunks = p->next;
            zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
        }
        /* free the first chunk */
        zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
    } else {
        zend_mm_heap old_heap;

        /* free some cached chunks to keep average count */
        heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
        while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
               heap->cached_chunks) {
            p = heap->cached_chunks;
            heap->cached_chunks = p->next;
            zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
            heap->cached_chunks_count--;
        }
        /* clear cached chunks */
        p = heap->cached_chunks;
        while (p != NULL) {
            zend_mm_chunk *q = p->next;
            memset(p, 0, sizeof(zend_mm_chunk));
            p->next = q;
            p = q;
        }

        /* reinitialize the first chunk and heap */
        old_heap = *heap;
        p = heap->main_chunk;
        memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE);
        *heap = old_heap;
        memset(heap->free_slot, 0, sizeof(heap->free_slot));
        heap->main_chunk = p;
        p->heap = &p->heap_slot;
        p->next = p;
        p->prev = p;
        p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
        p->free_tail = ZEND_MM_FIRST_PAGE;
        p->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
        p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
        heap->chunks_count = 1;
        heap->peak_chunks_count = 1;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
        heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
        heap->real_peak = ZEND_MM_CHUNK_SIZE;
        heap->size = heap->peak = 0;
#endif
    }
}

/**************/
/* PUBLIC API */
/**************/

ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return zend_mm_realloc_heap(heap, ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return zend_mm_realloc_heap(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

/**********************/
/* Allocation Manager */
/**********************/

typedef struct _zend_alloc_globals {
    zend_mm_heap *mm_heap;
} zend_alloc_globals;

#ifdef ZTS
static int alloc_globals_id;
# define AG(v) ZEND_TSRMG(alloc_globals_id, zend_alloc_globals *, v)
#else
# define AG(v) (alloc_globals.v)
static zend_alloc_globals alloc_globals;
#endif

ZEND_API int is_zend_mm(void)
{
#if ZEND_MM_CUSTOM
    return !AG(mm_heap)->use_custom_heap;
#else
    return 1;
#endif
}

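/* The specialized per-size entry points below are compiled only for
 * non-debug builds on non-Windows platforms. */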
#if !ZEND_DEBUG && !defined(_WIN32)
#undef _emalloc

#if ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
            return AG(mm_heap)->_malloc(size); \
        } \
    } while (0)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
            AG(mm_heap)->_free(ptr); \
            return; \
        } \
    } while (0)
#else
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
#endif

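/* Expand one fast allocator per predefined small size; ZEND_MM_BINS_INFO
 * instantiates the macro once for every bin (_emalloc_<size>()). */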
# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
    ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
        ZEND_MM_CUSTOM_ALLOCATOR(_size); \
        return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
    }

ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)

ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{

    ZEND_MM_CUSTOM_ALLOCATOR(size);
    return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
{

    ZEND_MM_CUSTOM_ALLOCATOR(size);
    return zend_mm_alloc_huge(AG(mm_heap), size);
}

#if ZEND_DEBUG
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
        { \
            size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
            int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
            ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
            zend_mm_free_small(AG(mm_heap), ptr, _num); \
        } \
    }
#else
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
        { \
            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
            zend_mm_free_small(AG(mm_heap), ptr, _num); \
        } \
    }
#endif

ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)

ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
{

    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
    {
        size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
        int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;

        ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
        ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
        ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
        zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
    }
}

ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
{

    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
    // TODO: use size???
    zend_mm_free_huge(AG(mm_heap), ptr);
}
#endif

ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{

#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return AG(mm_heap)->_malloc(size);
    }
#endif
    return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{

#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        AG(mm_heap)->_free(ptr);
        return;
    }
#endif
    zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{

#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return AG(mm_heap)->_realloc(ptr, size);
    }
#endif
    return zend_mm_realloc_heap(AG(mm_heap), ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{

#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return AG(mm_heap)->_realloc(ptr, size);
    }
#endif
    return zend_mm_realloc_heap(AG(mm_heap), ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return 0;
    }
#endif
    return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

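/* Compute nmemb * size + offset with overflow detection; on overflow a fatal
 * error is raised instead of returning a truncated size. */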
static zend_always_inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
{
    int overflow;
    size_t ret = zend_safe_address(nmemb, size, offset, &overflow);

    if (UNEXPECTED(overflow)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
        return 0;
    }
    return ret;
}


ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return emalloc_rel(safe_address(nmemb, size, offset));
}

ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
{
    return pemalloc(safe_address(nmemb, size, offset), 1);
}

ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    return erealloc_rel(ptr, safe_address(nmemb, size, offset));
}

ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
{
    return perealloc(ptr, safe_address(nmemb, size, offset), 1);
}


ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    void *p;

    HANDLE_BLOCK_INTERRUPTIONS();

    p = _safe_emalloc(nmemb, size, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    if (UNEXPECTED(p == NULL)) {
        HANDLE_UNBLOCK_INTERRUPTIONS();
        return p;
    }
    memset(p, 0, size * nmemb);
    HANDLE_UNBLOCK_INTERRUPTIONS();
    return p;
}

ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    size_t length;
    char *p;

    HANDLE_BLOCK_INTERRUPTIONS();

    length = strlen(s);
    p = (char *) _emalloc(safe_address(length, 1, 1) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    if (UNEXPECTED(p == NULL)) {
        HANDLE_UNBLOCK_INTERRUPTIONS();
        return p;
    }
    memcpy(p, s, length+1);
    HANDLE_UNBLOCK_INTERRUPTIONS();
    return p;
}

ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    char *p;

    HANDLE_BLOCK_INTERRUPTIONS();

    p = (char *) _emalloc(safe_address(length, 1, 1) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
    if (UNEXPECTED(p == NULL)) {
        HANDLE_UNBLOCK_INTERRUPTIONS();
        return p;
    }
    memcpy(p, s, length);
    p[length] = 0;
    HANDLE_UNBLOCK_INTERRUPTIONS();
    return p;
}


ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
{
    char *p;

    HANDLE_BLOCK_INTERRUPTIONS();

    p = (char *) malloc(safe_address(length, 1, 1));
    if (UNEXPECTED(p == NULL)) {
        HANDLE_UNBLOCK_INTERRUPTIONS();
        return p;
    }
    if (length) {
        memcpy(p, s, length);
    }
    p[length] = 0;
    HANDLE_UNBLOCK_INTERRUPTIONS();
    return p;
}


ZEND_API int zend_set_memory_limit(size_t memory_limit)
{
#if ZEND_MM_LIMIT
    AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
#endif
    return SUCCESS;
}

ZEND_API size_t zend_memory_usage(int real_usage)
{
#if ZEND_MM_STAT
    if (real_usage) {
        return AG(mm_heap)->real_size;
    } else {
        size_t usage = AG(mm_heap)->size;
        return usage;
    }
#endif
    return 0;
}

ZEND_API size_t zend_memory_peak_usage(int real_usage)
{
#if ZEND_MM_STAT
    if (real_usage) {
        return AG(mm_heap)->real_peak;
    } else {
        return AG(mm_heap)->peak;
    }
#endif
    return 0;
}

ZEND_API void shutdown_memory_manager(int silent, int full_shutdown)
{
    zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
}

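/* Heap construction for a new thread/request. Setting the environment
 * variable USE_ZEND_ALLOC=0 bypasses the Zend allocator entirely and wires
 * the custom-heap hooks straight to the system malloc/free/realloc. */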
static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
{
#if ZEND_MM_CUSTOM
    char *tmp = getenv("USE_ZEND_ALLOC");

    if (tmp && !zend_atoi(tmp, 0)) {
        alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
        memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap));
        alloc_globals->mm_heap->use_custom_heap = 1;
        alloc_globals->mm_heap->_malloc = malloc;
        alloc_globals->mm_heap->_free = free;
        alloc_globals->mm_heap->_realloc = realloc;
        return;
    }
#endif
    ZEND_TSRMLS_CACHE_UPDATE();
    alloc_globals->mm_heap = zend_mm_init();
}

#ifdef ZTS
static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
{
    zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
}
#endif

ZEND_API void start_memory_manager(void)
{
#ifdef ZTS
    ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
#else
    alloc_globals_ctor(&alloc_globals);
#endif
#ifndef _WIN32
#  if defined(_SC_PAGESIZE)
    REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
#  elif defined(_SC_PAGE_SIZE)
    REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
#  endif
#endif
}

ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
{
    zend_mm_heap *old_heap;

    old_heap = AG(mm_heap);
    AG(mm_heap) = (zend_mm_heap*)new_heap;
    return (zend_mm_heap*)old_heap;
}

ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
                                          void* (*_malloc)(size_t),
                                          void  (*_free)(void*),
                                          void* (*_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
    zend_mm_heap *_heap = (zend_mm_heap*)heap;

    _heap->use_custom_heap = 1;
    _heap->_malloc = _malloc;
    _heap->_free = _free;
    _heap->_realloc = _realloc;
#endif
}

ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
                                          void* (**_malloc)(size_t),
                                          void  (**_free)(void*),
                                          void* (**_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
    zend_mm_heap *_heap = (zend_mm_heap*)heap;

    if (heap->use_custom_heap) {
        *_malloc = _heap->_malloc;
        *_free = _heap->_free;
        *_realloc = _heap->_realloc;
    } else {
        *_malloc = NULL;
        *_free = NULL;
        *_realloc = NULL;
    }
#else
    *_malloc = NULL;
    *_free = NULL;
    *_realloc = NULL;
#endif
}

ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
{
#if ZEND_MM_STORAGE
    return heap->storage;
#else
    return NULL;
#endif
}

ZEND_API zend_mm_heap *zend_mm_startup(void)
{
    return zend_mm_init();
}

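/* Create a standalone heap whose chunks are obtained through user-supplied
 * storage handlers; "data" of "data_size" bytes is copied next to the
 * zend_mm_storage record for the handlers' private use. */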
ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
{
#if ZEND_MM_STORAGE
    zend_mm_storage tmp_storage, *storage;
    zend_mm_chunk *chunk;
    zend_mm_heap *heap;

    memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
    tmp_storage.data = data;
    chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
    if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
        stderr_last_error("Can't initialize heap");
#else
        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
        return NULL;
    }
    heap = &chunk->heap_slot;
    chunk->heap = heap;
    chunk->next = chunk;
    chunk->prev = chunk;
    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    chunk->num = 0;
    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
    heap->main_chunk = chunk;
    heap->cached_chunks = NULL;
    heap->chunks_count = 1;
    heap->peak_chunks_count = 1;
    heap->cached_chunks_count = 0;
    heap->avg_chunks_count = 1.0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
    heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
    heap->real_peak = ZEND_MM_CHUNK_SIZE;
    heap->size = 0;
    heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
    heap->limit = (Z_L(-1) >> Z_L(1));
    heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
    heap->use_custom_heap = 0;
#endif
    heap->storage = &tmp_storage;
    heap->huge_list = NULL;
    storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
    if (!storage) {
        handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
#if ZEND_MM_ERROR
#ifdef _WIN32
        stderr_last_error("Can't initialize heap");
#else
        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
        return NULL;
    }
    memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
    if (data) {
        storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
        memcpy(storage->data, data, data_size);
    }
    heap->storage = storage;
    return heap;
#else
    return NULL;
#endif
}

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * indent-tabs-mode: t
 * End:
 */
