1/*
2   +----------------------------------------------------------------------+
3   | Zend Engine                                                          |
4   +----------------------------------------------------------------------+
5   | Copyright (c) 1998-2014 Zend Technologies Ltd. (http://www.zend.com) |
6   +----------------------------------------------------------------------+
7   | This source file is subject to version 2.00 of the Zend license,     |
8   | that is bundled with this package in the file LICENSE, and is        |
9   | available through the world-wide-web at the following url:           |
10   | http://www.zend.com/license/2_00.txt.                                |
11   | If you did not receive a copy of the Zend license and are unable to  |
12   | obtain it through the world-wide-web, please send a note to          |
13   | license@zend.com so we can mail you a copy immediately.              |
14   +----------------------------------------------------------------------+
15   | Authors: Andi Gutmans <andi@zend.com>                                |
16   |          Zeev Suraski <zeev@zend.com>                                |
17   |          Dmitry Stogov <dmitry@zend.com>                             |
18   +----------------------------------------------------------------------+
19*/
20
21/* $Id$ */
22
23/*
24 * zend_alloc is designed to be a modern CPU cache friendly memory manager
25 * for PHP. Most ideas are taken from jemalloc and tcmalloc implementations.
26 *
27 * All allocations are split into 3 categories:
28 *
 * Huge  - the size is greater than the CHUNK size (~2M by default); the
 *         allocation is performed using mmap(). The result is aligned on a
 *         2M boundary.
 *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
 *         are always aligned on a page boundary.
 *
 * Small - less than 3/4 of the page size. Small sizes are rounded up to the
 *         nearest greater predefined small size (there are 30 predefined
 *         sizes: 8, 16, 24, 32, ... 3072). Small blocks are allocated from
 *         RUNs. Each RUN is allocated as a single page or a few consecutive
 *         pages. Allocation inside RUNs is implemented using a linked list
 *         of free elements. The result is aligned to 8 bytes.
41 *
 * zend_alloc allocates memory from the OS in CHUNKs; these CHUNKs and huge
 * memory blocks are always aligned to a CHUNK boundary, so it is very easy to
 * determine the CHUNK owning a given pointer. Regular CHUNKs reserve a single
 * page at the start for bookkeeping. It contains a bitset of free pages, a
 * few bitsets for available runs of the predefined small sizes, a map of
 * pages that keeps information about the usage of each page in this CHUNK,
 * etc.
48 *
 * zend_alloc provides the familiar emalloc/efree/erealloc API, but in addition
 * it provides specialized and optimized routines to allocate blocks of
 * predefined sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc.).
 * The library uses C preprocessor tricks that substitute calls to emalloc()
 * with more specialized routines when the requested size is known.
54 */
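
/*
 * Illustrative sketch only (kept out of the build): how a requested size maps
 * onto the three categories described above.  ZEND_MM_MAX_SMALL_SIZE and
 * ZEND_MM_MAX_LARGE_SIZE are the limits used by zend_mm_alloc_heap() further
 * below; the real dispatch is performed there.
 */
#if 0
static const char *zend_mm_size_category(size_t size)
{
    if (size <= ZEND_MM_MAX_SMALL_SIZE) {
        return "small";   /* served from a bin (RUN) of a predefined size */
    } else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
        return "large";   /* whole 4096-byte pages inside a CHUNK */
    } else {
        return "huge";    /* dedicated mmap()ed region, CHUNK aligned */
    }
}
#endif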
55
56#include "zend.h"
57#include "zend_alloc.h"
58#include "zend_globals.h"
59#include "zend_operators.h"
60
61#ifdef HAVE_SIGNAL_H
62# include <signal.h>
63#endif
64#ifdef HAVE_UNISTD_H
65# include <unistd.h>
66#endif
67
68#ifdef ZEND_WIN32
69# include <wincrypt.h>
70# include <process.h>
71#endif
72
73#include <stdio.h>
74#include <stdlib.h>
75#include <string.h>
76
77#include <sys/types.h>
78#include <sys/stat.h>
79#if HAVE_LIMITS_H
80#include <limits.h>
81#endif
82#include <fcntl.h>
83#include <errno.h>
84
85#ifndef _WIN32
86# ifdef HAVE_MREMAP
87#  ifndef _GNU_SOURCE
88#   define _GNU_SOURCE
89#  endif
90#  ifndef __USE_GNU
91#   define __USE_GNU
92#  endif
93# endif
94# include <sys/mman.h>
95# ifndef MAP_ANON
96#  ifdef MAP_ANONYMOUS
97#   define MAP_ANON MAP_ANONYMOUS
98#  endif
99# endif
100# ifndef MREMAP_MAYMOVE
101#  define MREMAP_MAYMOVE 0
102# endif
103# ifndef MAP_FAILED
104#  define MAP_FAILED ((void*)-1)
105# endif
106# ifndef MAP_POPULATE
107#  define MAP_POPULATE 0
# endif
109#endif
110
111#ifndef ZEND_MM_STAT
112# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
113#endif
114#ifndef ZEND_MM_LIMIT
115# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
116#endif
117#ifndef ZEND_MM_CUSTOM
118# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
119                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
120#endif
121#ifndef ZEND_MM_ERROR
122# define ZEND_MM_ERROR 1   /* report system errors                           */
123#endif
124
125#ifndef ZEND_MM_CHECK
126# define ZEND_MM_CHECK(condition, message)  do { \
127        if (UNEXPECTED(!(condition))) { \
128            zend_mm_panic(message); \
129        } \
130    } while (0)
131#endif
132
133typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
134typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */
135
136#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
137    (((size_t)(size)) & ((alignment) - 1))
138#define ZEND_MM_ALIGNED_BASE(size, alignment) \
139    (((size_t)(size)) & ~((alignment) - 1))
140#define ZEND_MM_ALIGNED_SIZE_EX(size, alignment) \
141    (((size_t)(size) + ((alignment) - 1)) & ~((alignment) - 1))
142#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
143    (((size_t)(size) + ((alignment) - 1)) / (alignment))
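
/*
 * A few sample values of these helpers (illustrative only), using the 2M
 * chunk and 4096-byte page sizes described below:
 *
 *   ZEND_MM_ALIGNED_OFFSET(0x200abc, 0x200000)  -> 0xabc
 *   ZEND_MM_ALIGNED_BASE(0x200abc, 0x200000)    -> 0x200000
 *   ZEND_MM_ALIGNED_SIZE_EX(13, 8)              -> 16
 *   ZEND_MM_SIZE_TO_NUM(8193, 4096)             -> 3    (pages needed)
 */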
144
145#define ZEND_MM_BITSET_LEN      (sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
146#define ZEND_MM_PAGE_MAP_LEN    (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
147
148typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */
149
150#define ZEND_MM_IS_FRUN                  0x00000000
151#define ZEND_MM_IS_LRUN                  0x40000000
152#define ZEND_MM_IS_SRUN                  0x80000000
153
154#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
155#define ZEND_MM_LRUN_PAGES_OFFSET        0
156
157#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
158#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0
159
160#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
161#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
162
163#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
164#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
165#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
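
/*
 * Example of the page-info encoding (illustrative only):
 *
 *   info = ZEND_MM_LRUN(3);   -> 0x40000003, a "large" run of 3 pages,
 *                                ZEND_MM_LRUN_PAGES(info) == 3
 *   info = ZEND_MM_SRUN(5);   -> 0x80000005, a "small" run for bin 5,
 *                                ZEND_MM_SRUN_BIN_NUM(info) == 5
 *   info = ZEND_MM_FRUN();    -> 0x00000000, a free page
 */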
166
167#define ZEND_MM_BINS 30
168
169typedef struct  _zend_mm_page      zend_mm_page;
170typedef struct  _zend_mm_bin       zend_mm_bin;
171typedef struct  _zend_mm_free_slot zend_mm_free_slot;
172typedef struct  _zend_mm_chunk     zend_mm_chunk;
173typedef struct  _zend_mm_huge_list zend_mm_huge_list;
174
175#ifdef _WIN64
176# define PTR_FMT "0x%0.16I64x"
177#elif SIZEOF_LONG == 8
178# define PTR_FMT "0x%0.16lx"
179#else
180# define PTR_FMT "0x%0.8lx"
181#endif
182
183/*
 * Memory is retrieved from the OS in chunks of a fixed 2MB size.
 * Inside a chunk it is managed in pages of a fixed 4096-byte size,
 * so each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
189 *
190 * free_pages - current number of free pages in this chunk
191 *
 * free_tail  - number of contiguous free pages at the end of the chunk
 *
 * free_map   - bitset (a bit for each page). The bit is set if the
 *              corresponding page is allocated. The allocator for "large"
 *              sizes can easily find a free page (or a run of contiguous
 *              pages) by searching for zero bits.
198 *
 * map        - contains service information for each page (32 bits per
 *              page).
201 *    usage:
202 *              (2 bits)
203 *              FRUN - free page,
204 *              LRUN - first page of "large" allocation
205 *              SRUN - first page of a bin used for "small" allocation
206 *
207 *    lrun_pages:
208 *              (10 bits) number of allocated pages
209 *
 *    srun_bin_num:
 *              (5 bits) bin number (e.g. 0 for the 8-byte bin, 1 for 16,
 *               2 for 24, 3 for 32, etc.); see zend_alloc_sizes.h
213 */
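
/*
 * Because chunks are allocated on a 2MB boundary (see zend_mm_chunk_alloc()
 * below), the owning chunk and page of any small/large pointer can be
 * recovered by plain masking.  Minimal sketch, kept out of the build; the
 * same arithmetic is used by zend_mm_alloc_heap()/zend_mm_free_heap() below.
 */
#if 0
static void zend_mm_locate(void *ptr)
{
    zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
    size_t page_offset   = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
    int page_num         = page_offset / ZEND_MM_PAGE_SIZE;
    zend_mm_page_info info;

    if (page_offset == 0) {
        /* chunk-aligned pointer: a huge block (or the chunk header itself) */
        return;
    }
    info = chunk->map[page_num];   /* FRUN/LRUN/SRUN, as described above */
    (void)info;
}
#endif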
214
215struct _zend_mm_heap {
216#if ZEND_MM_CUSTOM
217    int                use_custom_heap;
218#endif
219#if ZEND_MM_STAT
220    size_t             size;                    /* current memory usage */
221    size_t             peak;                    /* peak memory usage */
222#endif
223    zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
224#if ZEND_MM_STAT || ZEND_MM_LIMIT
225    size_t             real_size;               /* current size of allocated pages */
226#endif
227#if ZEND_MM_STAT
228    size_t             real_peak;               /* peak size of allocated pages */
229#endif
230#if ZEND_MM_LIMIT
231    size_t             limit;                   /* memory limit */
232    int                overflow;                /* memory overflow flag */
233#endif
234
235    zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */
236
237    zend_mm_chunk     *main_chunk;
238    zend_mm_chunk     *cached_chunks;           /* list of unused chunks */
    int                chunks_count;            /* number of allocated chunks */
240    int                peak_chunks_count;       /* peak number of allocated chunks for current request */
241    int                cached_chunks_count;     /* number of cached chunks */
242    double             avg_chunks_count;        /* average number of chunks allocated per request */
243#if ZEND_MM_CUSTOM
244    void              *(*_malloc)(size_t);
245    void               (*_free)(void*);
246    void              *(*_realloc)(void*, size_t);
247#endif
248};
249
250struct _zend_mm_chunk {
251    zend_mm_heap      *heap;
252    zend_mm_chunk     *next;
253    zend_mm_chunk     *prev;
254    int                free_pages;              /* number of free pages */
255    int                free_tail;               /* number of free pages at the end of chunk */
256    int                num;
257    char               reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)];
258    zend_mm_heap       heap_slot;               /* used only in main chunk */
259    zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
260    zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
261};
262
263struct _zend_mm_page {
264    char               bytes[ZEND_MM_PAGE_SIZE];
265};
266
267/*
 * A bin is one or a few contiguous pages (up to 8) used for allocation of
 * a particular "small size".
270 */
271struct _zend_mm_bin {
272    char               bytes[ZEND_MM_PAGE_SIZE * 8];
273};
274
275#if ZEND_DEBUG
276typedef struct _zend_mm_debug_info {
277    size_t             size;
278    const char        *filename;
279    const char        *orig_filename;
280    uint               lineno;
281    uint               orig_lineno;
282} zend_mm_debug_info;
283#endif
284
285struct _zend_mm_free_slot {
286    zend_mm_free_slot *next_free_slot;
287};
288
289struct _zend_mm_huge_list {
290    void              *ptr;
291    size_t             size;
292    zend_mm_huge_list *next;
293#if ZEND_DEBUG
294    zend_mm_debug_info dbg;
295#endif
296};
297
298#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
299    ((void*)(((zend_mm_page*)(chunk)) + (page_num)))
300
301#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
302static const unsigned int bin_data_size[] = {
303  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
304};
305
306#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
307static const int bin_elements[] = {
308  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
309};
310
311#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
312static const int bin_pages[] = {
313  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
314};
315
316#if ZEND_DEBUG
317void zend_debug_alloc_output(char *format, ...)
318{
319    char output_buf[256];
320    va_list args;
321
322    va_start(args, format);
    vsnprintf(output_buf, sizeof(output_buf), format, args);
324    va_end(args);
325
326#ifdef ZEND_WIN32
327    OutputDebugString(output_buf);
328#else
329    fprintf(stderr, "%s", output_buf);
330#endif
331}
332#endif
333
334static ZEND_NORETURN void zend_mm_panic(const char *message)
335{
336    fprintf(stderr, "%s\n", message);
337/* See http://support.microsoft.com/kb/190351 */
338#ifdef PHP_WIN32
339    fflush(stderr);
340#endif
341#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
342    kill(getpid(), SIGSEGV);
343#endif
344    exit(1);
345}
346
347static ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
348    const char *format,
349    size_t limit,
350#if ZEND_DEBUG
351    const char *filename,
352    uint lineno,
353#endif
354    size_t size)
355{
356    TSRMLS_FETCH();
357
358    heap->overflow = 1;
359    zend_try {
360        zend_error_noreturn(E_ERROR,
361            format,
362            limit,
363#if ZEND_DEBUG
364            filename,
365            lineno,
366#endif
367            size);
368    } zend_catch {
369    }  zend_end_try();
370    heap->overflow = 0;
371    zend_bailout();
372    exit(1);
373}
374
375/*****************/
376/* OS Allocation */
377/*****************/
378
379static void *zend_mm_mmap_fixed(void *addr, size_t size)
380{
381#ifdef _WIN32
382    return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
383#else
    /* MAP_FIXED would discard any existing mapping at the address, so it can't be used. */
385    void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);
386
387    if (ptr == MAP_FAILED) {
388#if ZEND_MM_ERROR
389        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
390#endif
391        return NULL;
392    } else if (ptr != addr) {
393        if (munmap(ptr, size) != 0) {
394#if ZEND_MM_ERROR
395            fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
396#endif
397        }
398        return NULL;
399    }
400    return ptr;
401#endif
402}
403
404static void *zend_mm_mmap(size_t size)
405{
406#ifdef _WIN32
407    void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
408
409    if (ptr == NULL) {
410#if ZEND_MM_ERROR
411        fprintf(stderr, "\nVirtualAlloc() failed: [%d]\n", GetLastError());
412#endif
413        return NULL;
414    }
415    return ptr;
416#else
417    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);
418
419    if (ptr == MAP_FAILED) {
420#if ZEND_MM_ERROR
421        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
422#endif
423        return NULL;
424    }
425    return ptr;
426#endif
427}
428
429static void zend_mm_munmap(void *addr, size_t size)
430{
431#ifdef _WIN32
432    if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
433#if ZEND_MM_ERROR
434        fprintf(stderr, "\nVirtualFree() failed: [%d]\n", GetLastError());
435#endif
436    }
437#else
438    if (munmap(addr, size) != 0) {
439#if ZEND_MM_ERROR
440        fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
441#endif
442    }
443#endif
444}
445
446/***********/
447/* Bitmask */
448/***********/
449
450/* number of trailing set (1) bits */
451static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
452{
453#if defined(__GNUC__)
454    return __builtin_ctzl(~bitset);
455#else
456    int n;
457
458    if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;
459
460    n = 0;
461#if SIZEOF_ZEND_LONG == 8
462    if (sizeof(zend_mm_bitset) == 8) {
463        if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
464    }
465#endif
466    if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
467    if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
468    if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
469    if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
470    return n + (bitset & 1);
471#endif
472}
473
/* number of trailing zero bits (0x01 -> 0; 0x40 -> 6; 0x00 -> LEN) */
475static zend_always_inline int zend_mm_bitset_ntz(zend_mm_bitset bitset)
476{
477#if defined(__GNUC__)
478    return __builtin_ctzl(bitset);
479#else
480    int n;
481
482    if (bitset == (zend_mm_bitset)0) return ZEND_MM_BITSET_LEN;
483
484    n = 1;
485#if SIZEOF_ZEND_LONG == 8
486    if (sizeof(zend_mm_bitset) == 8) {
487        if ((bitset & 0xffffffff) == 0) {n += 32; bitset = bitset >> Z_UL(32);}
488    }
489#endif
490    if ((bitset & 0x0000ffff) == 0) {n += 16; bitset = bitset >> 16;}
491    if ((bitset & 0x000000ff) == 0) {n +=  8; bitset = bitset >>  8;}
492    if ((bitset & 0x0000000f) == 0) {n +=  4; bitset = bitset >>  4;}
493    if ((bitset & 0x00000003) == 0) {n +=  2; bitset = bitset >>  2;}
494    return n - (bitset & 1);
495#endif
496}
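
/* Worked examples (illustrative): zend_mm_bitset_nts(0x37) == 3, because the
 * three lowest bits of 0x37 (binary ...110111) are set; zend_mm_bitset_ntz(0x38)
 * == 3, because the three lowest bits of 0x38 (binary ...111000) are clear.
 * The portable fallbacks return ZEND_MM_BITSET_LEN for an all-ones/all-zeros
 * word; the callers below check for those values before calling. */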
497
498static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
499{
500    int i = 0;
501
502    do {
503        zend_mm_bitset tmp = bitset[i];
504        if (tmp != (zend_mm_bitset)-1) {
505            return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
506        }
507        i++;
508    } while (i < size);
509    return -1;
510}
511
512static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
513{
514    int i = 0;
515
516    do {
517        zend_mm_bitset tmp = bitset[i];
518        if (tmp != 0) {
519            return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_ntz(tmp);
520        }
521        i++;
522    } while (i < size);
523    return -1;
524}
525
526static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
527{
528    int i = 0;
529
530    do {
531        zend_mm_bitset tmp = bitset[i];
532        if (tmp != (zend_mm_bitset)-1) {
533            int n = zend_mm_bitset_nts(tmp);
534            bitset[i] |= Z_UL(1) << n;
535            return i * ZEND_MM_BITSET_LEN + n;
536        }
537        i++;
538    } while (i < size);
539    return -1;
540}
541
542static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
543{
544    return (bitset[bit / ZEND_MM_BITSET_LEN] & (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0;
545}
546
547static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
548{
549    bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
550}
551
552static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
553{
554    bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
555}
556
557static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
558{
559    if (len == 1) {
560        zend_mm_bitset_set_bit(bitset, start);
561    } else {
562        int pos = start / ZEND_MM_BITSET_LEN;
563        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
564        int bit = start & (ZEND_MM_BITSET_LEN - 1);
565        zend_mm_bitset tmp;
566
567        if (pos != end) {
568            /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
569            tmp = (zend_mm_bitset)-1 << bit;
570            bitset[pos++] |= tmp;
571            while (pos != end) {
572                /* set all bits */
573                bitset[pos++] = (zend_mm_bitset)-1;
574            }
575            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
576            /* set bits from "0" to "end" */
577            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
578            bitset[pos] |= tmp;
579        } else {
580            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
581            /* set bits from "bit" to "end" */
582            tmp = (zend_mm_bitset)-1 << bit;
583            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
584            bitset[pos] |= tmp;
585        }
586    }
587}
588
589static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
590{
591    if (len == 1) {
592        zend_mm_bitset_reset_bit(bitset, start);
593    } else {
594        int pos = start / ZEND_MM_BITSET_LEN;
595        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
596        int bit = start & (ZEND_MM_BITSET_LEN - 1);
597        zend_mm_bitset tmp;
598
599        if (pos != end) {
600            /* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
601            tmp = ~((Z_L(1) << bit) - 1);
602            bitset[pos++] &= ~tmp;
603            while (pos != end) {
                /* reset all bits */
605                bitset[pos++] = 0;
606            }
607            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
608            /* reset bits from "0" to "end" */
609            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
610            bitset[pos] &= ~tmp;
611        } else {
612            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
613            /* reset bits from "bit" to "end" */
614            tmp = (zend_mm_bitset)-1 << bit;
615            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
616            bitset[pos] &= ~tmp;
617        }
618    }
619}
620
621static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
622{
623    if (len == 1) {
624        return !zend_mm_bitset_is_set(bitset, start);
625    } else {
626        int pos = start / ZEND_MM_BITSET_LEN;
627        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
628        int bit = start & (ZEND_MM_BITSET_LEN - 1);
629        zend_mm_bitset tmp;
630
631        if (pos != end) {
            /* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
633            tmp = (zend_mm_bitset)-1 << bit;
634            if ((bitset[pos++] & tmp) != 0) {
635                return 0;
636            }
637            while (pos != end) {
                /* check whole words */
639                if (bitset[pos++] != 0) {
640                    return 0;
641                }
642            }
643            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check bits from "0" to "end" */
645            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
646            return (bitset[pos] & tmp) == 0;
647        } else {
648            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check bits from "bit" to "end" */
650            tmp = (zend_mm_bitset)-1 << bit;
651            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
652            return (bitset[pos] & tmp) == 0;
653        }
654    }
655}
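
/*
 * Minimal usage sketch of the range helpers above (kept out of the build),
 * mirroring how zend_mm_alloc_pages()/zend_mm_free_pages() mark page runs in
 * a chunk's free_map further below:
 */
#if 0
static void zend_mm_bitset_demo(void)
{
    zend_mm_page_map map;                        /* one bit per page */

    memset(map, 0, sizeof(map));
    zend_mm_bitset_set_range(map, 1, 3);         /* pages 1..3 now allocated */
    /* zend_mm_bitset_is_free_range(map, 1, 3) -> 0 */
    /* zend_mm_bitset_is_free_range(map, 4, 8) -> 1 */
    zend_mm_bitset_reset_range(map, 1, 3);       /* pages 1..3 free again */
    /* zend_mm_bitset_is_free_range(map, 0, 16) -> 1 */
}
#endif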
656
657/**********/
658/* Chunks */
659/**********/
660
661static void *zend_mm_chunk_alloc(size_t size, size_t alignment)
662{
663    void *ptr = zend_mm_mmap(size);
664
665    if (ptr == NULL) {
666        return NULL;
667    } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
668#ifdef MADV_HUGEPAGE
669        madvise(ptr, size, MADV_HUGEPAGE);
670#endif
671        return ptr;
672    } else {
673        size_t offset;
674
675        /* chunk has to be aligned */
676        zend_mm_munmap(ptr, size);
677        ptr = zend_mm_mmap(size + alignment - ZEND_MM_PAGE_SIZE);
678#ifdef _WIN32
679        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
680        zend_mm_munmap(ptr, size + alignment - ZEND_MM_PAGE_SIZE);
681        ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
682        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
683        if (offset != 0) {
684            zend_mm_munmap(ptr, size);
685            return NULL;
686        }
687        return ptr;
688#else
689        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
690        if (offset != 0) {
691            offset = alignment - offset;
692            zend_mm_munmap(ptr, offset);
693            ptr = (char*)ptr + offset;
694        } else {
695            zend_mm_munmap((char*)ptr + size, alignment - ZEND_MM_PAGE_SIZE);
696        }
697# ifdef MADV_HUGEPAGE
698        madvise(ptr, size, MADV_HUGEPAGE);
699# endif
700#endif
701        return ptr;
702    }
703}
704
705static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
706{
707    chunk->heap = heap;
708    chunk->next = heap->main_chunk;
709    chunk->prev = heap->main_chunk->prev;
710    chunk->prev->next = chunk;
711    chunk->next->prev = chunk;
712    /* mark first pages as allocated */
713    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
714    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    /* younger chunks have a bigger number */
716    chunk->num = chunk->prev->num + 1;
717    /* mark first pages as allocated */
718    chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
719    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
720}
721
722/***********************/
723/* Huge Runs (forward) */
724/***********************/
725
726static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
727static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
728static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
729
730#if ZEND_DEBUG
731static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
732#else
733static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
734#endif
735
736/**************/
737/* Large Runs */
738/**************/
739
740#if ZEND_DEBUG
741static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
742#else
743static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
744#endif
745{
746    zend_mm_chunk *chunk = heap->main_chunk;
747    int page_num, len;
748
749    while (1) {
750        if (UNEXPECTED(chunk->free_pages < pages_count)) {
751            goto not_found;
752#if 0
753        } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
754            if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
755                goto not_found;
756            } else {
757                page_num = chunk->free_tail;
758                goto found;
759            }
760        } else if (0) {
761            /* First-Fit Search */
762            int free_tail = chunk->free_tail;
763            zend_mm_bitset *bitset = chunk->free_map;
764            zend_mm_bitset tmp = *(bitset++);
765            int i = 0;
766
767            while (1) {
768                /* skip allocated blocks */
769                while (tmp == (zend_mm_bitset)-1) {
770                    i += ZEND_MM_BITSET_LEN;
771                    if (i == ZEND_MM_PAGES) {
772                        goto not_found;
773                    }
774                    tmp = *(bitset++);
775                }
776                /* find first 0 bit */
777                page_num = i + zend_mm_bitset_nts(tmp);
778                /* reset bits from 0 to "bit" */
779                tmp &= tmp + 1;
780                /* skip free blocks */
781                while (tmp == 0) {
782                    i += ZEND_MM_BITSET_LEN;
783                    len = i - page_num;
784                    if (len >= pages_count) {
785                        goto found;
786                    } else if (i >= free_tail) {
787                        goto not_found;
788                    }
789                    tmp = *(bitset++);
790                }
791                /* find first 1 bit */
792                len = (i + zend_mm_bitset_ntz(tmp)) - page_num;
793                if (len >= pages_count) {
794                    goto found;
795                }
796                /* set bits from 0 to "bit" */
797                tmp |= tmp - 1;
798            }
799#endif
800        } else {
801            /* Best-Fit Search */
802            int best = -1;
803            int best_len = ZEND_MM_PAGES;
804            int free_tail = chunk->free_tail;
805            zend_mm_bitset *bitset = chunk->free_map;
806            zend_mm_bitset tmp = *(bitset++);
807            int i = 0;
808
809            while (1) {
810                /* skip allocated blocks */
811                while (tmp == (zend_mm_bitset)-1) {
812                    i += ZEND_MM_BITSET_LEN;
813                    if (i == ZEND_MM_PAGES) {
814                        if (best > 0) {
815                            page_num = best;
816                            goto found;
817                        } else {
818                            goto not_found;
819                        }
820                    }
821                    tmp = *(bitset++);
822                }
823                /* find first 0 bit */
824                page_num = i + zend_mm_bitset_nts(tmp);
825                /* reset bits from 0 to "bit" */
826                tmp &= tmp + 1;
827                /* skip free blocks */
828                while (tmp == 0) {
829                    i += ZEND_MM_BITSET_LEN;
830                    if (i >= free_tail) {
831                        len = ZEND_MM_PAGES - page_num;
832                        if (len >= pages_count && len < best_len) {
833                            chunk->free_tail = page_num + pages_count;
834                            goto found;
835                        } else {
836                            /* set accurate value */
837                            chunk->free_tail = page_num;
838                            if (best > 0) {
839                                page_num = best;
840                                goto found;
841                            } else {
842                                goto not_found;
843                            }
844                        }
845                    }
846                    tmp = *(bitset++);
847                }
848                /* find first 1 bit */
849                len = i + zend_mm_bitset_ntz(tmp) - page_num;
850                if (len >= pages_count) {
851                    if (len == pages_count) {
852                        goto found;
853                    } else if (len < best_len) {
854                        best_len = len;
855                        best = page_num;
856                    }
857                }
858                /* set bits from 0 to "bit" */
859                tmp |= tmp - 1;
860            }
861        }
862
863not_found:
864        if (chunk->next == heap->main_chunk) {
865            if (heap->cached_chunks) {
866                heap->cached_chunks_count--;
867                chunk = heap->cached_chunks;
868                heap->cached_chunks = chunk->next;
869            } else {
870#if ZEND_MM_LIMIT
871                if (heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit) {
872                    if (heap->overflow == 0) {
873#if ZEND_DEBUG
874                        zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted at %s:%d (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, __zend_filename, __zend_lineno, size);
875#else
876                        zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
877#endif
878                        return NULL;
879                    }
880                }
881#endif
882                chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
883                if (UNEXPECTED(chunk == NULL)) {
884                    /* insufficient memory */
885                    return NULL;
886                }
887#if ZEND_MM_STAT
888                do {
889                    size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
890                    size_t peak = MAX(heap->real_peak, size);
891                    heap->real_size = size;
892                    heap->real_peak = peak;
893                } while (0);
894#elif ZEND_MM_LIMIT
895                heap->real_size += ZEND_MM_CHUNK_SIZE;
896
897#endif
898            }
899            heap->chunks_count++;
900            if (heap->chunks_count > heap->peak_chunks_count) {
901                heap->peak_chunks_count = heap->chunks_count;
902            }
903            zend_mm_chunk_init(heap, chunk);
904            page_num = ZEND_MM_FIRST_PAGE;
905            len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
906            goto found;
907        } else {
908            chunk = chunk->next;
909        }
910    }
911
912found:
913    /* mark run as allocated */
914    chunk->free_pages -= pages_count;
915    zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
916    chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
917    if (page_num == chunk->free_tail) {
918        chunk->free_tail = page_num + pages_count;
919    }
920    return ZEND_MM_PAGE_ADDR(chunk, page_num);
921}
922
923static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
924{
925    int pages_count = ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
926#if ZEND_DEBUG
927    void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
928#else
929    void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
930#endif
931#if ZEND_MM_STAT
932    do {
933        size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
934        size_t peak = MAX(heap->peak, size);
935        heap->size = size;
936        heap->peak = peak;
937    } while (0);
938#endif
939    return ptr;
940}
941
942static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
943{
944    chunk->free_pages += pages_count;
945    zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
946    chunk->map[page_num] = 0;
947    if (chunk->free_tail == page_num + pages_count) {
        /* this setting may not be accurate */
949        chunk->free_tail = page_num;
950    }
951    if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
952        /* delete chunk */
953        chunk->next->prev = chunk->prev;
954        chunk->prev->next = chunk->next;
955        heap->chunks_count--;
956        if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1) {
957            /* delay deletion */
958            heap->cached_chunks_count++;
959            chunk->next = heap->cached_chunks;
960            heap->cached_chunks = chunk;
961        } else {
962#if ZEND_MM_STAT || ZEND_MM_LIMIT
963            heap->real_size -= ZEND_MM_CHUNK_SIZE;
964#endif
965            if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
966                zend_mm_munmap(chunk, ZEND_MM_CHUNK_SIZE);
967            } else {
968//TODO: select the best chunk to delete???
969                chunk->next = heap->cached_chunks->next;
970                zend_mm_munmap(heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
971                heap->cached_chunks = chunk;
972            }
973        }
974    }
975}
976
977static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
978{
979#if ZEND_MM_STAT
980    heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
981#endif
982    zend_mm_free_pages(heap, chunk, page_num, pages_count);
983}
984
985/**************/
986/* Small Runs */
987/**************/
988
/* highest set bit number (0->0, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
990static zend_always_inline int zend_mm_small_size_to_bit(int size)
991{
992#if defined(__GNUC__)
993    return (__builtin_clz(size) ^ 0x1f) + 1;
994#else
995    int n = 16;
996    if (size == 0) return 0;
997    if (size <= 0x00ff) {n -= 8; size = size << 8;}
998    if (size <= 0x0fff) {n -= 4; size = size << 4;}
999    if (size <= 0x3fff) {n -= 2; size = size << 2;}
1000    if (size <= 0x7fff) {n -= 1;}
1001    return n;
1002#endif
1003}
1004
1005#ifndef MAX
1006# define MAX(a, b) (((a) > (b)) ? (a) : (b))
1007#endif
1008
1009#ifndef MIN
1010# define MIN(a, b) (((a) < (b)) ? (a) : (b))
1011#endif
1012
1013static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1014{
1015#if 0
1016    int n;
1017                            /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
1018    static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
1019    static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};
1020
1021    if (UNEXPECTED(size <= 2)) return 0;
1022    n = zend_mm_small_size_to_bit(size - 1);
1023    return ((size-1) >> f1[n]) + f2[n];
1024#else
1025    int t1, t2, t3;
1026
1027    if (UNEXPECTED(size <= 8)) return 0;
1028    t1 = (int)(size - 1);
1029    t2 = zend_mm_small_size_to_bit(t1);
1030    t3 = t2 - 6;
1031    t3 = (t3 < 0) ? 0 : t3;
1032    t2 = t3 + 3;
1033    t1 = t1 >> t2;
1034    t3 = t3 << 2;
1035    return t1 + t3;
1036#endif
1037}
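
/*
 * Examples (illustrative), following the predefined small sizes 8, 16, 24,
 * 32, ... listed at the top of this file:
 *
 *   zend_mm_small_size_to_bin(1)  .. (8)  -> 0   (8-byte bin)
 *   zend_mm_small_size_to_bin(9)  .. (16) -> 1   (16-byte bin)
 *   zend_mm_small_size_to_bin(17) .. (24) -> 2   (24-byte bin)
 *   zend_mm_small_size_to_bin(57) .. (64) -> 7   (64-byte bin)
 */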
1038
1039#define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)
1040
1041static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1042{
1043    zend_mm_chunk *chunk;
1044    int page_num;
1045    zend_mm_bin *bin;
1046    zend_mm_free_slot *p, *end;
1047
1048#if ZEND_DEBUG
1049    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1050#else
1051    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1052#endif
1053    if (UNEXPECTED(bin == NULL)) {
1054        /* insufficient memory */
1055        return NULL;
1056    }
1057
1058    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1059    page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
1060    chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1061    if (bin_pages[bin_num] > 1) {
1062        int i = 1;
1063        do {
1064            chunk->map[page_num+i] = ZEND_MM_SRUN(bin_num);
1065            i++;
1066        } while (i < bin_pages[bin_num]);
1067    }
1068
1069    /* create a linked list of elements from 1 to last */
1070    end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1071    heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1072    do {
        p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1074#if ZEND_DEBUG
1075        do {
1076            zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1077            dbg->size = 0;
1078        } while (0);
1079#endif
1080        p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1081    } while (p != end);
1082
1083    /* terminate list using NULL */
1084    p->next_free_slot = NULL;
1085#if ZEND_DEBUG
    do {
        zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
        dbg->size = 0;
    } while (0);
1090#endif
1091
1092    /* return first element */
1093    return (char*)bin;
1094}
1095
1096static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1097{
1098#if ZEND_MM_STAT
1099    do {
1100        size_t size = heap->size + bin_data_size[bin_num];
1101        size_t peak = MAX(heap->peak, size);
1102        heap->size = size;
1103        heap->peak = peak;
1104    } while (0);
1105#endif
1106
1107    if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
1108        zend_mm_free_slot *p = heap->free_slot[bin_num];
1109        heap->free_slot[bin_num] = p->next_free_slot;
1110        return (void*)p;
1111    } else {
1112        return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1113    }
1114}
1115
1116static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1117{
1118    zend_mm_free_slot *p;
1119
1120#if ZEND_MM_STAT
1121    heap->size -= bin_data_size[bin_num];
1122#endif
1123
1124#if ZEND_DEBUG
1125    do {
1126        zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1127        dbg->size = 0;
1128    } while (0);
1129#endif
1130
1131    p = (zend_mm_free_slot*)ptr;
1132    p->next_free_slot = heap->free_slot[bin_num];
1133    heap->free_slot[bin_num] = p;
1134}
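
/*
 * The per-bin free lists are LIFO stacks threaded through the free blocks
 * themselves, so the most recently freed block is handed out first.
 * Illustrative only (file/line macro arguments elided):
 *
 *   void *a = zend_mm_alloc_small(heap, 8, 0 ...);
 *   zend_mm_free_small(heap, a, 0);
 *   void *b = zend_mm_alloc_small(heap, 8, 0 ...);   // b == a
 */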
1135
1136/********/
1137/* Heap */
1138/********/
1139
1140#if ZEND_DEBUG
1141static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
1142{
1143    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1144    zend_mm_chunk *chunk;
1145    int page_num;
1146    zend_mm_page_info info;
1147
1148    ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
1149    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1150    page_num = page_offset / ZEND_MM_PAGE_SIZE;
1151    info = chunk->map[page_num];
1152    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1153    if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1154        int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1155        return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1156    } else /* if (info & ZEND_MM_IS_LRUN) */ {
1157        int pages_count = ZEND_MM_LRUN_PAGES(info);
1158
1159        return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1160    }
1161}
1162#endif
1163
1164static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1165{
1166    void *ptr;
1167#if ZEND_DEBUG
1168    size_t real_size = size;
1169    zend_mm_debug_info *dbg;
1170
1171    /* special handling for zero-size allocation */
1172    size = MAX(size, 1);
1173    size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1174#endif
1175    if (size <= ZEND_MM_MAX_SMALL_SIZE) {
1176        ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1177#if ZEND_DEBUG
1178        dbg = zend_mm_get_debug_info(heap, ptr);
1179        dbg->size = real_size;
1180        dbg->filename = __zend_filename;
1181        dbg->orig_filename = __zend_orig_filename;
1182        dbg->lineno = __zend_lineno;
1183        dbg->orig_lineno = __zend_orig_lineno;
1184#endif
1185        return ptr;
1186    } else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
1187        ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1188#if ZEND_DEBUG
1189        dbg = zend_mm_get_debug_info(heap, ptr);
1190        dbg->size = real_size;
1191        dbg->filename = __zend_filename;
1192        dbg->orig_filename = __zend_orig_filename;
1193        dbg->lineno = __zend_lineno;
1194        dbg->orig_lineno = __zend_orig_lineno;
1195#endif
1196        return ptr;
1197    } else {
1198#if ZEND_DEBUG
1199        size = real_size;
1200#endif
1201        return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1202    }
1203}
1204
1205static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1206{
1207    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1208
1209    if (UNEXPECTED(page_offset == 0)) {
1210        if (ptr != NULL) {
1211            zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1212        }
1213    } else {
1214        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1215        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
1216        zend_mm_page_info info = chunk->map[page_num];
1217
1218        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1219        if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1220            zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
1221        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1222            int pages_count = ZEND_MM_LRUN_PAGES(info);
1223
1224            ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1225            zend_mm_free_large(heap, chunk, page_num, pages_count);
1226        }
1227    }
1228}
1229
1230static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1231{
1232    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1233
1234    if (UNEXPECTED(page_offset == 0)) {
1235        return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1236    } else {
1237        zend_mm_chunk *chunk;
1238#if 0 && ZEND_DEBUG
1239        zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
1240        return dbg->size;
1241#else
1242        int page_num;
1243        zend_mm_page_info info;
1244
1245        chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1246        page_num = page_offset / ZEND_MM_PAGE_SIZE;
1247        info = chunk->map[page_num];
1248        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1249        if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1250            return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1252            return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1253        }
1254#endif
1255    }
1256}
1257
1258static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1259{
1260    size_t page_offset;
1261    size_t old_size;
1262    size_t new_size;
1263    void *ret;
1264#if ZEND_DEBUG
1265    size_t real_size;
1266    zend_mm_debug_info *dbg;
1267#endif
1268
1269    page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1270    if (UNEXPECTED(page_offset == 0)) {
1271        if (UNEXPECTED(ptr == NULL)) {
1272            return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1273        }
1274        old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1275#if ZEND_DEBUG
1276        real_size = size;
1277        size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1278#endif
1279        if (size > ZEND_MM_MAX_LARGE_SIZE) {
1280#if ZEND_DEBUG
1281            size = real_size;
1282#endif
1283            new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1284            if (new_size == old_size) {
1285#if ZEND_DEBUG
1286                zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1287#else
1288                zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1289#endif
1290                return ptr;
1291#ifndef _WIN32
1292            } else if (new_size < old_size) {
                /* unmap tail */
1294                zend_mm_munmap((char*)ptr + new_size, old_size - new_size);
1295#if ZEND_MM_STAT || ZEND_MM_LIMIT
1296                heap->real_size -= old_size - new_size;
1297#endif
1298#if ZEND_MM_STAT
1299                heap->size -= old_size - new_size;
1300#endif
1301#if ZEND_DEBUG
1302                zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1303#else
1304                zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1305#endif
1306                return ptr;
1307            } else /* if (new_size > old_size) */ {
1308#if ZEND_MM_LIMIT
1309                if (heap->real_size + (new_size - old_size) > heap->limit) {
1310                    if (heap->overflow == 0) {
1311#if ZEND_DEBUG
1312                        zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted at %s:%d (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1313#else
1314                        zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, size);
1315#endif
1316                        return NULL;
1317                    }
1318                }
1319#endif
1320                /* try to map tail right after this block */
1321                if (zend_mm_mmap_fixed((char*)ptr + old_size, new_size - old_size)) {
1322#if ZEND_MM_STAT || ZEND_MM_LIMIT
1323                    heap->real_size += new_size - old_size;
1324#endif
1325#if ZEND_MM_STAT
1326                    heap->size += new_size - old_size;
1327#endif
1328#if ZEND_DEBUG
1329                    zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1330#else
1331                    zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1332#endif
1333                    return ptr;
1334                }
1335#endif
1336            }
1337        }
1338    } else {
1339        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1340        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
1341        zend_mm_page_info info = chunk->map[page_num];
1342#if ZEND_DEBUG
1343        size_t real_size = size;
1344
1345        size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1346#endif
1347
1348        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1349        if (info & ZEND_MM_IS_SRUN) {
1350            int old_bin_num, bin_num;
1351
1352            old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1353            old_size = bin_data_size[old_bin_num];
1354            bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size);
1355            if (old_bin_num == bin_num) {
1356#if ZEND_DEBUG
1357                dbg = zend_mm_get_debug_info(heap, ptr);
1358                dbg->size = real_size;
1359                dbg->filename = __zend_filename;
1360                dbg->orig_filename = __zend_orig_filename;
1361                dbg->lineno = __zend_lineno;
1362                dbg->orig_lineno = __zend_orig_lineno;
1363#endif
1364                return ptr;
1365            }
        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1367            ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1368            old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1369            if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
1370                new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1371                if (new_size == old_size) {
1372#if ZEND_DEBUG
1373                    dbg = zend_mm_get_debug_info(heap, ptr);
1374                    dbg->size = real_size;
1375                    dbg->filename = __zend_filename;
1376                    dbg->orig_filename = __zend_orig_filename;
1377                    dbg->lineno = __zend_lineno;
1378                    dbg->orig_lineno = __zend_orig_lineno;
1379#endif
1380                    return ptr;
1381                } else if (new_size < old_size) {
1382                    /* free tail pages */
1383                    int new_pages_count = new_size / ZEND_MM_PAGE_SIZE;
1384                    int rest_pages_count = (old_size - new_size) / ZEND_MM_PAGE_SIZE;
1385
1386#if ZEND_MM_STAT
1387                    heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
1388#endif
1389                    chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1390                    chunk->free_pages += rest_pages_count;
1391                    zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1392#if ZEND_DEBUG
1393                    dbg = zend_mm_get_debug_info(heap, ptr);
1394                    dbg->size = real_size;
1395                    dbg->filename = __zend_filename;
1396                    dbg->orig_filename = __zend_orig_filename;
1397                    dbg->lineno = __zend_lineno;
1398                    dbg->orig_lineno = __zend_orig_lineno;
1399#endif
1400                    return ptr;
1401                } else /* if (new_size > old_size) */ {
1402                    int new_pages_count = new_size / ZEND_MM_PAGE_SIZE;
1403                    int old_pages_count = old_size / ZEND_MM_PAGE_SIZE;
1404
1405                    /* try to allocate tail pages after this block */
1406                    if (page_num + new_pages_count <= ZEND_MM_PAGES &&
1407                        zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
1408#if ZEND_MM_STAT
1409                        do {
1410                            size_t size = heap->size + (new_size - old_size);
1411                            size_t peak = MAX(heap->peak, size);
1412                            heap->size = size;
1413                            heap->peak = peak;
1414                        } while (0);
1415#endif
1416                        chunk->free_pages -= new_pages_count - old_pages_count;
1417                        zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
1418                        chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1419#if ZEND_DEBUG
1420                        dbg = zend_mm_get_debug_info(heap, ptr);
1421                        dbg->size = real_size;
1422                        dbg->filename = __zend_filename;
1423                        dbg->orig_filename = __zend_orig_filename;
1424                        dbg->lineno = __zend_lineno;
1425                        dbg->orig_lineno = __zend_orig_lineno;
1426#endif
1427                        return ptr;
1428                    }
1429                }
1430            }
1431        }
1432#if ZEND_DEBUG
1433        size = real_size;
1434#endif
1435    }
1436
1437    /* Naive reallocation */
1438    old_size = zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1439    ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1440    memcpy(ret, ptr, MIN(old_size, size));
1441    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1442    return ret;
1443}
1444
1445/*********************/
1446/* Huge Runs (again) */
1447/*********************/
1448
1449#if ZEND_DEBUG
1450static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1451#else
1452static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1453#endif
1454{
1455    zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1456    list->ptr = ptr;
1457    list->size = size;
1458    list->next = heap->huge_list;
1459#if ZEND_DEBUG
1460    list->dbg.size = dbg_size;
1461    list->dbg.filename = __zend_filename;
1462    list->dbg.orig_filename = __zend_orig_filename;
1463    list->dbg.lineno = __zend_lineno;
1464    list->dbg.orig_lineno = __zend_orig_lineno;
1465#endif
1466    heap->huge_list = list;
1467}
1468
1469static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1470{
1471    zend_mm_huge_list *prev = NULL;
1472    zend_mm_huge_list *list = heap->huge_list;
1473    while (list != NULL) {
1474        if (list->ptr == ptr) {
1475            size_t size;
1476
1477            if (prev) {
1478                prev->next = list->next;
1479            } else {
1480                heap->huge_list = list->next;
1481            }
1482            size = list->size;
1483            zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1484            return size;
1485        }
1486        prev = list;
1487        list = list->next;
1488    }
1489    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1490    return 0;
1491}
1492
1493static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1494{
1495    zend_mm_huge_list *list = heap->huge_list;
1496    while (list != NULL) {
1497        if (list->ptr == ptr) {
1498            return list->size;
1499        }
1500        list = list->next;
1501    }
1502    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1503    return 0;
1504}
1505
1506#if ZEND_DEBUG
1507static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1508#else
1509static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1510#endif
1511{
1512    zend_mm_huge_list *list = heap->huge_list;
1513    while (list != NULL) {
1514        if (list->ptr == ptr) {
1515            list->size = size;
1516#if ZEND_DEBUG
1517            list->dbg.size = dbg_size;
1518            list->dbg.filename = __zend_filename;
1519            list->dbg.orig_filename = __zend_orig_filename;
1520            list->dbg.lineno = __zend_lineno;
1521            list->dbg.orig_lineno = __zend_orig_lineno;
1522#endif
1523            return;
1524        }
1525        list = list->next;
1526    }
1527}
1528
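/*
 * A huge allocation is rounded up to a whole number of pages, checked
 * against the memory limit, mapped with CHUNK alignment, and then recorded
 * in the huge list so that a later free or realloc can recover its size
 * from the pointer alone.
 */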
1529static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1530{
1531    size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1532    void *ptr;
1533
1534#if ZEND_MM_LIMIT
1535    if (heap->real_size + new_size > heap->limit) {
1536        if (heap->overflow == 0) {
1537#if ZEND_DEBUG
1538            zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted at %s:%d (tried to allocate %lu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1539#else
1540            zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted (tried to allocate %lu bytes)", heap->limit, size);
1541#endif
1542            return NULL;
1543        }
1544    }
1545#endif
1546    ptr = zend_mm_chunk_alloc(new_size, ZEND_MM_CHUNK_SIZE);
1547    if (UNEXPECTED(ptr == NULL)) {
1548        /* insufficient memory */
1549        return NULL;
1550    }
1551#if ZEND_DEBUG
1552    zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1553#else
1554    zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1555#endif
1556#if ZEND_MM_STAT
1557    do {
1558        size_t size = heap->real_size + new_size;
1559        size_t peak = MAX(heap->real_peak, size);
1560        heap->real_size = size;
1561        heap->real_peak = peak;
1562    } while (0);
1563    do {
1564        size_t size = heap->size + new_size;
1565        size_t peak = MAX(heap->peak, size);
1566        heap->size = size;
1567        heap->peak = peak;
1568    } while (0);
1569#elif ZEND_MM_LIMIT
1570    heap->real_size += new_size;
1571#endif
1572    return ptr;
1573}
1574
1575static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1576{
1577    size_t size;
1578
1579    ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
1580    size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1581    zend_mm_munmap(ptr, size);
1582#if ZEND_MM_STAT || ZEND_MM_LIMIT
1583    heap->real_size -= size;
1584#endif
1585#if ZEND_MM_STAT
1586    heap->size -= size;
1587#endif
1588}
1589
1590/******************/
1591/* Initialization */
1592/******************/
1593
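/*
 * The heap descriptor is embedded in the first chunk (heap_slot), so
 * bootstrapping the allocator needs only a single chunk-sized, chunk-aligned
 * mapping.  The first ZEND_MM_FIRST_PAGE page(s) of that chunk hold the
 * chunk header and are marked as used in the free map right away.
 */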
1594zend_mm_heap *zend_mm_init(void)
1595{
1596    zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1597    zend_mm_heap *heap;
1598
1599    if (UNEXPECTED(chunk == NULL)) {
1600#if ZEND_MM_ERROR
1601        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
1602#endif
1603        return NULL;
1604    }
1605    heap = &chunk->heap_slot;
1606    chunk->heap = heap;
1607    chunk->next = chunk;
1608    chunk->prev = chunk;
1609    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1610    chunk->free_tail = ZEND_MM_FIRST_PAGE;
1611    chunk->num = 0;
1612    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
1613    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
1614    heap->main_chunk = chunk;
1615    heap->cached_chunks = NULL;
1616    heap->chunks_count = 1;
1617    heap->peak_chunks_count = 1;
1618    heap->cached_chunks_count = 0;
1619    heap->avg_chunks_count = 1.0;
1620#if ZEND_MM_STAT || ZEND_MM_LIMIT
1621    heap->real_size = ZEND_MM_CHUNK_SIZE;
1622#endif
1623#if ZEND_MM_STAT
1624    heap->real_peak = ZEND_MM_CHUNK_SIZE;
1625    heap->size = 0;
1626    heap->peak = 0;
1627#endif
1628#if ZEND_MM_LIMIT
1629    heap->limit = (Z_L(-1) >> Z_L(1));
1630    heap->overflow = 0;
1631#endif
1632#if ZEND_MM_CUSTOM
1633    heap->use_custom_heap = 0;
1634#endif
1635    heap->huge_list = NULL;
1636    return heap;
1637}
1638
1639#if ZEND_DEBUG
1640/******************/
1641/* Leak detection */
1642/******************/
1643
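/*
 * In debug builds every allocated element carries a zend_mm_debug_info
 * trailer.  The helpers below scan those trailers: once a leak has been
 * reported, further leaks recorded from the same filename/lineno are only
 * counted and reported via ZMSG_MEMORY_LEAK_REPEATED instead of being
 * printed individually.
 */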
1644static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, int i, int j, zend_leak_info *leak)
1645{
1646    int empty = 1;
1647    zend_long count = 0;
1648    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1649    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1650
1651    while (j < bin_elements[bin_num]) {
1652        if (dbg->size != 0) {
1653            if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
1654                count++;
1655                dbg->size = 0;
1656                dbg->filename = NULL;
1657                dbg->lineno = 0;
1658            } else {
1659                empty = 0;
1660            }
1661        }
1662        j++;
1663        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
1664    }
1665    if (empty) {
1666        zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
1667    }
1668    return count;
1669}
1670
1671static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, int i, zend_leak_info *leak)
1672{
1673    zend_long count = 0;
1674
1675    do {
1676        while (i < p->free_tail) {
1677            if (zend_mm_bitset_is_set(p->free_map, i)) {
1678                if (p->map[i] & ZEND_MM_IS_SRUN) {
1679                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1680                    count += zend_mm_find_leaks_small(p, i, 0, leak);
1681                    i += bin_pages[bin_num];
1682                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
1683                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
1684                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1685
1686                    if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
1687                        count++;
1688                    }
1689                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);
1690                    i += pages_count;
1691                }
1692            } else {
1693                i++;
1694            }
1695        }
1696        p = p->next;
1697    } while (p != heap->main_chunk);
1698    return count;
1699}
1700
1701static void zend_mm_check_leaks(zend_mm_heap *heap TSRMLS_DC)
1702{
1703    zend_mm_huge_list *list;
1704    zend_mm_chunk *p;
1705    zend_leak_info leak;
1706    zend_long repeated = 0;
1707    uint32_t total = 0;
1708    int i, j;
1709
1710    /* find leaked huge blocks and free them */
1711    list = heap->huge_list;
1712    while (list) {
1713        zend_mm_huge_list *q = list;
1714
1715        heap->huge_list = list->next;
1716
1717        leak.addr = list->ptr;
1718        leak.size = list->dbg.size;
1719        leak.filename = list->dbg.filename;
1720        leak.orig_filename = list->dbg.orig_filename;
1721        leak.lineno = list->dbg.lineno;
1722        leak.orig_lineno = list->dbg.orig_lineno;
1723
1724        zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC);
1725        zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC);
1726//???       repeated = zend_mm_find_leaks_huge(segment, p);
1727        total += 1 + repeated;
1728        if (repeated) {
1729            zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC);
1730        }
1731
1732        list = list->next;
1733        zend_mm_munmap(q->ptr, q->size);
1734        zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
1735    }
1736
1737    /* for each chunk */
1738    p = heap->main_chunk;
1739    do {
1740        i = ZEND_MM_FIRST_PAGE;
1741        while (i < p->free_tail) {
1742            if (zend_mm_bitset_is_set(p->free_map, i)) {
1743                if (p->map[i] & ZEND_MM_IS_SRUN) {
1744                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1745                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1746
1747                    j = 0;
1748                    while (j < bin_elements[bin_num]) {
1749                        if (dbg->size != 0) {
1750                            leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
1751                            leak.size = dbg->size;
1752                            leak.filename = dbg->filename;
1753                            leak.orig_filename = dbg->orig_filename;
1754                            leak.lineno = dbg->lineno;
1755                            leak.orig_lineno = dbg->orig_lineno;
1756
1757                            zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC);
1758                            zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC);
1759
1760                            dbg->size = 0;
1761                            dbg->filename = NULL;
1762                            dbg->lineno = 0;
1763
1764                            repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
1765                                       zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
1766                            total += 1 + repeated;
1767                            if (repeated) {
1768                                zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC);
1769                            }
1770                        }
1771                        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
1772                        j++;
1773                    }
1774                    i += bin_pages[bin_num];
1775                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
1776                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
1777                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1778
1779                    leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
1780                    leak.size = dbg->size;
1781                    leak.filename = dbg->filename;
1782                    leak.orig_filename = dbg->orig_filename;
1783                    leak.lineno = dbg->lineno;
1784                    leak.orig_lineno = dbg->orig_lineno;
1785
1786                    zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC);
1787                    zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC);
1788
1789                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);
1790
1791                    repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
1792                    total += 1 + repeated;
1793                    if (repeated) {
1794                        zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC);
1795                    }
1796                    i += pages_count;
1797                }
1798            } else {
1799                i++;
1800            }
1801        }
1802        p = p->next;
1803    } while (p != heap->main_chunk);
1804    if (total) {
1805        zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total TSRMLS_CC);
1806    }
1807}
1808#endif
1809
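/*
 * zend_mm_shutdown() serves both per-request cleanup and full shutdown.
 * Huge blocks are always unmapped.  With "full" set, every chunk is returned
 * to the OS as well; otherwise surplus chunks are kept in a small cache
 * (trimmed toward the average per-request peak) and the main chunk and heap
 * are reinitialized so the next request starts from a warm allocator.
 */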
1810void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent TSRMLS_DC)
1811{
1812    zend_mm_chunk *p;
1813    zend_mm_huge_list *list;
1814
1815#if ZEND_MM_CUSTOM
1816    if (heap->use_custom_heap) {
1817        return;
1818    }
1819#endif
1820
1821#if ZEND_DEBUG
1822    if (!silent) {
1823        zend_mm_check_leaks(heap TSRMLS_CC);
1824    }
1825#endif
1826
1827    /* free huge blocks */
1828    list = heap->huge_list;
1829    while (list) {
1830        zend_mm_huge_list *q = list;
1831        list = list->next;
1832        zend_mm_munmap(q->ptr, q->size);
1833    }
1834
    /* move all chunks except the first one into the cache */
1836    p = heap->main_chunk->next;
1837    while (p != heap->main_chunk) {
1838        zend_mm_chunk *q = p->next;
1839        p->next = heap->cached_chunks;
1840        heap->cached_chunks = p;
1841        p = q;
1842        heap->chunks_count--;
1843        heap->cached_chunks_count++;
1844    }
1845
1846    if (full) {
1847        /* free all cached chunks */
1848        while (heap->cached_chunks) {
1849            p = heap->cached_chunks;
1850            heap->cached_chunks = p->next;
1851            zend_mm_munmap(p, ZEND_MM_CHUNK_SIZE);
1852        }
1853        /* free the first chunk */
1854        zend_mm_munmap(heap->main_chunk, ZEND_MM_CHUNK_SIZE);
1855    } else {
1856        zend_mm_heap old_heap;
1857
        /* free enough cached chunks to keep the cache near the running
         * average of the per-request peak chunk count */
1859        heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
1860        while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
1861               heap->cached_chunks) {
1862            p = heap->cached_chunks;
1863            heap->cached_chunks = p->next;
1864            zend_mm_munmap(p, ZEND_MM_CHUNK_SIZE);
1865            heap->cached_chunks_count--;
1866        }
1867        /* clear cached chunks */
1868        p = heap->cached_chunks;
1869        while (p != NULL) {
1870            zend_mm_chunk *q = p->next;
1871            memset(p, 0, sizeof(zend_mm_chunk));
1872            p->next = q;
1873            p = q;
1874        }
1875
1876        /* reinitialize the first chunk and heap */
1877        old_heap = *heap;
1878        p = heap->main_chunk;
1879        memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE);
1880        *heap = old_heap;
1881        memset(heap->free_slot, 0, sizeof(heap->free_slot));
1882        heap->main_chunk = p;
1883        p->heap = &p->heap_slot;
1884        p->next = p;
1885        p->prev = p;
1886        p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1887        p->free_tail = ZEND_MM_FIRST_PAGE;
        p->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
1889        p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
1890        heap->chunks_count = 1;
1891        heap->peak_chunks_count = 1;
1892#if ZEND_MM_STAT || ZEND_MM_LIMIT
1893        heap->real_size = ZEND_MM_CHUNK_SIZE;
1894#endif
1895#if ZEND_MM_STAT
1896        heap->real_peak = ZEND_MM_CHUNK_SIZE;
1897#endif
1898    }
1899}
1900
1901/**************/
1902/* PUBLIC API */
1903/**************/
1904
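/*
 * These entry points operate on an explicitly passed heap rather than the
 * per-request AG(mm_heap).  A minimal, illustrative sketch of how an
 * embedder might drive them (not code from this file; the ZEND_FILE_LINE_*
 * and TSRMLS_* macros expand according to the build configuration):
 *
 *     zend_mm_heap *heap = zend_mm_init();
 *     void *p = _zend_mm_alloc(heap, 64 ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
 *     p = _zend_mm_realloc(heap, p, 128 ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
 *     _zend_mm_free(heap, p ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
 *     zend_mm_shutdown(heap, 1, 1 TSRMLS_CC);
 */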
1905ZEND_API void *_zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1906{
1907    return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1908}
1909
1910ZEND_API void _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1911{
1912    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1913}
1914
1915void *_zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1916{
1917    return zend_mm_realloc_heap(heap, ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1918}
1919
1920ZEND_API size_t _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1921{
1922    return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1923}
1924
1925/**********************/
1926/* Allocation Manager */
1927/**********************/
1928
1929typedef struct _zend_alloc_globals {
1930    zend_mm_heap *mm_heap;
1931} zend_alloc_globals;
1932
1933#ifdef ZTS
1934static int alloc_globals_id;
1935# define AG(v) TSRMG(alloc_globals_id, zend_alloc_globals *, v)
1936#else
1937# define AG(v) (alloc_globals.v)
1938static zend_alloc_globals alloc_globals;
1939#endif
1940
1941ZEND_API int is_zend_mm(TSRMLS_D)
1942{
1943#if ZEND_MM_CUSTOM
1944    return !AG(mm_heap)->use_custom_heap;
1945#else
1946    return 1;
1947#endif
1948}
1949
1950#if !ZEND_DEBUG && !defined(_WIN32)
1951#undef _emalloc
1952
1953#if ZEND_MM_CUSTOM
1954# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
1955        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
1956            return AG(mm_heap)->_malloc(size); \
1957        } \
1958    } while (0)
1959# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
1960        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
1961            AG(mm_heap)->_free(ptr); \
1962            return; \
1963        } \
1964    } while (0)
1965#else
1966# define ZEND_MM_CUSTOM_ALLOCATOR(size)
1967# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
1968#endif
1969
1970# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
1971    ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
1972        TSRMLS_FETCH(); \
1973        ZEND_MM_CUSTOM_ALLOCATOR(_size); \
1974        return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
1975    }
1976
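/*
 * ZEND_MM_BINS_INFO() expands the macro above once for every small-size bin,
 * generating a family of fixed-size allocators named _emalloc_<size>().
 * The matching _efree_<size>() entry points are generated the same way
 * further below.
 */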
1977ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
1978
1979ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1980{
1981    TSRMLS_FETCH();
1982
1983    ZEND_MM_CUSTOM_ALLOCATOR(size);
1984    return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1985}
1986
1987ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
1988{
1989    TSRMLS_FETCH();
1990
1991    ZEND_MM_CUSTOM_ALLOCATOR(size);
1992    return zend_mm_alloc_huge(AG(mm_heap), size);
1993}
1994
1995# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
1996    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
1997        TSRMLS_FETCH(); \
1998        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
1999        { \
2000            size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
2001            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2002            int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
2003            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2004            ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2005            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2006            zend_mm_free_small(AG(mm_heap), ptr, _num); \
2007        } \
2008    }
2009
2010ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
2011
2012ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
2013{
2014    TSRMLS_FETCH();
2015
2016    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2017    {
2018        size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
2019        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
2020        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
2021        int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;
2022
2023        ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
2024        ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2025        ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2026        zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
2027    }
2028}
2029
2030ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
2031{
2032    TSRMLS_FETCH();
2033
2034    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2035    // TODO: use size???
2036    zend_mm_free_huge(AG(mm_heap), ptr);
2037}
2038#endif
2039
2040ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2041{
2042    TSRMLS_FETCH();
2043
2044#if ZEND_MM_CUSTOM
2045    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2046        return AG(mm_heap)->_malloc(size);
2047    }
2048#endif
2049    return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2050}
2051
2052ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2053{
2054    TSRMLS_FETCH();
2055
2056#if ZEND_MM_CUSTOM
2057    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2058        AG(mm_heap)->_free(ptr);
2059        return;
2060    }
2061#endif
2062    zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2063}
2064
ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size, int allow_failure ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
    TSRMLS_FETCH();

#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return AG(mm_heap)->_realloc(ptr, size);
    }
#endif
    return zend_mm_realloc_heap(AG(mm_heap), ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2074
ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr TSRMLS_DC ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return 0;
    }
#endif
    return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2082
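/*
 * safe_address() computes nmemb * size + offset and treats any overflow of
 * size_t as a fatal error.  The platform-specific variants below use inline
 * assembly and inspect the high half of the full-width product (plus the
 * carry from adding offset); the final fallback compares the result against
 * a double-precision estimate instead.  For example, on a 32-bit build a
 * request of 65536 elements of 65536 bytes each (2^32 bytes in total) cannot
 * be represented in size_t and aborts with
 * "Possible integer overflow in memory allocation".
 */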
2083#if defined(__GNUC__) && (defined(__native_client__) || defined(i386))
2084
2085static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2086{
2087    size_t res = nmemb;
2088    zend_ulong overflow = 0;
2089
2090    __asm__ ("mull %3\n\taddl %4,%0\n\tadcl $0,%1"
2091         : "=&a"(res), "=&d" (overflow)
2092         : "%0"(res),
2093           "rm"(size),
2094           "rm"(offset));
2095
2096    if (UNEXPECTED(overflow)) {
2097        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2098        return 0;
2099    }
2100    return res;
2101}
2102
2103#elif defined(__GNUC__) && defined(__x86_64__)
2104
2105static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2106{
    size_t res = nmemb;
    zend_ulong overflow = 0;

#ifdef __ILP32__ /* x32 */
# define LP_SUFF "l"
#else /* amd64 */
# define LP_SUFF "q"
#endif

    __asm__ ("mul" LP_SUFF  " %3\n\t"
             "add %4,%0\n\t"
             "adc $0,%1"
         : "=&a"(res), "=&d" (overflow)
         : "%0"(res),
           "rm"(size),
           "rm"(offset));

#undef LP_SUFF
    if (UNEXPECTED(overflow)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
        return 0;
    }
    return res;
2130}
2131
2132#elif defined(__GNUC__) && defined(__arm__)
2133
2134static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2135{
    size_t res;
    zend_ulong overflow;

    __asm__ ("umlal %0,%1,%2,%3"
         : "=r"(res), "=r"(overflow)
         : "r"(nmemb),
           "r"(size),
           "0"(offset),
           "1"(0));

    if (UNEXPECTED(overflow)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
        return 0;
    }
    return res;
2151}
2152
2153#elif defined(__GNUC__) && defined(__aarch64__)
2154
2155static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2156{
    size_t res;
    zend_ulong overflow;

    __asm__ ("mul %0,%2,%3\n\tumulh %1,%2,%3\n\tadds %0,%0,%4\n\tadc %1,%1,xzr"
         : "=&r"(res), "=&r"(overflow)
         : "r"(nmemb),
           "r"(size),
           "r"(offset));

    if (UNEXPECTED(overflow)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
        return 0;
    }
    return res;
2171}
2172
2173#elif SIZEOF_SIZE_T == 4 && defined(HAVE_ZEND_LONG64)
2174
2175static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2176{
2177    zend_ulong64 res = (zend_ulong64)nmemb * (zend_ulong64)size + (zend_ulong64)offset;
2178
2179    if (UNEXPECTED(res > (zend_ulong64)0xFFFFFFFFL)) {
2180        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2181        return 0;
2182    }
2183    return (size_t) res;
2184}
2185
2186#else
2187
2188static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2189{
2190    size_t res = nmemb * size + offset;
2191    double _d  = (double)nmemb * (double)size + (double)offset;
2192    double _delta = (double)res - _d;
2193
2194    if (UNEXPECTED((_d + _delta ) != _d)) {
2195        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2196        return 0;
2197    }
2198    return res;
2199}
2200#endif
2201
2202
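/*
 * The _safe_* wrappers below run the element count through safe_address()
 * before delegating to the corresponding allocator, so callers may pass
 * untrusted nmemb/size values without checking for overflow themselves.
 */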
2203ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2204{
2205    return emalloc_rel(safe_address(nmemb, size, offset));
2206}
2207
2208ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
2209{
2210    return pemalloc(safe_address(nmemb, size, offset), 1);
2211}
2212
2213ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2214{
2215    return erealloc_rel(ptr, safe_address(nmemb, size, offset));
2216}
2217
2218ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
2219{
2220    return perealloc(ptr, safe_address(nmemb, size, offset), 1);
2221}
2222
2223
2224ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2225{
2226    void *p;
2227#ifdef ZEND_SIGNALS
2228    TSRMLS_FETCH();
2229#endif
2230    HANDLE_BLOCK_INTERRUPTIONS();
2231
2232    p = _safe_emalloc(nmemb, size, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2233    if (UNEXPECTED(p == NULL)) {
2234        HANDLE_UNBLOCK_INTERRUPTIONS();
2235        return p;
2236    }
2237    memset(p, 0, size * nmemb);
2238    HANDLE_UNBLOCK_INTERRUPTIONS();
2239    return p;
2240}
2241
2242ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2243{
2244    size_t length;
2245    char *p;
2246#ifdef ZEND_SIGNALS
2247    TSRMLS_FETCH();
2248#endif
2249
2250    HANDLE_BLOCK_INTERRUPTIONS();
2251
2252    length = strlen(s)+1;
2253    p = (char *) _emalloc(length ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2254    if (UNEXPECTED(p == NULL)) {
2255        HANDLE_UNBLOCK_INTERRUPTIONS();
2256        return p;
2257    }
2258    memcpy(p, s, length);
2259    HANDLE_UNBLOCK_INTERRUPTIONS();
2260    return p;
2261}
2262
2263ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2264{
2265    char *p;
2266#ifdef ZEND_SIGNALS
2267    TSRMLS_FETCH();
2268#endif
2269
2270    HANDLE_BLOCK_INTERRUPTIONS();
2271
2272    p = (char *) _emalloc(length+1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2273    if (UNEXPECTED(p == NULL)) {
2274        HANDLE_UNBLOCK_INTERRUPTIONS();
2275        return p;
2276    }
2277    memcpy(p, s, length);
2278    p[length] = 0;
2279    HANDLE_UNBLOCK_INTERRUPTIONS();
2280    return p;
2281}
2282
2283
2284ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2285{
2286    char *p;
2287#ifdef ZEND_SIGNALS
2288    TSRMLS_FETCH();
2289#endif
2290
2291    HANDLE_BLOCK_INTERRUPTIONS();
2292
2293    p = (char *) malloc(length+1);
2294    if (UNEXPECTED(p == NULL)) {
2295        HANDLE_UNBLOCK_INTERRUPTIONS();
2296        return p;
2297    }
2298    if (length) {
2299        memcpy(p, s, length);
2300    }
2301    p[length] = 0;
2302    HANDLE_UNBLOCK_INTERRUPTIONS();
2303    return p;
2304}
2305
2306
2307ZEND_API int zend_set_memory_limit(size_t memory_limit TSRMLS_DC)
2308{
2309#if ZEND_MM_LIMIT
2310    AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
2311#endif
2312    return SUCCESS;
2313}
2314
2315ZEND_API size_t zend_memory_usage(int real_usage TSRMLS_DC)
2316{
2317#if ZEND_MM_STAT
2318    if (real_usage) {
2319        return AG(mm_heap)->real_size;
2320    } else {
2321        size_t usage = AG(mm_heap)->size;
2322        return usage;
2323    }
2324#endif
2325    return 0;
2326}
2327
2328ZEND_API size_t zend_memory_peak_usage(int real_usage TSRMLS_DC)
2329{
2330#if ZEND_MM_STAT
2331    if (real_usage) {
2332        return AG(mm_heap)->real_peak;
2333    } else {
2334        return AG(mm_heap)->peak;
2335    }
2336#endif
2337    return 0;
2338}
2339
2340ZEND_API void shutdown_memory_manager(int silent, int full_shutdown TSRMLS_DC)
2341{
2342    zend_mm_shutdown(AG(mm_heap), full_shutdown, silent TSRMLS_CC);
2343}
2344
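/*
 * If the environment variable USE_ZEND_ALLOC is set and evaluates to zero,
 * the constructor below installs a pass-through heap whose handlers are the
 * system malloc/free/realloc (useful when running the engine under an
 * external memory debugger); otherwise the regular heap is created with
 * zend_mm_init().
 */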
2345static void alloc_globals_ctor(zend_alloc_globals *alloc_globals TSRMLS_DC)
2346{
2347#if ZEND_MM_CUSTOM
2348    char *tmp = getenv("USE_ZEND_ALLOC");
2349
2350    if (tmp && !zend_atoi(tmp, 0)) {
2351        alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
2352        memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap));
2353        alloc_globals->mm_heap->use_custom_heap = 1;
2354        alloc_globals->mm_heap->_malloc = malloc;
2355        alloc_globals->mm_heap->_free = free;
2356        alloc_globals->mm_heap->_realloc = realloc;
2357        return;
2358    }
2359#endif
2360    alloc_globals->mm_heap = zend_mm_init();
2361}
2362
2363#ifdef ZTS
2364static void alloc_globals_dtor(zend_alloc_globals *alloc_globals TSRMLS_DC)
2365{
2366    shutdown_memory_manager(1, 1 TSRMLS_CC);
2367}
2368#endif
2369
2370ZEND_API void start_memory_manager(TSRMLS_D)
2371{
2372#ifdef ZTS
2373    ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
2374#else
2375    alloc_globals_ctor(&alloc_globals);
2376#endif
2377}
2378
2379ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap TSRMLS_DC)
2380{
2381    zend_mm_heap *old_heap;
2382
2383    old_heap = AG(mm_heap);
2384    AG(mm_heap) = (zend_mm_heap*)new_heap;
2385    return (zend_mm_heap*)old_heap;
2386}
2387
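/*
 * zend_mm_set_custom_handlers() redirects allocations made through the given
 * heap to user-supplied functions.  An illustrative sketch (the wrapper
 * names are hypothetical, not part of the engine):
 *
 *     static void *my_malloc(size_t size)             { return malloc(size); }
 *     static void  my_free(void *ptr)                 { free(ptr); }
 *     static void *my_realloc(void *ptr, size_t size) { return realloc(ptr, size); }
 *     ...
 *     zend_mm_set_custom_handlers(heap, my_malloc, my_free, my_realloc);
 *
 * where "heap" is whatever zend_mm_heap pointer the caller already holds
 * (e.g. the one returned by zend_mm_init()).
 */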
2388ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
2389                                          void* (*_malloc)(size_t),
2390                                          void  (*_free)(void*),
2391                                          void* (*_realloc)(void*, size_t))
2392{
2393#if ZEND_MM_CUSTOM
2394    zend_mm_heap *_heap = (zend_mm_heap*)heap;
2395
2396    _heap->use_custom_heap = 1;
2397    _heap->_malloc = _malloc;
2398    _heap->_free = _free;
2399    _heap->_realloc = _realloc;
2400#endif
2401}
2402
2403/*
2404 * Local variables:
2405 * tab-width: 4
2406 * c-basic-offset: 4
2407 * indent-tabs-mode: t
2408 * End:
2409 */
2410