 # define ZEND_MM_CUSTOM 1    /* support for custom memory allocator            */
                              /* USE_ZEND_ALLOC=0 may switch to system malloc() */
 #endif
+#ifndef ZEND_MM_STORAGE
+# define ZEND_MM_STORAGE 1   /* support for custom memory storage              */
+#endif
 #ifndef ZEND_MM_ERROR
 # define ZEND_MM_ERROR 1     /* report system errors                           */
 #endif
@@ -217,6 +220,9 @@ struct _zend_mm_heap {
 #if ZEND_MM_CUSTOM
     int                use_custom_heap;
 #endif
+#if ZEND_MM_STORAGE
+    zend_mm_storage   *storage;
+#endif
 #if ZEND_MM_STAT
     size_t             size;                    /* current memory usage */
     size_t             peak;                    /* peak memory usage */
@@ -707,7 +713,7 @@ static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitse
 /* Chunks */
 /**********/
 
-static void *zend_mm_chunk_alloc(size_t size, size_t alignment)
+static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
 {
     void *ptr = zend_mm_mmap(size);
@@ -751,6 +757,40 @@ static void *zend_mm_chunk_alloc(size_t size, size_t alignment)
     }
 }
 
+static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
+{
+#if ZEND_MM_STORAGE
+    if (UNEXPECTED(heap->storage)) {
+        void *ptr = heap->storage->chunk_alloc(heap->storage, size, alignment);
+        ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment - 1)) & (alignment - 1)) == (zend_uintptr_t)ptr);
+        return ptr;
+    }
+#endif
+    return zend_mm_chunk_alloc_int(size, alignment);
+}
+
+static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
+{
+#if ZEND_MM_STORAGE
+    if (UNEXPECTED(heap->storage)) {
+        heap->storage->chunk_free(heap->storage, addr, size);
+        return;
+    }
+#endif
+    zend_mm_munmap(addr, size);
+}
+
+static void zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
+{
+#if ZEND_MM_STORAGE
+    if (UNEXPECTED(heap->storage)) {
+        heap->storage->chunk_truncate(heap->storage, addr, old_size, new_size);
+        return;
+    }
+#endif
+    zend_mm_munmap((char*)addr + new_size, old_size - new_size);
+}
+
 static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
 {
     chunk->heap = heap;
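Note on the new wrappers: every chunk-level operation now funnels through zend_mm_chunk_alloc(), zend_mm_chunk_free() and zend_mm_chunk_truncate(), which defer to the heap's storage handlers when one is attached and fall back to the plain mmap/munmap path otherwise. The zend_mm_storage type itself lives in the header and is not part of this diff; the sketch below is inferred purely from the call sites above (chunk_alloc, chunk_free, chunk_truncate, plus the dtor invoked at shutdown), so the exact layout and any additional members are assumptions.

    /* Sketch only -- handler signatures inferred from the call sites in this diff,
     * not copied from zend_alloc.h. */
    typedef struct _zend_mm_storage zend_mm_storage;

    struct _zend_mm_storage {
        void *(*chunk_alloc)(zend_mm_storage *storage, size_t size, size_t alignment);
        void  (*chunk_free)(zend_mm_storage *storage, void *chunk, size_t size);
        void  (*chunk_truncate)(zend_mm_storage *storage, void *chunk,
                                size_t old_size, size_t new_size);
        void  (*dtor)(zend_mm_storage *storage);  /* called once from zend_mm_shutdown() */
    };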
@@ -928,7 +968,7 @@ static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_L
         }
     }
 #endif
-    chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
+    chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
     if (UNEXPECTED(chunk == NULL)) {
         /* insufficient memory */
 #if !ZEND_MM_LIMIT
@@ -1019,11 +1059,11 @@ static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int pag
         heap->real_size -= ZEND_MM_CHUNK_SIZE;
 #endif
         if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
-            zend_mm_munmap(chunk, ZEND_MM_CHUNK_SIZE);
+            zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
         } else {
 //TODO: select the best chunk to delete???
             chunk->next = heap->cached_chunks->next;
-            zend_mm_munmap(heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
+            zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
             heap->cached_chunks = chunk;
         }
     }
@@ -1355,7 +1395,7 @@ static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size ZEN
 #ifndef _WIN32
         } else if (new_size < old_size) {
             /* unmup tail */
-            zend_mm_munmap((char*)ptr + new_size, old_size - new_size);
+            zend_mm_chunk_truncate(heap, ptr, old_size, new_size);
 #if ZEND_MM_STAT || ZEND_MM_LIMIT
             heap->real_size -= old_size - new_size;
 #endif
@@ -1607,7 +1647,7 @@ static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_D
         }
     }
 #endif
-    ptr = zend_mm_chunk_alloc(new_size, ZEND_MM_CHUNK_SIZE);
+    ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
     if (UNEXPECTED(ptr == NULL)) {
         /* insufficient memory */
 #if !ZEND_MM_LIMIT
@@ -1649,7 +1689,7 @@ static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZE
 
     ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
     size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
-    zend_mm_munmap(ptr, size);
+    zend_mm_chunk_free(heap, ptr, size);
 #if ZEND_MM_STAT || ZEND_MM_LIMIT
     heap->real_size -= size;
 #endif
@@ -1662,9 +1702,9 @@ static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZE
 /* Initialization */
 /******************/
 
-zend_mm_heap *zend_mm_init(void)
+static zend_mm_heap *zend_mm_init(void)
 {
-    zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
+    zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
     zend_mm_heap *heap;
 
     if (UNEXPECTED(chunk == NULL)) {
@@ -1706,6 +1746,9 @@ zend_mm_heap *zend_mm_init(void)
 #endif
 #if ZEND_MM_CUSTOM
     heap->use_custom_heap = 0;
+#endif
+#if ZEND_MM_STORAGE
+    heap->storage = NULL;
 #endif
     heap->huge_list = NULL;
     return heap;
@@ -1805,7 +1848,7 @@ static void zend_mm_check_leaks(zend_mm_heap *heap TSRMLS_DC)
         }
 
         list = list->next;
-        zend_mm_munmap(q->ptr, q->size);
+        zend_mm_chunk_free(heap, q->ptr, q->size);
         zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
     }
 
@@ -1904,7 +1947,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent TSRMLS_DC)
     while (list) {
         zend_mm_huge_list *q = list;
         list = list->next;
-        zend_mm_munmap(q->ptr, q->size);
+        zend_mm_chunk_free(heap, q->ptr, q->size);
     }
 
     /* move all chunks except of the first one into the cache */
@@ -1923,10 +1966,20 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent TSRMLS_DC)
         while (heap->cached_chunks) {
             p = heap->cached_chunks;
             heap->cached_chunks = p->next;
-            zend_mm_munmap(p, ZEND_MM_CHUNK_SIZE);
+            zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
         }
         /* free the first chunk */
-        zend_mm_munmap(heap->main_chunk, ZEND_MM_CHUNK_SIZE);
+#if ZEND_MM_STORAGE
+        if (UNEXPECTED(heap->storage)) {
+            zend_mm_storage *storage = heap->storage;
+            zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
+            storage->dtor(storage);
+        } else {
+            zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
+        }
+#else
+        zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
+#endif
     } else {
         zend_mm_heap old_heap;
 
@@ -1936,7 +1989,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent TSRMLS_DC)
                heap->cached_chunks) {
             p = heap->cached_chunks;
             heap->cached_chunks = p->next;
-            zend_mm_munmap(p, ZEND_MM_CHUNK_SIZE);
+            zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
             heap->cached_chunks_count--;
         }
         /* clear cached chunks */
@@ -2343,6 +2396,98 @@ ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
 #endif
 }
 
+ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
+                                          void* (**_malloc)(size_t),
+                                          void  (**_free)(void*),
+                                          void* (**_realloc)(void*, size_t))
+{
+#if ZEND_MM_CUSTOM
+    zend_mm_heap *_heap = (zend_mm_heap*)heap;
+
+    if (heap->use_custom_heap) {
+        *_malloc = _heap->_malloc;
+        *_free = _heap->_free;
+        *_realloc = _heap->_realloc;
+    } else {
+        *_malloc = NULL;
+        *_free = NULL;
+        *_realloc = NULL;
+    }
+#else
+    *_malloc = NULL;
+    *_free = NULL;
+    *_realloc = NULL;
+#endif
+}
+
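The getter presumably mirrors zend_mm_set_custom_handlers() (only the first line of that signature is visible in this hunk's header) and reports NULL for all three pointers when the heap is serving requests itself. A minimal sketch of probing a heap through the new call, assuming nothing beyond the function added above:

    /* Sketch: returns 1 if `heap` currently routes allocations through custom
     * handlers installed via zend_mm_set_custom_handlers(). */
    static int heap_uses_custom_handlers(zend_mm_heap *heap)
    {
        void *(*h_malloc)(size_t);
        void  (*h_free)(void*);
        void *(*h_realloc)(void*, size_t);

        zend_mm_get_custom_handlers(heap, &h_malloc, &h_free, &h_realloc);
        return h_malloc != NULL;  /* all three come back NULL otherwise */
    }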
+ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
+{
+#if ZEND_MM_CUSTOM
+    return heap->storage;
+#else
+    return NULL;
+#endif
+}
+
+ZEND_API zend_mm_heap *zend_mm_startup(void)
+{
+    return zend_mm_init();
+}
+
+ZEND_API zend_mm_heap *zend_mm_startup_ex(zend_mm_storage *storage)
+{
+#if ZEND_MM_STORAGE
+    zend_mm_chunk *chunk = (zend_mm_chunk*)storage->chunk_alloc(storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
+    zend_mm_heap *heap;
+
+    if (UNEXPECTED(chunk == NULL)) {
+#if ZEND_MM_ERROR
+#ifdef _WIN32
+        stderr_last_error("Can't initialize heap");
+#else
+        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
+#endif
+#endif
+        return NULL;
+    }
+    heap = &chunk->heap_slot;
+    chunk->heap = heap;
+    chunk->next = chunk;
+    chunk->prev = chunk;
+    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
+    chunk->free_tail = ZEND_MM_FIRST_PAGE;
+    chunk->num = 0;
+    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
+    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
+    heap->main_chunk = chunk;
+    heap->cached_chunks = NULL;
+    heap->chunks_count = 1;
+    heap->peak_chunks_count = 1;
+    heap->cached_chunks_count = 0;
+    heap->avg_chunks_count = 1.0;
+#if ZEND_MM_STAT || ZEND_MM_LIMIT
+    heap->real_size = ZEND_MM_CHUNK_SIZE;
+#endif
+#if ZEND_MM_STAT
+    heap->real_peak = ZEND_MM_CHUNK_SIZE;
+    heap->size = 0;
+    heap->peak = 0;
+#endif
+#if ZEND_MM_LIMIT
+    heap->limit = (Z_L(-1) >> Z_L(1));
+    heap->overflow = 0;
+#endif
+#if ZEND_MM_CUSTOM
+    heap->use_custom_heap = 0;
+#endif
+    heap->storage = storage;
+    heap->huge_list = NULL;
+    return heap;
+#else
+    return NULL;
+#endif
+}
+
 
 /*
  * Local variables:
  * tab-width: 4
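For context, here is a minimal sketch of driving the new zend_mm_startup_ex() entry point with a custom storage. The handler names (chunk_alloc, chunk_free, chunk_truncate, dtor) come from this diff; backing chunks with posix_memalign() is purely illustrative (a real user such as a shared-memory allocator would carve chunks out of its own region), and the zend_mm_storage layout is assumed to match the sketch given after the chunk-wrapper hunk above.

    #include <stdlib.h>

    static void *my_chunk_alloc(zend_mm_storage *storage, size_t size, size_t alignment)
    {
        void *ptr = NULL;

        /* posix_memalign() satisfies the alignment contract checked in zend_mm_chunk_alloc() */
        return posix_memalign(&ptr, alignment, size) == 0 ? ptr : NULL;
    }

    static void my_chunk_free(zend_mm_storage *storage, void *chunk, size_t size)
    {
        free(chunk);
    }

    static void my_chunk_truncate(zend_mm_storage *storage, void *chunk,
                                  size_t old_size, size_t new_size)
    {
        /* malloc-backed chunks cannot return their tail to the OS; keep the block whole */
        (void)chunk; (void)old_size; (void)new_size;
    }

    static void my_storage_dtor(zend_mm_storage *storage)
    {
        /* runs after zend_mm_shutdown() has released the main chunk via chunk_free() */
        free(storage);
    }

    zend_mm_heap *start_heap_with_custom_storage(void)
    {
        zend_mm_storage *storage = calloc(1, sizeof(*storage));

        if (!storage) {
            return NULL;
        }
        storage->chunk_alloc    = my_chunk_alloc;
        storage->chunk_free     = my_chunk_free;
        storage->chunk_truncate = my_chunk_truncate;
        storage->dtor           = my_storage_dtor;
        /* returns NULL when PHP is built with ZEND_MM_STORAGE disabled */
        return zend_mm_startup_ex(storage);
    }

Because zend_mm_startup_ex() places the heap inside the first chunk it requests, the storage object itself must stay valid until zend_mm_shutdown() calls its dtor.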