void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}

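// Reserve the code heap's backing memory: the full range described by 'rs' is
// reserved, 'committed_size' bytes (rounded up to the page size) are committed
// up front, and the heap is divided into fixed-size segments of 'segment_size'
// bytes.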
bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  assert(rs.size() >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  size_t page_size = os::vm_page_size();
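  // If the platform can execute code from large pages, pick the largest page
  // size that still yields at least 'min_pages' pages for both the committed
  // and the reserved region; otherwise keep the default VM page size.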
  if (os::can_execute_large_page_memory()) {
    const size_t min_pages = 8;
    page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
                     os::page_size_for_region(rs.size(), min_pages));
  }

  const size_t granularity = os::vm_allocation_granularity();
  const size_t c_size = align_size_up(committed_size, page_size);

  os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
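  // The heap is managed in segments: translate the committed and reserved
  // byte sizes into segment counts.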
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
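  // The segment map (_segmap) uses one byte per segment, so the segment counts
  // also serve as byte sizes here; round them up to the required alignments.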
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
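  // For reference, a sketch (not part of this excerpt) of how the segment
  // conversion helpers used above are typically defined in heap.hpp, relying
  // on _segment_size being a power of two:
  //
  //   size_t CodeHeap::size_to_segments(size_t size) const {
  //     // Round a byte size up to a whole number of segments.
  //     return (size + _segment_size - 1) >> _log2_segment_size;
  //   }
  //
  //   size_t CodeHeap::segments_to_size(size_t number_of_segments) const {
  //     return number_of_segments << _log2_segment_size;
  //   }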
  // A variant of the page size selection above uses the aligned helper:
  //
  //   page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
  //                    os::page_size_for_region_aligned(rs.size(), min_pages));