// NOTE(review): tail of a function whose opening lines are outside this chunk.
// On Linux only, forward the newly mapped code region [base, base+size) to an
// externally defined hook (declared locally here rather than in a header).
// Presumably this lets a Linux-specific tool observe/annotate executable
// mappings -- confirm against the definition of linux_wrap_code.
83 #ifdef LINUX
84 extern void linux_wrap_code(char* base, size_t size);
85 linux_wrap_code(base, size);
86 #endif
87 }
88
89
// Reserve virtual address space for the code heap and commit the initial
// portion.  Returns false when the backing VirtualSpace cannot be
// initialized; on success, records segment bookkeeping for the committed
// and reserved ranges.
// NOTE(review): the closing lines of this function lie beyond this chunk.
// NOTE(review): this copy calls os::page_size_for_region; a later revision
// of this code uses the explicit *_aligned variant -- verify which API the
// surrounding tree provides before building.
90 bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
91 size_t segment_size) {
// Preconditions: committed memory fits in the reservation, and a segment is
// large enough to hold a FreeBlock header.  Power-of-two segment size is
// required because segment arithmetic uses log2 shifts (see below).
92 assert(reserved_size >= committed_size, "reserved < committed");
93 assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
94 assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
95
96 _segment_size = segment_size;
97 _log2_segment_size = exact_log2(segment_size);
98
99 // Reserve and initialize space for _memory.
100 size_t page_size = os::vm_page_size();
101 if (os::can_execute_large_page_memory()) {
// When large executable pages are available, pick the biggest page size
// that still yields at least min_pages pages for BOTH the committed and
// the reserved region (hence MIN2 of the two per-region answers).
102 const size_t min_pages = 8;
103 page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
104 os::page_size_for_region(reserved_size, min_pages));
105 }
106
// Round the reservation up to page/allocation-granularity alignment, and
// the committed portion up to the chosen page size.
107 const size_t granularity = os::vm_allocation_granularity();
108 const size_t r_align = MAX2(page_size, granularity);
109 const size_t r_size = align_size_up(reserved_size, r_align);
110 const size_t c_size = align_size_up(committed_size, page_size);
111
// rs_align == 0 means "no special alignment": only request extra alignment
// when a non-default (large) page size was selected above.
112 const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
113 MAX2(page_size, granularity);
114 ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
115 os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
116 rs.base(), rs.size());
117 if (!_memory.initialize(rs, c_size)) {
118 return false;
119 }
120
// Let platform-specific code (e.g. the Linux hook earlier in this file)
// know about the freshly committed code mapping, then derive segment counts
// from the actual committed/reserved byte sizes.
121 on_code_mapping(_memory.low(), _memory.committed_size())
122 _number_of_committed_segments = size_to_segments(_memory.committed_size());
123 _number_of_reserved_segments = size_to_segments(_memory.reserved_size());
124 assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
|
// NOTE(review): second copy of the same chunk (this dump repeats lines
// 83-124); tail of a function whose opening lines are not visible here.
// Linux-only hook: announce the code mapping [base, base+size) to an
// externally defined helper -- presumably for tooling that needs to know
// where executable code lives; confirm against linux_wrap_code's definition.
83 #ifdef LINUX
84 extern void linux_wrap_code(char* base, size_t size);
85 linux_wrap_code(base, size);
86 #endif
87 }
88
89
// Reserve virtual address space for the code heap and commit the initial
// portion; returns false if the backing VirtualSpace fails to initialize.
// NOTE(review): this is the second copy of the chunk; it differs from the
// first only in calling os::page_size_for_region_aligned (the renamed,
// alignment-checked variant), i.e. it appears to be the "after" side of a
// diff.  The closing lines of the function lie beyond this chunk.
90 bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
91 size_t segment_size) {
// Preconditions: committed fits inside reserved; a segment can hold a
// FreeBlock header; segment_size is a power of two so that segment
// arithmetic can use the log2 shift computed just below.
92 assert(reserved_size >= committed_size, "reserved < committed");
93 assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
94 assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
95
96 _segment_size = segment_size;
97 _log2_segment_size = exact_log2(segment_size);
98
99 // Reserve and initialize space for _memory.
100 size_t page_size = os::vm_page_size();
101 if (os::can_execute_large_page_memory()) {
// Choose the largest page size giving at least min_pages pages for both
// the committed and reserved regions (MIN2 of the two region answers).
// The *_aligned variant additionally requires the region size to be
// page-aligned for the returned page size.
102 const size_t min_pages = 8;
103 page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
104 os::page_size_for_region_aligned(reserved_size, min_pages));
105 }
106
// Align the reservation to max(page size, allocation granularity) and the
// committed portion to the chosen page size.
107 const size_t granularity = os::vm_allocation_granularity();
108 const size_t r_align = MAX2(page_size, granularity);
109 const size_t r_size = align_size_up(reserved_size, r_align);
110 const size_t c_size = align_size_up(committed_size, page_size);
111
// Request extra alignment (rs_align > 0) only when a non-default (large)
// page size was selected; otherwise 0 means default alignment.
112 const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
113 MAX2(page_size, granularity);
114 ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
115 os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
116 rs.base(), rs.size());
117 if (!_memory.initialize(rs, c_size)) {
118 return false;
119 }
120
// Notify platform code of the committed mapping, then derive the segment
// counts from the actual committed/reserved byte sizes.
121 on_code_mapping(_memory.low(), _memory.committed_size());
122 _number_of_committed_segments = size_to_segments(_memory.committed_size());
123 _number_of_reserved_segments = size_to_segments(_memory.reserved_size());
124 assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
|