void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINux
#endif
#ifdef LINUX
  // OS-specific hook for a newly mapped code area; on Linux the mapping is
  // handed to linux_wrap_code so that JIT-generated code can be made visible
  // to external profiling tools.
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}
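// A minimal sketch, not HotSpot's implementation, of what a
// linux_wrap_code-style hook might do under the assumption above: append the
// newly mapped region to a per-process dump file so an external profiler can
// attribute samples to JIT code. All names and the path are illustrative.
//
//   #include <stdio.h>
//   #include <unistd.h>
//
//   void linux_wrap_code_sketch(char* base, size_t size) {
//     char path[64];
//     snprintf(path, sizeof(path), "/tmp/hs-vm-%d.map", (int)getpid());
//     FILE* f = fopen(path, "ab");   // append this mapping's code bytes
//     if (f != NULL) {
//       fwrite(base, 1, size, f);
//       fclose(f);
//     }
//   }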


bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
                       size_t segment_size) {
  assert(reserved_size >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory. Prefer large pages, but only
  // use a page size that both the committed and the reserved region can
  // accommodate with at least min_pages pages each.
  size_t page_size = os::vm_page_size();
  if (os::can_execute_large_page_memory()) {
    const size_t min_pages = 8;
    page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
                     os::page_size_for_region(reserved_size, min_pages));
  }
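  // Worked example (illustrative numbers): with committed_size = 1*M,
  // reserved_size = 8*M and min_pages = 8, a 2 MB large page would give the
  // committed region fewer than 8 pages, so the MIN2 of the two
  // page_size_for_region results falls back to a page size that fits both
  // regions. Older versions of this code expressed the same idea as a single
  // call, os::page_size_for_region(committed_size, reserved_size, 8).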

  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(reserved_size, r_align);
  const size_t c_size = align_size_up(committed_size, page_size);
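  // align_size_up rounds its first argument up to a multiple of the
  // (power-of-two) alignment, i.e. (size + align - 1) & ~(align - 1).
  // For example, reserved_size = 5*M with r_align = 2*M yields r_size = 6*M,
  // while a 4 KB alignment would leave 5*M unchanged.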

  // Request an explicit alignment only when using pages larger than the
  // default vm page size.
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);
  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  // The segment map stores one byte per segment, so a segment count doubles
  // as a byte size for the map.
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
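  // size_to_segments (not shown in this excerpt) is assumed to round a byte
  // size up to whole segments; with _segment_size a power of two this is
  //   (size + _segment_size - 1) >> _log2_segment_size
  // e.g. 100 KB with 64-byte segments -> 1600 segments.
  //
  // Hypothetical caller sketch (names and sizes illustrative, not HotSpot's
  // actual CodeCache setup): reserve an 8 MB code heap, committing 1 MB up
  // front, with 64-byte segments:
  //   CodeHeap heap;
  //   if (!heap.reserve(8*M, 1*M, 64)) {
  //     return false;  // could not reserve the code heap
  //   }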