80
81
// Hook invoked after code-heap memory has been mapped/committed (see the
// call in CodeHeap::reserve, which passes the low address and committed
// size of the backing VirtualSpace). No-op on non-Linux platforms.
void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  // Forward declaration kept local to this function: the implementation
  // lives in the Linux-specific OS code, not in a shared header.
  // NOTE(review): presumably notifies OS-level tooling of the new
  // executable code range -- confirm against os_linux.cpp.
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}
88
89
90 bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
91 size_t segment_size) {
92 assert(reserved_size >= committed_size, "reserved < committed");
93 assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
94 assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
95
96 _segment_size = segment_size;
97 _log2_segment_size = exact_log2(segment_size);
98
99 // Reserve and initialize space for _memory.
100 const size_t page_size = os::can_execute_large_page_memory() ?
101 os::page_size_for_region(committed_size, reserved_size, 8) :
102 os::vm_page_size();
103 const size_t granularity = os::vm_allocation_granularity();
104 const size_t r_align = MAX2(page_size, granularity);
105 const size_t r_size = align_size_up(reserved_size, r_align);
106 const size_t c_size = align_size_up(committed_size, page_size);
107
108 const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
109 MAX2(page_size, granularity);
110 ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
111 os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
112 rs.base(), rs.size());
113 if (!_memory.initialize(rs, c_size)) {
114 return false;
115 }
116
117 on_code_mapping(_memory.low(), _memory.committed_size());
118 _number_of_committed_segments = size_to_segments(_memory.committed_size());
119 _number_of_reserved_segments = size_to_segments(_memory.reserved_size());
120 assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
121 const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
122 const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
|
80
81
// Hook invoked after code-heap memory has been mapped/committed (see the
// call in CodeHeap::reserve, which passes the low address and committed
// size of the backing VirtualSpace). No-op on non-Linux platforms.
void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  // Forward declaration kept local to this function: the implementation
  // lives in the Linux-specific OS code, not in a shared header.
  // NOTE(review): presumably notifies OS-level tooling of the new
  // executable code range -- confirm against os_linux.cpp.
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}
88
89
90 bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
91 size_t segment_size) {
92 assert(reserved_size >= committed_size, "reserved < committed");
93 assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
94 assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
95
96 _segment_size = segment_size;
97 _log2_segment_size = exact_log2(segment_size);
98
99 // Reserve and initialize space for _memory.
100 size_t page_size = os::vm_page_size();
101 if (os::can_execute_large_page_memory()) {
102 const size_t min_pages = 8;
103 page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
104 os::page_size_for_region(reserved_size, min_pages));
105 }
106
107 const size_t granularity = os::vm_allocation_granularity();
108 const size_t r_align = MAX2(page_size, granularity);
109 const size_t r_size = align_size_up(reserved_size, r_align);
110 const size_t c_size = align_size_up(committed_size, page_size);
111
112 const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
113 MAX2(page_size, granularity);
114 ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
115 os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
116 rs.base(), rs.size());
117 if (!_memory.initialize(rs, c_size)) {
118 return false;
119 }
120
121 on_code_mapping(_memory.low(), _memory.committed_size());
122 _number_of_committed_segments = size_to_segments(_memory.committed_size());
123 _number_of_reserved_segments = size_to_segments(_memory.reserved_size());
124 assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
125 const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
126 const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
|