112 _relocation_size = round_to(cb->total_relocation_size(), oopSize);
113 _content_offset = align_code_offset(header_size + _relocation_size);
114 _code_offset = _content_offset + cb->total_offset_of(cb->insts());
115 _data_offset = _content_offset + round_to(cb->total_content_size(), oopSize);
116 assert(_data_offset <= size, "codeBlob is too small");
117
118 cb->copy_code_and_locs_to(this);
119 set_oop_maps(oop_maps);
120 _frame_size = frame_size;
121 #ifdef COMPILER1
122 // probably wrong for tiered
123 assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
124 #endif // COMPILER1
125 }
126
127
128 void CodeBlob::set_oop_maps(OopMapSet* p) {
129 // Danger Will Robinson! This method allocates a big
130 // chunk of memory, its your job to free it.
131 if (p != NULL) {
132 // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps
133 _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode);
134 p->copy_to((address)_oop_maps);
135 } else {
136 _oop_maps = NULL;
137 }
138 }
139
140
141 void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
142 // Do not hold the CodeCache lock during name formatting.
143 assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");
144
145 if (stub != NULL) {
146 char stub_id[256];
147 assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
148 jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
149 if (PrintStubCode) {
150 ttyLocker ttyl;
151 tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
152 Disassembler::decode(stub->code_begin(), stub->code_end());
153 tty->cr();
154 }
158 const char* stub_name = name2;
159 if (name2[0] == '\0') stub_name = name1;
160 JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
161 }
162 }
163
164 // Track memory usage statistic after releasing CodeCache_lock
165 MemoryService::track_code_cache_memory_usage();
166 }
167
168
169 void CodeBlob::flush() {
170 if (_oop_maps) {
171 FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
172 _oop_maps = NULL;
173 }
174 _strings.free();
175 }
176
177
178 OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
179 assert(oop_maps() != NULL, "nope");
180 return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
181 }
182
183
184 //----------------------------------------------------------------------------------------------------
185 // Implementation of BufferBlob
186
187
// A BufferBlob is a plain CodeBlob: it carries no relocation info
// (locs_size 0) and is marked frame_never_safe, so it never has a
// walkable frame.
BufferBlob::BufferBlob(const char* name, int size)
: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}
191
192 BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
193 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
194
195 BufferBlob* blob = NULL;
196 unsigned int size = sizeof(BufferBlob);
197 // align the size to CodeEntryAlignment
198 size = align_code_offset(size);
|
112 _relocation_size = round_to(cb->total_relocation_size(), oopSize);
113 _content_offset = align_code_offset(header_size + _relocation_size);
114 _code_offset = _content_offset + cb->total_offset_of(cb->insts());
115 _data_offset = _content_offset + round_to(cb->total_content_size(), oopSize);
116 assert(_data_offset <= size, "codeBlob is too small");
117
118 cb->copy_code_and_locs_to(this);
119 set_oop_maps(oop_maps);
120 _frame_size = frame_size;
121 #ifdef COMPILER1
122 // probably wrong for tiered
123 assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
124 #endif // COMPILER1
125 }
126
127
128 void CodeBlob::set_oop_maps(OopMapSet* p) {
129 // Danger Will Robinson! This method allocates a big
130 // chunk of memory, its your job to free it.
131 if (p != NULL) {
132 _oop_maps = ImmutableOopMapSet::build_from(p);
133 } else {
134 _oop_maps = NULL;
135 }
136 }
137
138
139 void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
140 // Do not hold the CodeCache lock during name formatting.
141 assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");
142
143 if (stub != NULL) {
144 char stub_id[256];
145 assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
146 jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
147 if (PrintStubCode) {
148 ttyLocker ttyl;
149 tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
150 Disassembler::decode(stub->code_begin(), stub->code_end());
151 tty->cr();
152 }
156 const char* stub_name = name2;
157 if (name2[0] == '\0') stub_name = name1;
158 JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
159 }
160 }
161
162 // Track memory usage statistic after releasing CodeCache_lock
163 MemoryService::track_code_cache_memory_usage();
164 }
165
166
167 void CodeBlob::flush() {
168 if (_oop_maps) {
169 FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
170 _oop_maps = NULL;
171 }
172 _strings.free();
173 }
174
175
176 const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) {
177 assert(oop_maps() != NULL, "nope");
178 return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
179 }
180
181
182 //----------------------------------------------------------------------------------------------------
183 // Implementation of BufferBlob
184
185
// A BufferBlob is a plain CodeBlob: it carries no relocation info
// (locs_size 0) and is marked frame_never_safe, so it never has a
// walkable frame.
BufferBlob::BufferBlob(const char* name, int size)
: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}
189
190 BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
191 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
192
193 BufferBlob* blob = NULL;
194 unsigned int size = sizeof(BufferBlob);
195 // align the size to CodeEntryAlignment
196 size = align_code_offset(size);
|