1 /*
2 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_OOPS_METHODDATAOOP_HPP
26 #define SHARE_VM_OOPS_METHODDATAOOP_HPP
27
28 #include "interpreter/bytecodes.hpp"
29 #include "memory/universe.hpp"
30 #include "oops/method.hpp"
31 #include "oops/oop.hpp"
32 #include "runtime/orderAccess.hpp"
33 #include "utilities/align.hpp"
34 #if INCLUDE_JVMCI
35 #include "jvmci/jvmci_globals.hpp"
36 #endif
37
38 class BytecodeStream;
39 class KlassSizeStats;
40
41 // The MethodData object collects counts and other profile information
42 // during zeroth-tier (interpretive) and first-tier execution.
43 // The profile is used later by compilation heuristics. Some heuristics
44 // enable use of aggressive (or "heroic") optimizations. An aggressive
45 // optimization often has a down-side, a corner case that it handles
46 // poorly, but which is thought to be rare. The profile provides
47 // evidence of this rarity for a given method or even BCI. It allows
48 // the compiler to back out of the optimization at places where it
49 // has historically been a poor choice. Other heuristics try to use
50 // specific information gathered about types observed at a given site.
51 //
52 // All data in the profile is approximate. It is expected to be accurate
184 _header._struct._flags = (new_state << trap_shift) | old_flags;
185 }
186
  // Raw flags byte of the header: tag-specific flag bits in the low
  // positions plus the trap state stored above trap_shift.
  u1 flags() const {
    return _header._struct._flags;
  }

  // Bytecode index within the method that this data entry profiles.
  u2 bci() const {
    return _header._struct._bci;
  }
194
195 void set_header(intptr_t value) {
196 _header._bits = value;
197 }
198 intptr_t header() {
199 return _header._bits;
200 }
201 void set_cell_at(int index, intptr_t value) {
202 _cells[index] = value;
203 }
204 void release_set_cell_at(int index, intptr_t value) {
205 OrderAccess::release_store(&_cells[index], value);
206 }
207 intptr_t cell_at(int index) const {
208 return _cells[index];
209 }
210
  // Set one tag-specific flag bit in the header (debug bounds-checked).
  void set_flag_at(int flag_number) {
    assert(flag_number < flag_limit, "oob");
    _header._struct._flags |= (0x1 << flag_number);
  }
  // Test one tag-specific flag bit in the header.
  bool flag_at(int flag_number) const {
    assert(flag_number < flag_limit, "oob");
    return (_header._struct._flags & (0x1 << flag_number)) != 0;
  }

  // Low-level support for code generation.
  // Byte offset of the header word within a DataLayout, used by the
  // interpreter and compilers when emitting profile-update code.
  static ByteSize header_offset() {
    return byte_offset_of(DataLayout, _header);
  }
  // Byte offset of the tag byte within a DataLayout.
  static ByteSize tag_offset() {
    return byte_offset_of(DataLayout, _header._struct._tag);
  }
308 };
309
310 public:
  // How many cells are in this?
  // Each concrete ProfileData subclass overrides this; the base
  // implementation must never execute.
  virtual int cell_count() const {
    ShouldNotReachHere();
    return -1;
  }

  // Return the size of this data.
  // Total size in bytes, including the DataLayout header cells.
  int size_in_bytes() {
    return DataLayout::compute_size_in_bytes(cell_count());
  }
321
322 protected:
  // Low-level accessors for underlying data
  // All typed accessors below funnel through the intptr_t versions,
  // which bounds-check against cell_count() in debug builds.
  void set_intptr_at(int index, intptr_t value) {
    assert(0 <= index && index < cell_count(), "oob");
    data()->set_cell_at(index, value);
  }
  // Release-ordered store; pairs with a plain load by a racing reader.
  void release_set_intptr_at(int index, intptr_t value) {
    assert(0 <= index && index < cell_count(), "oob");
    data()->release_set_cell_at(index, value);
  }
  intptr_t intptr_at(int index) const {
    assert(0 <= index && index < cell_count(), "oob");
    return data()->cell_at(index);
  }
  // Unsigned-int view of a cell (value narrows on 64-bit platforms).
  void set_uint_at(int index, uint value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_uint_at(int index, uint value) {
    release_set_intptr_at(index, (intptr_t) value);
  }
  uint uint_at(int index) const {
    return (uint)intptr_at(index);
  }
  // Signed-int view of a cell.
  void set_int_at(int index, int value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_int_at(int index, int value) {
    release_set_intptr_at(index, (intptr_t) value);
  }
  int int_at(int index) const {
    return (int)intptr_at(index);
  }
  // Like int_at(), but skips the debug-only bounds check.
  int int_at_unchecked(int index) const {
    return (int)data()->cell_at(index);
  }
  // Store/load an oop in a cell via the oop<->intptr_t cast helpers.
  void set_oop_at(int index, oop value) {
    set_intptr_at(index, cast_from_oop<intptr_t>(value));
  }
  oop oop_at(int index) const {
    return cast_to_oop(intptr_at(index));
  }

  // Forward flag accesses to the underlying DataLayout header.
  void set_flag_at(int flag_number) {
    data()->set_flag_at(flag_number);
  }
  bool flag_at(int flag_number) const {
    return data()->flag_at(flag_number);
  }
370
1586 // RetData
1587 //
1588 // A RetData is used to access profiling information for a ret bytecode.
1589 // It is composed of a count of the number of times that the ret has
1590 // been executed, followed by a series of triples of the form
1591 // (bci, count, di) which count the number of times that some bci was the
1592 // target of the ret and cache a corresponding data displacement.
1593 class RetData : public CounterData {
1594 protected:
  // Cell layout of one (bci, count, displacement) row, placed after
  // the inherited counter cells.
  enum {
    bci0_offset = counter_cell_count,   // target bci of the first row
    count0_offset,                      // hit count for that bci
    displacement0_offset,               // cached data displacement for that bci
    ret_row_cell_count = (displacement0_offset + 1) - bci0_offset  // cells per row
  };
1601
  // Record the target bci for the given row.
  void set_bci(uint row, int bci) {
    assert((uint)row < row_limit(), "oob");
    set_int_at(bci0_offset + row * ret_row_cell_count, bci);
  }
  void release_set_bci(uint row, int bci) {
    assert((uint)row < row_limit(), "oob");
    // 'release' when setting the bci acts as a valid flag for other
    // threads wrt bci_count and bci_displacement.
    release_set_int_at(bci0_offset + row * ret_row_cell_count, bci);
  }
  // Set the hit count associated with the given row.
  void set_bci_count(uint row, uint count) {
    assert((uint)row < row_limit(), "oob");
    set_uint_at(count0_offset + row * ret_row_cell_count, count);
  }
1616 void set_bci_displacement(uint row, int disp) {
1617 set_int_at(displacement0_offset + row * ret_row_cell_count, disp);
1618 }
1619
1620 public:
  // Wraps a layout that must already carry ret_data_tag (set when the
  // MethodData area was laid out for this bytecode).
  RetData(DataLayout* layout) : CounterData(layout) {
    assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
  }

  // Dynamic type test used within the ProfileData hierarchy in place of RTTI.
  virtual bool is_RetData() const { return true; }
1626
  // Sentinel stored in the bci cell of an unused row.
  enum {
    no_bci = -1 // value of bci when bci1/2 are not in use.
  };
1630
1631 static int static_cell_count() {
|
1 /*
2 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_OOPS_METHODDATAOOP_HPP
26 #define SHARE_VM_OOPS_METHODDATAOOP_HPP
27
28 #include "interpreter/bytecodes.hpp"
29 #include "memory/universe.hpp"
30 #include "oops/method.hpp"
31 #include "oops/oop.hpp"
32 #include "utilities/align.hpp"
33 #if INCLUDE_JVMCI
34 #include "jvmci/jvmci_globals.hpp"
35 #endif
36
37 class BytecodeStream;
38 class KlassSizeStats;
39
40 // The MethodData object collects counts and other profile information
41 // during zeroth-tier (interpretive) and first-tier execution.
42 // The profile is used later by compilation heuristics. Some heuristics
43 // enable use of aggressive (or "heroic") optimizations. An aggressive
44 // optimization often has a down-side, a corner case that it handles
45 // poorly, but which is thought to be rare. The profile provides
46 // evidence of this rarity for a given method or even BCI. It allows
47 // the compiler to back out of the optimization at places where it
48 // has historically been a poor choice. Other heuristics try to use
49 // specific information gathered about types observed at a given site.
50 //
51 // All data in the profile is approximate. It is expected to be accurate
183 _header._struct._flags = (new_state << trap_shift) | old_flags;
184 }
185
  // Raw flags byte of the header: tag-specific flag bits in the low
  // positions plus the trap state stored above trap_shift.
  u1 flags() const {
    return _header._struct._flags;
  }

  // Bytecode index within the method that this data entry profiles.
  u2 bci() const {
    return _header._struct._bci;
  }
193
194 void set_header(intptr_t value) {
195 _header._bits = value;
196 }
197 intptr_t header() {
198 return _header._bits;
199 }
200 void set_cell_at(int index, intptr_t value) {
201 _cells[index] = value;
202 }
203 void release_set_cell_at(int index, intptr_t value);
204 intptr_t cell_at(int index) const {
205 return _cells[index];
206 }
207
  // Set one tag-specific flag bit in the header (debug bounds-checked).
  void set_flag_at(int flag_number) {
    assert(flag_number < flag_limit, "oob");
    _header._struct._flags |= (0x1 << flag_number);
  }
  // Test one tag-specific flag bit in the header.
  bool flag_at(int flag_number) const {
    assert(flag_number < flag_limit, "oob");
    return (_header._struct._flags & (0x1 << flag_number)) != 0;
  }

  // Low-level support for code generation.
  // Byte offset of the header word within a DataLayout, used by the
  // interpreter and compilers when emitting profile-update code.
  static ByteSize header_offset() {
    return byte_offset_of(DataLayout, _header);
  }
  // Byte offset of the tag byte within a DataLayout.
  static ByteSize tag_offset() {
    return byte_offset_of(DataLayout, _header._struct._tag);
  }
305 };
306
307 public:
  // How many cells are in this?
  // Each concrete ProfileData subclass overrides this; the base
  // implementation must never execute.
  virtual int cell_count() const {
    ShouldNotReachHere();
    return -1;
  }

  // Return the size of this data.
  // Total size in bytes, including the DataLayout header cells.
  int size_in_bytes() {
    return DataLayout::compute_size_in_bytes(cell_count());
  }
318
319 protected:
  // Low-level accessors for underlying data
  // All typed accessors below funnel through the intptr_t versions,
  // which bounds-check against cell_count() in debug builds.
  void set_intptr_at(int index, intptr_t value) {
    assert(0 <= index && index < cell_count(), "oob");
    data()->set_cell_at(index, value);
  }
  // Release-ordered store; declaration only, defined out of line.
  void release_set_intptr_at(int index, intptr_t value);
  intptr_t intptr_at(int index) const {
    assert(0 <= index && index < cell_count(), "oob");
    return data()->cell_at(index);
  }
  // Unsigned-int view of a cell (value narrows on 64-bit platforms).
  void set_uint_at(int index, uint value) {
    set_intptr_at(index, (intptr_t) value);
  }
  // Release-ordered store; declaration only, defined out of line.
  void release_set_uint_at(int index, uint value);
  uint uint_at(int index) const {
    return (uint)intptr_at(index);
  }
  // Signed-int view of a cell.
  void set_int_at(int index, int value) {
    set_intptr_at(index, (intptr_t) value);
  }
  // Release-ordered store; declaration only, defined out of line.
  void release_set_int_at(int index, int value);
  int int_at(int index) const {
    return (int)intptr_at(index);
  }
  // Like int_at(), but skips the debug-only bounds check.
  int int_at_unchecked(int index) const {
    return (int)data()->cell_at(index);
  }
  // Store/load an oop in a cell via the oop<->intptr_t cast helpers.
  void set_oop_at(int index, oop value) {
    set_intptr_at(index, cast_from_oop<intptr_t>(value));
  }
  oop oop_at(int index) const {
    return cast_to_oop(intptr_at(index));
  }

  // Forward flag accesses to the underlying DataLayout header.
  void set_flag_at(int flag_number) {
    data()->set_flag_at(flag_number);
  }
  bool flag_at(int flag_number) const {
    return data()->flag_at(flag_number);
  }
360
1576 // RetData
1577 //
1578 // A RetData is used to access profiling information for a ret bytecode.
1579 // It is composed of a count of the number of times that the ret has
1580 // been executed, followed by a series of triples of the form
1581 // (bci, count, di) which count the number of times that some bci was the
1582 // target of the ret and cache a corresponding data displacement.
1583 class RetData : public CounterData {
1584 protected:
  // Cell layout of one (bci, count, displacement) row, placed after
  // the inherited counter cells.
  enum {
    bci0_offset = counter_cell_count,   // target bci of the first row
    count0_offset,                      // hit count for that bci
    displacement0_offset,               // cached data displacement for that bci
    ret_row_cell_count = (displacement0_offset + 1) - bci0_offset  // cells per row
  };
1591
  // Record the target bci for the given row.
  void set_bci(uint row, int bci) {
    assert((uint)row < row_limit(), "oob");
    set_int_at(bci0_offset + row * ret_row_cell_count, bci);
  }
  // Release-ordered variant; declaration only, defined out of line.
  void release_set_bci(uint row, int bci);
  // Set the hit count associated with the given row.
  void set_bci_count(uint row, uint count) {
    assert((uint)row < row_limit(), "oob");
    set_uint_at(count0_offset + row * ret_row_cell_count, count);
  }
1601 void set_bci_displacement(uint row, int disp) {
1602 set_int_at(displacement0_offset + row * ret_row_cell_count, disp);
1603 }
1604
1605 public:
  // Wraps a layout that must already carry ret_data_tag (set when the
  // MethodData area was laid out for this bytecode).
  RetData(DataLayout* layout) : CounterData(layout) {
    assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
  }

  // Dynamic type test used within the ProfileData hierarchy in place of RTTI.
  virtual bool is_RetData() const { return true; }
1611
  // Sentinel stored in the bci cell of an unused row.
  enum {
    no_bci = -1 // value of bci when bci1/2 are not in use.
  };
1615
1616 static int static_cell_count() {
|