523
524 virtual void print_data_on(outputStream* st, const char* extra = NULL) const { // base implementation: every concrete ProfileData subtype must override this
525 ShouldNotReachHere();
526 }
527
528 void print_data_on(outputStream* st, const MethodData* md) const;
529
530 void print_shared(outputStream* st, const char* name, const char* extra) const;
531 void tab(outputStream* st, bool first = false) const;
532 };
533
534 // BitData
535 //
536 // A BitData holds a flag or two in its header.
537 class BitData : public ProfileData {
538 protected:
539 enum {
540 // null_seen:
541 // saw a null operand (cast/aastore/instanceof)
542 null_seen_flag = DataLayout::first_flag + 0
543 };
544 enum { bit_cell_count = 0 }; // no additional data fields needed.
545 public:
546 BitData(DataLayout* layout) : ProfileData(layout) { // wraps an existing layout; BitData keeps all of its state in the header flags
547 }
548
549 virtual bool is_BitData() const { return true; }
550
551 static int static_cell_count() { // no cells beyond the DataLayout header (bit_cell_count == 0)
552 return bit_cell_count;
553 }
554
555 virtual int cell_count() const { // fixed-size record: dynamic size equals static size
556 return static_cell_count();
557 }
558
559 // Accessor
560
561 // The null_seen flag bit is specially known to the interpreter.
562 // Consulting it allows the compiler to avoid setting up null_check traps.
563 bool null_seen() { return flag_at(null_seen_flag); }
564 void set_null_seen() { set_flag_at(null_seen_flag); }
565
566
567 // Code generation support
568 static int null_seen_byte_constant() { // byte-level encoding of null_seen_flag, for JIT-generated flag tests
569 return flag_number_to_byte_constant(null_seen_flag);
570 }
571
572 static ByteSize bit_data_size() { // total record size (header only; no data cells)
573 return cell_offset(bit_cell_count);
574 }
575
576 #ifdef CC_INTERP
577 static int bit_data_size_in_bytes() { // byte-size variant used by the C++ interpreter
578 return cell_offset_in_bytes(bit_cell_count);
579 }
580
581 static void set_null_seen(DataLayout* layout) { // raw-layout variant of set_null_seen() for the C++ interpreter
582 set_flag_at(layout, null_seen_flag);
583 }
584
585 static DataLayout* advance(DataLayout* layout) {
1149 if (has_arguments()) {
1150 _args.clean_weak_klass_links(is_alive_closure);
1151 }
1152 if (has_return()) {
1153 _ret.clean_weak_klass_links(is_alive_closure);
1154 }
1155 }
1156
1157 virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1158 };
1159
1160 // ReceiverTypeData
1161 //
1162 // A ReceiverTypeData is used to access profiling information about a
1163 // dynamic type check. It consists of a counter which counts the total times
1164 // that the check is reached, and a series of (Klass*, count) pairs
1165 // which are used to store a type profile for the receiver of the check.
1166 class ReceiverTypeData : public CounterData {
1167 protected:
1168 enum {
1169 receiver0_offset = counter_cell_count,
1170 count0_offset,
1171 receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
1172 };
1173
1174 public:
1175 ReceiverTypeData(DataLayout* layout) : CounterData(layout) { // shared by subclasses: virtual-call profiles reuse this layout (see the tag assert)
1176 assert(layout->tag() == DataLayout::receiver_type_data_tag ||
1177 layout->tag() == DataLayout::virtual_call_data_tag ||
1178 layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1179 }
1180
1181 virtual bool is_ReceiverTypeData() const { return true; }
1182
1183 static int static_cell_count() { // counter cells plus TypeProfileWidth (receiver, count) row pairs
1184 return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
1185 }
1186
1187 virtual int cell_count() const { // fixed-size record: dynamic size equals static size
1188 return static_cell_count();
1189 }
1190
1191 // Direct accessors
1192 static uint row_limit() { // number of (receiver, count) rows available (TypeProfileWidth)
1193 return TypeProfileWidth;
1194 }
1195 static int receiver_cell_index(uint row) { // cell index of the row's receiver Klass*
1196 return receiver0_offset + row * receiver_type_row_cell_count;
1197 }
1198 static int receiver_count_cell_index(uint row) { // cell index of the row's hit count
1199 return count0_offset + row * receiver_type_row_cell_count;
1200 }
1201
1202 Klass* receiver(uint row) const {
1203 assert(row < row_limit(), "oob");
1204
1226 assert(row < row_limit(), "oob");
1227 // Clear total count - indicator of polymorphic call site.
1228 // The site may look like as monomorphic after that but
1229 // it allow to have more accurate profiling information because
1230 // there was execution phase change since klasses were unloaded.
1231 // If the site is still polymorphic then MDO will be updated
1232 // to reflect it. But it could be the case that the site becomes
1233 // only bimorphic. Then keeping total count not 0 will be wrong.
1234 // Even if we use monomorphic (when it is not) for compilation
1235 // we will only have trap, deoptimization and recompile again
1236 // with updated MDO after executing method in Interpreter.
1237 // An additional receiver will be recorded in the cleaned row
1238 // during next call execution.
1239 //
1240 // Note: our profiling logic works with empty rows in any slot.
1241 // We do sorting a profiling info (ciCallProfile) for compilation.
1242 //
1243 set_count(0);
1244 set_receiver(row, NULL);
1245 set_receiver_count(row, 0);
1246 }
1247
1248 // Code generation support
1249 static ByteSize receiver_offset(uint row) { // byte offset of a row's receiver cell, for generated code
1250 return cell_offset(receiver_cell_index(row));
1251 }
1252 static ByteSize receiver_count_offset(uint row) { // byte offset of a row's count cell, for generated code
1253 return cell_offset(receiver_count_cell_index(row));
1254 }
1255 static ByteSize receiver_type_data_size() { // total record size
1256 return cell_offset(static_cell_count());
1257 }
1258
1259 // GC support
1260 virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
1261
1262 #ifdef CC_INTERP
1263 static int receiver_type_data_size_in_bytes() { // byte-size variant used by the C++ interpreter
1264 return cell_offset_in_bytes(static_cell_count());
1265 }
1266
1267 static Klass *receiver_unchecked(DataLayout* layout, uint row) { // raw read: no bounds assert and no klass validity check
1268 Klass* recv = (Klass*)layout->cell_at(receiver_cell_index(row));
1269 return recv;
1270 }
1271
1272 static void increment_receiver_count_no_overflow(DataLayout* layout, Klass *rcvr) {
1273 const int num_rows = row_limit();
1274 // Receiver already exists?
1299 void print_receiver_data_on(outputStream* st) const;
1300 void print_data_on(outputStream* st, const char* extra = NULL) const;
1301 };
1302
1303 // VirtualCallData
1304 //
1305 // A VirtualCallData is used to access profiling information about a
1306 // virtual call. For now, it has nothing more than a ReceiverTypeData.
1307 class VirtualCallData : public ReceiverTypeData {
1308 public:
1309 VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) { // also used for the call-with-type-profile variant (second tag)
1310 assert(layout->tag() == DataLayout::virtual_call_data_tag ||
1311 layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1312 }
1313
1314 virtual bool is_VirtualCallData() const { return true; }
1315
1316 static int static_cell_count() {
1317 // At this point we could add more profile state, e.g., for arguments.
1318 // But for now it's the same size as the base record type.
1319 return ReceiverTypeData::static_cell_count();
1320 }
1321
1322 virtual int cell_count() const { // fixed-size record: dynamic size equals static size
1323 return static_cell_count();
1324 }
1325
1326 // Direct accessors
1327 static ByteSize virtual_call_data_size() { // total record size, for code generation
1328 return cell_offset(static_cell_count());
1329 }
1330
1331 #ifdef CC_INTERP
1332 static int virtual_call_data_size_in_bytes() { // byte-size variant used by the C++ interpreter
1333 return cell_offset_in_bytes(static_cell_count());
1334 }
1335
1336 static DataLayout* advance(DataLayout* layout) { // step past this record to the next DataLayout
1337 return (DataLayout*) (((address)layout) + (ssize_t)VirtualCallData::virtual_call_data_size_in_bytes());
1338 }
1339 #endif // CC_INTERP
1340
1341 void print_data_on(outputStream* st, const char* extra = NULL) const;
1342 };
1343
1344 // VirtualCallTypeData
1345 //
1346 // A VirtualCallTypeData is used to access profiling information about
1347 // a virtual call for which we collect type information about
1348 // arguments and return value.
1349 class VirtualCallTypeData : public VirtualCallData {
1350 private:
1351 // entries for arguments if any
1352 TypeStackSlotEntries _args;
1353 // entry for return type if any
1354 ReturnTypeEntry _ret;
1355
1356 int cell_count_global_offset() const { // index, within the whole record, of the cell that stores the dynamic cell count (base cells + local offset)
1357 return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1358 }
1359
1360 // number of cells not counting the header
2036 private:
2037 friend class ProfileData;
2038
2039 // Back pointer to the Method*
2040 Method* _method;
2041
2042 // Size of this oop in bytes
2043 int _size;
2044
2045 // Cached hint for bci_to_dp and bci_to_data
2046 int _hint_di;
2047
2048 Mutex _extra_data_lock;
2049
2050 MethodData(methodHandle method, int size, TRAPS);
2051 public:
2052 static MethodData* allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS);
2053 MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {} // For ciMethodData (stray ';' after the body removed: it formed an empty member declaration)
2054
2055 bool is_methodData() const volatile { return true; }
2056
2057 // Whole-method sticky bits and flags
2058 enum {
2059 _trap_hist_limit = 22, // decoupled from Deoptimization::Reason_LIMIT
2060 _trap_hist_mask = max_jubyte,
2061 _extra_data_count = 4 // extra DataLayout headers, for trap history
2062 }; // Public flag values
2063 private:
2064 uint _nof_decompiles; // count of all nmethod removals
2065 uint _nof_overflow_recompiles; // recompile count, excluding recomp. bits
2066 uint _nof_overflow_traps; // trap count, excluding _trap_hist
2067 union {
2068 intptr_t _align;
2069 u1 _array[_trap_hist_limit];
2070 } _trap_hist;
2071
2072 // Support for interprocedural escape analysis, from Thomas Kotzmann.
2073 intx _eflags; // flags on escape information
2074 intx _arg_local; // bit set of non-escaping arguments
2075 intx _arg_stack; // bit set of stack-allocatable arguments
2076 intx _arg_returned; // bit set of returned arguments
2077
2078 int _creation_mileage; // method mileage at MDO creation
2079
2087 // Counter values at the time profiling started.
2088 int _invocation_counter_start;
2089 int _backedge_counter_start;
2090 uint _tenure_traps;
2091 int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog
2092 int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog
2093
2094 #if INCLUDE_RTM_OPT
2095 // State of RTM code generation during compilation of the method
2096 int _rtm_state;
2097 #endif
2098
2099 // Number of loops and blocks is computed when compiling the first
2100 // time with C1. It is used to determine if method is trivial.
2101 short _num_loops;
2102 short _num_blocks;
2103 // Does this method contain anything worth profiling?
2104 enum WouldProfile {unknown, no_profile, profile};
2105 WouldProfile _would_profile;
2106
2107 // Size of _data array in bytes. (Excludes header and extra_data fields.)
2108 int _data_size;
2109
2110 // data index for the area dedicated to parameters. -1 if no
2111 // parameter profiling.
2112 enum { no_parameters = -2, parameters_uninitialized = -1 };
2113 int _parameters_type_data_di;
2114 int parameters_size_in_bytes() const { // size of the parameter-profiling area; 0 when the method has none
2115 ParametersTypeData* param = parameters_type_data();
2116 return param == NULL ? 0 : param->size_in_bytes();
2117 }
2118
2119 // Beginning of the data entries
2120 intptr_t _data[1];
2121
2122 // Helper for size computation
2123 static int compute_data_size(BytecodeStream* stream);
2124 static int bytecode_cell_count(Bytecodes::Code code);
2125 static bool is_speculative_trap_bytecode(Bytecodes::Code code);
2126 enum { no_profile_data = -1, variable_cell_count = -2 };
2365 }
2366 // If SpeculativeTrapData allocation fails try to allocate a
2367 // regular entry
2368 data = bci_to_data(bci);
2369 if (data != NULL) {
2370 return data;
2371 }
2372 return bci_to_extra_data(bci, NULL, true);
2373 }
2374
2375 // Add a handful of extra data records, for trap tracking.
2376 DataLayout* extra_data_base() const { return limit_data_position(); } // extra records start where the regular profile data ends
2377 DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); } // one past the end of this MethodData
2378 DataLayout* args_data_limit() const { return (DataLayout*)((address)this + size_in_bytes() - // extra data stops where the parameter area begins
2379 parameters_size_in_bytes()); }
2380 int extra_data_size() const { return (address)extra_data_limit() - (address)extra_data_base(); } // bytes available for extra records
2381 static DataLayout* next_extra(DataLayout* dp);
2382
2383 // Return (uint)-1 for overflow.
2384 uint trap_count(int reason) const { // per-reason trap count; (uint)-1 means the 8-bit counter saturated
2385 assert((uint)reason < _trap_hist_limit, "oob");
2386 return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1; // a stored value of _trap_hist_mask wraps to 0 here, yielding -1
2387 }
2388 // For loops:
2389 static uint trap_reason_limit() { return _trap_hist_limit; }
2390 static uint trap_count_limit() { return _trap_hist_mask; }
2391 uint inc_trap_count(int reason) {
2392 // Count another trap, anywhere in this method.
2393 assert(reason >= 0, "must be single trap");
2394 if ((uint)reason < _trap_hist_limit) {
2395 uint cnt1 = 1 + _trap_hist._array[reason];
2396 if ((cnt1 & _trap_hist_mask) != 0) { // if no counter overflow...
2397 _trap_hist._array[reason] = cnt1;
2398 return cnt1;
2399 } else {
2400 return _trap_hist_mask + (++_nof_overflow_traps); // 8-bit counter saturated: fold into the shared overflow count
2401 }
2402 } else {
2403 // Could not represent the count in the histogram.
2404 return (++_nof_overflow_traps);
2405 }
2406 }
2407
2408 uint overflow_trap_count() const { // traps that could not be represented in _trap_hist (see inc_trap_count)
2409 return _nof_overflow_traps;
2410 }
2411 uint overflow_recompile_count() const {
2412 return _nof_overflow_recompiles;
2413 }
2414 void inc_overflow_recompile_count() {
2415 _nof_overflow_recompiles += 1;
2416 }
2417 uint decompile_count() const { // total nmethod removals for this method
2418 return _nof_decompiles;
2419 }
2420 void inc_decompile_count() {
2421 _nof_decompiles += 1;
2422 if (decompile_count() > (uint)PerMethodRecompilationCutoff) { // too many decompiles: give up on top-tier compilation of this method
2423 method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff");
2424 }
2425 }
2429 void inc_tenure_traps() { // bump the per-method tenure trap counter (_tenure_traps)
2430 _tenure_traps += 1;
2431 }
2432
2433 // Return pointer to area dedicated to parameters in MDO
2434 ParametersTypeData* parameters_type_data() const { // NULL when the method has no parameter profile
2435 assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2436 return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL;
2437 }
2438
2439 int parameters_type_data_di() const { // data index of the parameter area; callers must know it exists (asserted)
2440 assert(_parameters_type_data_di != parameters_uninitialized && _parameters_type_data_di != no_parameters, "no args type data");
2441 return _parameters_type_data_di;
2442 }
2443
2444 // Support for code generation
2445 static ByteSize data_offset() { // field offsets below are for JIT-emitted accesses to the MethodData
2446 return byte_offset_of(MethodData, _data[0]);
2447 }
2448
2449 static ByteSize invocation_counter_offset() {
2450 return byte_offset_of(MethodData, _invocation_counter);
2451 }
2452
2453 static ByteSize backedge_counter_offset() {
2454 return byte_offset_of(MethodData, _backedge_counter);
2455 }
2456
2457 static ByteSize invoke_mask_offset() {
2458 return byte_offset_of(MethodData, _invoke_mask);
2459 }
2460
2461 static ByteSize backedge_mask_offset() {
2462 return byte_offset_of(MethodData, _backedge_mask);
2463 }
2464
2465 static ByteSize parameters_type_data_di_offset() {
2466 return byte_offset_of(MethodData, _parameters_type_data_di);
2467 }
2468
|
523
524 virtual void print_data_on(outputStream* st, const char* extra = NULL) const { // base implementation: every concrete ProfileData subtype must override this
525 ShouldNotReachHere();
526 }
527
528 void print_data_on(outputStream* st, const MethodData* md) const;
529
530 void print_shared(outputStream* st, const char* name, const char* extra) const;
531 void tab(outputStream* st, bool first = false) const;
532 };
533
534 // BitData
535 //
536 // A BitData holds a flag or two in its header.
537 class BitData : public ProfileData {
538 protected:
539 enum {
540 // null_seen:
541 // saw a null operand (cast/aastore/instanceof)
542 null_seen_flag = DataLayout::first_flag + 0
543 #if INCLUDE_JVMCI
544 // bytecode threw any exception
545 , exception_seen_flag = null_seen_flag + 1
546 #endif
547 };
548 enum { bit_cell_count = 0 }; // no additional data fields needed.
549 public:
550 BitData(DataLayout* layout) : ProfileData(layout) {
551 }
552
553 virtual bool is_BitData() const { return true; }
554
555 static int static_cell_count() {
556 return bit_cell_count;
557 }
558
559 virtual int cell_count() const {
560 return static_cell_count();
561 }
562
563 // Accessor
564
565 // The null_seen flag bit is specially known to the interpreter.
566 // Consulting it allows the compiler to avoid setting up null_check traps.
567 bool null_seen() { return flag_at(null_seen_flag); }
568 void set_null_seen() { set_flag_at(null_seen_flag); }
569
570 #if INCLUDE_JVMCI
571 // true if an exception was thrown at the specific BCI
572 bool exception_seen() { return flag_at(exception_seen_flag); }
573 void set_exception_seen() { set_flag_at(exception_seen_flag); } // JVMCI-only: marks this bytecode as having thrown
574 #endif
575
576 // Code generation support
577 static int null_seen_byte_constant() {
578 return flag_number_to_byte_constant(null_seen_flag);
579 }
580
581 static ByteSize bit_data_size() {
582 return cell_offset(bit_cell_count);
583 }
584
585 #ifdef CC_INTERP
586 static int bit_data_size_in_bytes() {
587 return cell_offset_in_bytes(bit_cell_count);
588 }
589
590 static void set_null_seen(DataLayout* layout) {
591 set_flag_at(layout, null_seen_flag);
592 }
593
594 static DataLayout* advance(DataLayout* layout) {
1158 if (has_arguments()) {
1159 _args.clean_weak_klass_links(is_alive_closure);
1160 }
1161 if (has_return()) {
1162 _ret.clean_weak_klass_links(is_alive_closure);
1163 }
1164 }
1165
1166 virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1167 };
1168
1169 // ReceiverTypeData
1170 //
1171 // A ReceiverTypeData is used to access profiling information about a
1172 // dynamic type check. It consists of a counter which counts the total times
1173 // that the check is reached, and a series of (Klass*, count) pairs
1174 // which are used to store a type profile for the receiver of the check.
1175 class ReceiverTypeData : public CounterData {
1176 protected:
1177 enum {
1178 #if INCLUDE_JVMCI
1179 // Description of the different counters
1180 // ReceiverTypeData for instanceof/checkcast/aastore:
1181 // C1/C2: count is incremented on type overflow and decremented for failed type checks
1182 // JVMCI: count decremented for failed type checks and nonprofiled_count is incremented on type overflow
1183 // TODO (chaeubl): in fact, JVMCI should also increment the count for failed type checks to mimic the C1/C2 behavior
1184 // VirtualCallData for invokevirtual/invokeinterface:
1185 // C1/C2: count is incremented on type overflow
1186 // JVMCI: count is incremented on type overflow, nonprofiled_count is incremented on method overflow
1187
1188 // JVMCI is interested in knowing the percentage of type checks involving a type not explicitly in the profile
1189 nonprofiled_count_off_set = counter_cell_count,
1190 receiver0_offset,
1191 #else
1192 receiver0_offset = counter_cell_count,
1193 #endif
1194 count0_offset,
1195 receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
1196 };
1197
1198 public:
1199 ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
1200 assert(layout->tag() == DataLayout::receiver_type_data_tag ||
1201 layout->tag() == DataLayout::virtual_call_data_tag ||
1202 layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1203 }
1204
1205 virtual bool is_ReceiverTypeData() const { return true; }
1206
1207 static int static_cell_count() { // under JVMCI, one extra cell holds nonprofiled_count (see counter description above)
1208 return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count JVMCI_ONLY(+ 1);
1209 }
1210
1211 virtual int cell_count() const {
1212 return static_cell_count();
1213 }
1214
1215 // Direct accessors
1216 static uint row_limit() {
1217 return TypeProfileWidth;
1218 }
1219 static int receiver_cell_index(uint row) {
1220 return receiver0_offset + row * receiver_type_row_cell_count;
1221 }
1222 static int receiver_count_cell_index(uint row) {
1223 return count0_offset + row * receiver_type_row_cell_count;
1224 }
1225
1226 Klass* receiver(uint row) const {
1227 assert(row < row_limit(), "oob");
1228
1250 assert(row < row_limit(), "oob");
1251 // Clear total count - indicator of polymorphic call site.
1252 // The site may look like as monomorphic after that but
1253 // it allow to have more accurate profiling information because
1254 // there was execution phase change since klasses were unloaded.
1255 // If the site is still polymorphic then MDO will be updated
1256 // to reflect it. But it could be the case that the site becomes
1257 // only bimorphic. Then keeping total count not 0 will be wrong.
1258 // Even if we use monomorphic (when it is not) for compilation
1259 // we will only have trap, deoptimization and recompile again
1260 // with updated MDO after executing method in Interpreter.
1261 // An additional receiver will be recorded in the cleaned row
1262 // during next call execution.
1263 //
1264 // Note: our profiling logic works with empty rows in any slot.
1265 // We do sorting a profiling info (ciCallProfile) for compilation.
1266 //
1267 set_count(0);
1268 set_receiver(row, NULL);
1269 set_receiver_count(row, 0);
1270 #if INCLUDE_JVMCI
1271 if (!this->is_VirtualCallData()) {
1272 // if this is a ReceiverTypeData for JVMCI, the nonprofiled_count
1273 // must also be reset (see "Description of the different counters" above)
1274 set_nonprofiled_count(0);
1275 }
1276 #endif
1277 }
1278
1279 // Code generation support
1280 static ByteSize receiver_offset(uint row) {
1281 return cell_offset(receiver_cell_index(row));
1282 }
1283 static ByteSize receiver_count_offset(uint row) {
1284 return cell_offset(receiver_count_cell_index(row));
1285 }
1286 #if INCLUDE_JVMCI
1287 static ByteSize nonprofiled_receiver_count_offset() { // byte offset of the nonprofiled_count cell, for generated code
1288 return cell_offset(nonprofiled_count_off_set);
1289 }
1290 uint nonprofiled_count() const { // checks/calls whose type or method did not fit in the profile rows (see counter description above)
1291 return uint_at(nonprofiled_count_off_set);
1292 }
1293 void set_nonprofiled_count(uint count) {
1294 set_uint_at(nonprofiled_count_off_set, count);
1295 }
1296 #endif // INCLUDE_JVMCI
1297 static ByteSize receiver_type_data_size() {
1298 return cell_offset(static_cell_count());
1299 }
1300
1301 // GC support
1302 virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
1303
1304 #ifdef CC_INTERP
1305 static int receiver_type_data_size_in_bytes() {
1306 return cell_offset_in_bytes(static_cell_count());
1307 }
1308
1309 static Klass *receiver_unchecked(DataLayout* layout, uint row) {
1310 Klass* recv = (Klass*)layout->cell_at(receiver_cell_index(row));
1311 return recv;
1312 }
1313
1314 static void increment_receiver_count_no_overflow(DataLayout* layout, Klass *rcvr) {
1315 const int num_rows = row_limit();
1316 // Receiver already exists?
1341 void print_receiver_data_on(outputStream* st) const;
1342 void print_data_on(outputStream* st, const char* extra = NULL) const;
1343 };
1344
1345 // VirtualCallData
1346 //
1347 // A VirtualCallData is used to access profiling information about a
1348 // virtual call. For now, it has nothing more than a ReceiverTypeData.
1349 class VirtualCallData : public ReceiverTypeData {
1350 public:
1351 VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) { // also used for the call-with-type-profile variant (second tag)
1352 assert(layout->tag() == DataLayout::virtual_call_data_tag ||
1353 layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1354 }
1355
1356 virtual bool is_VirtualCallData() const { return true; }
1357
1358 static int static_cell_count() {
1359 // At this point we could add more profile state, e.g., for arguments.
1360 // But for now it's the same size as the base record type.
1361 return ReceiverTypeData::static_cell_count() JVMCI_ONLY(+ (uint) MethodProfileWidth * receiver_type_row_cell_count);
1362 }
1363
1364 virtual int cell_count() const { // fixed-size record: dynamic size equals static size
1365 return static_cell_count();
1366 }
1367
1368 // Direct accessors
1369 static ByteSize virtual_call_data_size() { // total record size, for code generation
1370 return cell_offset(static_cell_count());
1371 }
1372
1373 #ifdef CC_INTERP
1374 static int virtual_call_data_size_in_bytes() { // byte-size variant used by the C++ interpreter
1375 return cell_offset_in_bytes(static_cell_count());
1376 }
1377
1378 static DataLayout* advance(DataLayout* layout) { // step past this record to the next DataLayout
1379 return (DataLayout*) (((address)layout) + (ssize_t)VirtualCallData::virtual_call_data_size_in_bytes());
1380 }
1381 #endif // CC_INTERP
1382
1383 #if INCLUDE_JVMCI
1384 static ByteSize method_offset(uint row) { // byte offset of a method row's Method* cell, for generated code
1385 return cell_offset(method_cell_index(row));
1386 }
1387 static ByteSize method_count_offset(uint row) {
1388 return cell_offset(method_count_cell_index(row));
1389 }
1390 static int method_cell_index(uint row) { // method rows are laid out after the TypeProfileWidth receiver rows
1391 return receiver0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count;
1392 }
1393 static int method_count_cell_index(uint row) {
1394 return count0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count;
1395 }
1396 static uint method_row_limit() { // number of (Method*, count) rows (MethodProfileWidth)
1397 return MethodProfileWidth;
1398 }
1399
1400 Method* method(uint row) const {
1401 assert(row < method_row_limit(), "oob");
1402
1403 Method* method = (Method*)intptr_at(method_cell_index(row));
1404 assert(method == NULL || method->is_method(), "must be");
1405 return method;
1406 }
1407
1408 uint method_count(uint row) const {
1409 assert(row < method_row_limit(), "oob");
1410 return uint_at(method_count_cell_index(row));
1411 }
1412
1413 void set_method(uint row, Method* m) {
1414 assert(row < method_row_limit(), "oob"); // row is already unsigned; redundant (uint) cast dropped for consistency with the sibling asserts
1415 set_intptr_at(method_cell_index(row), (uintptr_t)m);
1416 }
1417
1418 void set_method_count(uint row, uint count) {
1419 assert(row < method_row_limit(), "oob");
1420 set_uint_at(method_count_cell_index(row), count);
1421 }
1422
1423 void clear_method_row(uint row) {
1424 assert(row < method_row_limit(), "oob");
1425 // Clear total count - indicator of polymorphic call site (see comment for clear_row() in ReceiverTypeData).
1426 set_nonprofiled_count(0);
1427 set_method(row, NULL);
1428 set_method_count(row, 0);
1429 }
1430
1431 // GC support
1432 virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
1433
1434 // Redefinition support
1435 virtual void clean_weak_method_links();
1436 #endif // INCLUDE_JVMCI
1437
1438 void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
1439 void print_data_on(outputStream* st, const char* extra = NULL) const;
1440 };
1441
1442 // VirtualCallTypeData
1443 //
1444 // A VirtualCallTypeData is used to access profiling information about
1445 // a virtual call for which we collect type information about
1446 // arguments and return value.
1447 class VirtualCallTypeData : public VirtualCallData {
1448 private:
1449 // entries for arguments if any
1450 TypeStackSlotEntries _args;
1451 // entry for return type if any
1452 ReturnTypeEntry _ret;
1453
1454 int cell_count_global_offset() const {
1455 return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1456 }
1457
1458 // number of cells not counting the header
2134 private:
2135 friend class ProfileData;
2136
2137 // Back pointer to the Method*
2138 Method* _method;
2139
2140 // Size of this oop in bytes
2141 int _size;
2142
2143 // Cached hint for bci_to_dp and bci_to_data
2144 int _hint_di;
2145
2146 Mutex _extra_data_lock;
2147
2148 MethodData(methodHandle method, int size, TRAPS);
2149 public:
2150 static MethodData* allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS);
2151 MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {} // For ciMethodData (stray ';' after the body removed: it formed an empty member declaration)
2152
2153 bool is_methodData() const volatile { return true; }
2154 void initialize();
2155
2156 // Whole-method sticky bits and flags
2157 enum {
2158 _trap_hist_limit = 22 JVMCI_ONLY(+5), // decoupled from Deoptimization::Reason_LIMIT
2159 _trap_hist_mask = max_jubyte,
2160 _extra_data_count = 4 // extra DataLayout headers, for trap history
2161 }; // Public flag values
2162 private:
2163 uint _nof_decompiles; // count of all nmethod removals
2164 uint _nof_overflow_recompiles; // recompile count, excluding recomp. bits
2165 uint _nof_overflow_traps; // trap count, excluding _trap_hist
2166 union {
2167 intptr_t _align;
2168 u1 _array[JVMCI_ONLY(2*) _trap_hist_limit]; // doubled under JVMCI: trap_count()/inc_trap_count() assert reason < JVMCI_ONLY(2*) _trap_hist_limit and index _array directly, so the array must cover that range to avoid out-of-bounds access
2169 } _trap_hist;
2170
2171 // Support for interprocedural escape analysis, from Thomas Kotzmann.
2172 intx _eflags; // flags on escape information
2173 intx _arg_local; // bit set of non-escaping arguments
2174 intx _arg_stack; // bit set of stack-allocatable arguments
2175 intx _arg_returned; // bit set of returned arguments
2176
2177 int _creation_mileage; // method mileage at MDO creation
2178
2186 // Counter values at the time profiling started.
2187 int _invocation_counter_start;
2188 int _backedge_counter_start;
2189 uint _tenure_traps;
2190 int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog
2191 int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog
2192
2193 #if INCLUDE_RTM_OPT
2194 // State of RTM code generation during compilation of the method
2195 int _rtm_state;
2196 #endif
2197
2198 // Number of loops and blocks is computed when compiling the first
2199 // time with C1. It is used to determine if method is trivial.
2200 short _num_loops;
2201 short _num_blocks;
2202 // Does this method contain anything worth profiling?
2203 enum WouldProfile {unknown, no_profile, profile};
2204 WouldProfile _would_profile;
2205
2206 #if INCLUDE_JVMCI
2207 // Support for HotSpotMethodData.setCompiledIRSize(int)
2208 int _jvmci_ir_size;
2209 #endif
2210
2211 // Size of _data array in bytes. (Excludes header and extra_data fields.)
2212 int _data_size;
2213
2214 // data index for the area dedicated to parameters. -1 if no
2215 // parameter profiling.
2216 enum { no_parameters = -2, parameters_uninitialized = -1 };
2217 int _parameters_type_data_di;
2218 int parameters_size_in_bytes() const {
2219 ParametersTypeData* param = parameters_type_data();
2220 return param == NULL ? 0 : param->size_in_bytes();
2221 }
2222
2223 // Beginning of the data entries
2224 intptr_t _data[1];
2225
2226 // Helper for size computation
2227 static int compute_data_size(BytecodeStream* stream);
2228 static int bytecode_cell_count(Bytecodes::Code code);
2229 static bool is_speculative_trap_bytecode(Bytecodes::Code code);
2230 enum { no_profile_data = -1, variable_cell_count = -2 };
2469 }
2470 // If SpeculativeTrapData allocation fails try to allocate a
2471 // regular entry
2472 data = bci_to_data(bci);
2473 if (data != NULL) {
2474 return data;
2475 }
2476 return bci_to_extra_data(bci, NULL, true);
2477 }
2478
2479 // Add a handful of extra data records, for trap tracking.
2480 DataLayout* extra_data_base() const { return limit_data_position(); }
2481 DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
2482 DataLayout* args_data_limit() const { return (DataLayout*)((address)this + size_in_bytes() -
2483 parameters_size_in_bytes()); }
2484 int extra_data_size() const { return (address)extra_data_limit() - (address)extra_data_base(); }
2485 static DataLayout* next_extra(DataLayout* dp);
2486
2487 // Return (uint)-1 for overflow.
2488 uint trap_count(int reason) const { // per-reason trap count; (uint)-1 means the 8-bit counter saturated
2489 assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob"); // NOTE(review): under JVMCI the assert admits reason up to 2*_trap_hist_limit; _trap_hist._array must be sized to match or this read is out of bounds
2490 return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1; // a stored value of _trap_hist_mask wraps to 0 here, yielding -1
2491 }
2492 // For loops:
2493 static uint trap_reason_limit() { return _trap_hist_limit; }
2494 static uint trap_count_limit() { return _trap_hist_mask; }
2495 uint inc_trap_count(int reason) {
2496 // Count another trap, anywhere in this method.
2497 assert(reason >= 0, "must be single trap");
2498 assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob"); // NOTE(review): under JVMCI reason may reach 2*_trap_hist_limit; _trap_hist._array must be sized to match or the store below is out of bounds
2499 uint cnt1 = 1 + _trap_hist._array[reason];
2500 if ((cnt1 & _trap_hist_mask) != 0) { // if no counter overflow...
2501 _trap_hist._array[reason] = cnt1;
2502 return cnt1;
2503 } else {
2504 return _trap_hist_mask + (++_nof_overflow_traps); // 8-bit counter saturated: fold into the shared overflow count
2505 }
2506 }
2507
2508 uint overflow_trap_count() const {
2509 return _nof_overflow_traps;
2510 }
2511 uint overflow_recompile_count() const {
2512 return _nof_overflow_recompiles;
2513 }
2514 void inc_overflow_recompile_count() {
2515 _nof_overflow_recompiles += 1;
2516 }
2517 uint decompile_count() const {
2518 return _nof_decompiles;
2519 }
2520 void inc_decompile_count() {
2521 _nof_decompiles += 1;
2522 if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
2523 method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff");
2524 }
2525 }
2529 void inc_tenure_traps() {
2530 _tenure_traps += 1;
2531 }
2532
2533 // Return pointer to area dedicated to parameters in MDO
2534 ParametersTypeData* parameters_type_data() const {
2535 assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2536 return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL;
2537 }
2538
2539 int parameters_type_data_di() const {
2540 assert(_parameters_type_data_di != parameters_uninitialized && _parameters_type_data_di != no_parameters, "no args type data");
2541 return _parameters_type_data_di;
2542 }
2543
2544 // Support for code generation
2545 static ByteSize data_offset() {
2546 return byte_offset_of(MethodData, _data[0]);
2547 }
2548
2549 static ByteSize trap_history_offset() { // offset of the trap-history byte array, for JIT-emitted accesses
2550 return byte_offset_of(MethodData, _trap_hist._array);
2551 }
2552
2553 static ByteSize invocation_counter_offset() {
2554 return byte_offset_of(MethodData, _invocation_counter);
2555 }
2556
2557 static ByteSize backedge_counter_offset() {
2558 return byte_offset_of(MethodData, _backedge_counter);
2559 }
2560
2561 static ByteSize invoke_mask_offset() {
2562 return byte_offset_of(MethodData, _invoke_mask);
2563 }
2564
2565 static ByteSize backedge_mask_offset() {
2566 return byte_offset_of(MethodData, _backedge_mask);
2567 }
2568
2569 static ByteSize parameters_type_data_di_offset() {
2570 return byte_offset_of(MethodData, _parameters_type_data_di);
2571 }
2572
|