diff --git a/.github/workflows/submit.yml b/.github/workflows/submit.yml
index ae5acf5000a9fe9de453cb84cb592aa436d35729..aca69121e21fd59048fbced30e8bcac27a7de149 100644
--- a/.github/workflows/submit.yml
+++ b/.github/workflows/submit.yml
@@ -536,10 +536,6 @@ jobs:
echo "cross_flags=
--openjdk-target=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}}
--with-sysroot=${HOME}/sysroot-${{ matrix.debian-arch }}/
- --with-toolchain-path=${HOME}/sysroot-${{ matrix.debian-arch }}/
- --with-freetype-lib=${HOME}/sysroot-${{ matrix.debian-arch }}/usr/lib/${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}}/
- --with-freetype-include=${HOME}/sysroot-${{ matrix.debian-arch }}/usr/include/freetype2/
- --x-libraries=${HOME}/sysroot-${{ matrix.debian-arch }}/usr/lib/${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}}/
" >> $GITHUB_ENV
if: matrix.debian-arch != ''
diff --git a/doc/building.html b/doc/building.html
index 0522bc4888bbebce06aa99fb6aa83ac798d0eea8..4d5cdf67c3a35907b86116616bf28084213f7c69 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -849,7 +849,7 @@ sudo mv /tmp/configure /usr/local/bin
<p>If you update the repository and part of the <code>configure</code> script has changed, the build system will force you to re-run <code>configure</code>.</p>
-<p>From time to time, you will also need to modify the command line to <code>configure</code> due to changes. Use <code>make print-configure</code> to show the command line used for your current configuration.</p>
+<p>From time to time, you will also need to modify the command line to <code>configure</code> due to changes. Use <code>make print-configuration</code> to show the command line used for your current configuration.</p>
<h3 id="using-fine-grained-make-targets">Using Fine-Grained Make Targets</h3>
<p>The default behavior for <code>make</code> is to create consistent and correct output, at the expense of build speed, if necessary. If you are prepared to take some risk of an incorrect build, and know enough of the system to understand how things build and interact, you can speed up the build process considerably by instructing <code>make</code> to only build a portion of the product.</p>
diff --git a/doc/building.md b/doc/building.md
index 926148d463df88935feabc4701e0e8290093b9fb..69b7fe640e81f1cf4b984c42bbd42c7f952e3a54 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -1556,8 +1556,8 @@ update. This might speed up the build, but comes at the risk of an incorrect
build result. This is only recommended if you know what you're doing.
From time to time, you will also need to modify the command line to `configure`
-due to changes. Use `make print-configure` to show the command line used for
-your current configuration.
+due to changes. Use `make print-configuration` to show the command line used
+for your current configuration.
### Using Fine-Grained Make Targets
diff --git a/make/CompileJavaModules.gmk b/make/CompileJavaModules.gmk
index c039ad30b002c430e90a43c8f6fe4c9ee41b0aaf..b4ee5e78adfb7fc220c3d0cc4b678f5837840d43 100644
--- a/make/CompileJavaModules.gmk
+++ b/make/CompileJavaModules.gmk
@@ -86,7 +86,7 @@ CreateHkTargets = \
################################################################################
# Include module specific build settings
--include $(TOPDIR)/make/modules/$(MODULE)/Java.gmk
+-include Java.gmk
################################################################################
# Setup the main compilation
diff --git a/make/Docs.gmk b/make/Docs.gmk
index 89cea6a7c3fea99b7186caca0c3009474304c0b6..295cf7d9119d64e1b92b449ed10ff37bc61f7bb9 100644
--- a/make/Docs.gmk
+++ b/make/Docs.gmk
@@ -99,7 +99,7 @@ JAVADOC_TAGS := \
REFERENCE_TAGS := $(JAVADOC_TAGS)
# Which doclint checks to ignore
-JAVADOC_DISABLED_DOCLINT := accessibility html missing syntax reference
+JAVADOC_DISABLED_DOCLINT := missing
# The initial set of options for javadoc
JAVADOC_OPTIONS := -use -keywords -notimestamp \
@@ -261,6 +261,7 @@ endef
# SHORT_NAME - The short name of this documentation collection
# LONG_NAME - The long name of this documentation collection
# TARGET_DIR - Where to store the output
+#   OTHER_VERSIONS - URL to a page listing other versions of this documentation
#
SetupApiDocsGeneration = $(NamedParamsMacroTemplate)
define SetupApiDocsGenerationBody
@@ -297,10 +298,16 @@ define SetupApiDocsGenerationBody
# Ignore the doclint warnings in the W3C DOM package
$1_OPTIONS += -Xdoclint/package:-org.w3c.*
+  ifneq ($$($1_OTHER_VERSIONS), )
+    $1_LINKED_SHORT_NAME := <a href="$$($1_OTHER_VERSIONS)">$$($1_SHORT_NAME)</a>
+  else
+    $1_LINKED_SHORT_NAME := $$($1_SHORT_NAME)
+  endif
+
-  $1_HEADER_TITLE := <strong>$$($1_SHORT_NAME)</strong> \
+  $1_HEADER_TITLE := <strong>$$($1_LINKED_SHORT_NAME)</strong> \
      $$(DRAFT_MARKER_STR)
$1_OPTIONS += -doctitle '$$($1_DOC_TITLE)'
@@ -438,6 +445,7 @@ $(eval $(call SetupApiDocsGeneration, JDK_API, \
SHORT_NAME := $(JDK_SHORT_NAME), \
LONG_NAME := $(JDK_LONG_NAME), \
TARGET_DIR := $(DOCS_OUTPUTDIR)/api, \
+ OTHER_VERSIONS := $(OTHER_JDK_VERSIONS_URL), \
))
# Targets generated are returned in JDK_API_JAVADOC_TARGETS and
diff --git a/make/Main.gmk b/make/Main.gmk
index cdb4be67c560e99f44b562475df0f45b09ea0775..d0c81c84fed52e3a4f77c313db28f5a53b25c5bc 100644
--- a/make/Main.gmk
+++ b/make/Main.gmk
@@ -187,6 +187,7 @@ JAVA_TARGETS := $(addsuffix -java, $(JAVA_MODULES))
define DeclareCompileJavaRecipe
$1-java:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) \
+ $(patsubst %,-I%/modules/$1,$(PHASE_MAKEDIRS)) \
-f CompileJavaModules.gmk MODULE=$1)
endef
diff --git a/make/MainSupport.gmk b/make/MainSupport.gmk
index 44296b86bbc3bb192ae2d46defa618b0df933adb..34137c502d4ae6a89e94a6db2185816c18c20363 100644
--- a/make/MainSupport.gmk
+++ b/make/MainSupport.gmk
@@ -150,9 +150,7 @@ define DeclareRecipeForModuleMakefile
$2-$$($1_TARGET_SUFFIX):
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) \
-f ModuleWrapper.gmk -I $$(TOPDIR)/make/common/modules \
- $$(addprefix -I, $$(PHASE_MAKEDIRS) \
- $$(addsuffix /modules/$2, $$(PHASE_MAKEDIRS)) \
- ) \
+ $$(patsubst %,-I%/modules/$2,$$(PHASE_MAKEDIRS)) \
MODULE=$2 MAKEFILE_PREFIX=$$($1_FILE_PREFIX) $$($1_EXTRA_ARGS))
endef
diff --git a/make/autoconf/lib-freetype.m4 b/make/autoconf/lib-freetype.m4
index 6bfc0ae6f15cea8d6f4ac80dce8615d2b84b6e49..6a7109342477be106a6676fa6452f884dd3f4cb2 100644
--- a/make/autoconf/lib-freetype.m4
+++ b/make/autoconf/lib-freetype.m4
@@ -192,6 +192,16 @@ AC_DEFUN_ONCE([LIB_SETUP_FREETYPE],
[$FREETYPE_BASE_DIR/lib], [well-known location])
fi
+ if test "x$FOUND_FREETYPE" != "xyes" ; then
+ LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include],
+ [$FREETYPE_BASE_DIR/lib/$OPENJDK_TARGET_CPU-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI], [well-known location])
+ fi
+
+ if test "x$FOUND_FREETYPE" != "xyes" ; then
+ LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include],
+ [$FREETYPE_BASE_DIR/lib/$OPENJDK_TARGET_CPU_AUTOCONF-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI], [well-known location])
+ fi
+
if test "x$FOUND_FREETYPE" != "xyes" ; then
FREETYPE_BASE_DIR="$SYSROOT/usr/X11"
LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include],
diff --git a/make/autoconf/lib-x11.m4 b/make/autoconf/lib-x11.m4
index f0f96f39c3e71b16217499c864f2e738647d2f67..203586d6317bafcf505c30d9ca4fec63d45bc475 100644
--- a/make/autoconf/lib-x11.m4
+++ b/make/autoconf/lib-x11.m4
@@ -68,6 +68,10 @@ AC_DEFUN_ONCE([LIB_SETUP_X11],
x_libraries="$SYSROOT/usr/lib64"
elif test -f "$SYSROOT/usr/lib/libX11.so"; then
x_libraries="$SYSROOT/usr/lib"
+      elif test -f "$SYSROOT/usr/lib/$OPENJDK_TARGET_CPU-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI/libX11.so"; then
+        x_libraries="$SYSROOT/usr/lib/$OPENJDK_TARGET_CPU-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI"
+      elif test -f "$SYSROOT/usr/lib/$OPENJDK_TARGET_CPU_AUTOCONF-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI/libX11.so"; then
+        x_libraries="$SYSROOT/usr/lib/$OPENJDK_TARGET_CPU_AUTOCONF-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI"
fi
fi
fi
diff --git a/make/autoconf/platform.m4 b/make/autoconf/platform.m4
index 1890491773bf226217efd9bfe34bb469781b3a81..181fdbf701d858f1f71a228ae10ca1a2511c48e1 100644
--- a/make/autoconf/platform.m4
+++ b/make/autoconf/platform.m4
@@ -238,6 +238,33 @@ AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_LIBC],
esac
])
+# Support macro for PLATFORM_EXTRACT_TARGET_AND_BUILD.
+# Extracts the ABI part of the autoconf style OS name (e.g. gnueabihf)
+# into VAR_ABI.
+AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_ABI],
+[
+ case "$1" in
+ *linux*-musl)
+ VAR_ABI=musl
+ ;;
+ *linux*-gnu)
+ VAR_ABI=gnu
+ ;;
+ *linux*-gnueabi)
+ VAR_ABI=gnueabi
+ ;;
+ *linux*-gnueabihf)
+ VAR_ABI=gnueabihf
+ ;;
+ *linux*-gnuabi64)
+ VAR_ABI=gnuabi64
+ ;;
+ *)
+ VAR_ABI=default
+ ;;
+ esac
+])
+
# Expects $host_os $host_cpu $build_os and $build_cpu
# and $with_target_bits to have been setup!
#
@@ -259,6 +286,7 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
PLATFORM_EXTRACT_VARS_FROM_OS($build_os)
PLATFORM_EXTRACT_VARS_FROM_CPU($build_cpu)
PLATFORM_EXTRACT_VARS_FROM_LIBC($build_os)
+ PLATFORM_EXTRACT_VARS_FROM_ABI($build_os)
# ..and setup our own variables. (Do this explicitly to facilitate searching)
OPENJDK_BUILD_OS="$VAR_OS"
if test "x$VAR_OS_TYPE" != x; then
@@ -275,7 +303,9 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
OPENJDK_BUILD_CPU_ARCH="$VAR_CPU_ARCH"
OPENJDK_BUILD_CPU_BITS="$VAR_CPU_BITS"
OPENJDK_BUILD_CPU_ENDIAN="$VAR_CPU_ENDIAN"
+ OPENJDK_BUILD_CPU_AUTOCONF="$build_cpu"
OPENJDK_BUILD_LIBC="$VAR_LIBC"
+ OPENJDK_BUILD_ABI="$VAR_ABI"
AC_SUBST(OPENJDK_BUILD_OS)
AC_SUBST(OPENJDK_BUILD_OS_TYPE)
AC_SUBST(OPENJDK_BUILD_OS_ENV)
@@ -283,7 +313,9 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
AC_SUBST(OPENJDK_BUILD_CPU_ARCH)
AC_SUBST(OPENJDK_BUILD_CPU_BITS)
AC_SUBST(OPENJDK_BUILD_CPU_ENDIAN)
+ AC_SUBST(OPENJDK_BUILD_CPU_AUTOCONF)
AC_SUBST(OPENJDK_BUILD_LIBC)
+ AC_SUBST(OPENJDK_BUILD_ABI)
AC_MSG_CHECKING([openjdk-build os-cpu])
AC_MSG_RESULT([$OPENJDK_BUILD_OS-$OPENJDK_BUILD_CPU])
@@ -297,6 +329,7 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
PLATFORM_EXTRACT_VARS_FROM_OS($host_os)
PLATFORM_EXTRACT_VARS_FROM_CPU($host_cpu)
PLATFORM_EXTRACT_VARS_FROM_LIBC($host_os)
+ PLATFORM_EXTRACT_VARS_FROM_ABI($host_os)
# ... and setup our own variables. (Do this explicitly to facilitate searching)
OPENJDK_TARGET_OS="$VAR_OS"
if test "x$VAR_OS_TYPE" != x; then
@@ -313,8 +346,10 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
OPENJDK_TARGET_CPU_ARCH="$VAR_CPU_ARCH"
OPENJDK_TARGET_CPU_BITS="$VAR_CPU_BITS"
OPENJDK_TARGET_CPU_ENDIAN="$VAR_CPU_ENDIAN"
+ OPENJDK_TARGET_CPU_AUTOCONF="$host_cpu"
OPENJDK_TARGET_OS_UPPERCASE=`$ECHO $OPENJDK_TARGET_OS | $TR 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'`
OPENJDK_TARGET_LIBC="$VAR_LIBC"
+ OPENJDK_TARGET_ABI="$VAR_ABI"
AC_SUBST(OPENJDK_TARGET_OS)
AC_SUBST(OPENJDK_TARGET_OS_TYPE)
@@ -324,7 +359,9 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
AC_SUBST(OPENJDK_TARGET_CPU_ARCH)
AC_SUBST(OPENJDK_TARGET_CPU_BITS)
AC_SUBST(OPENJDK_TARGET_CPU_ENDIAN)
+ AC_SUBST(OPENJDK_TARGET_CPU_AUTOCONF)
AC_SUBST(OPENJDK_TARGET_LIBC)
+ AC_SUBST(OPENJDK_TARGET_ABI)
AC_MSG_CHECKING([openjdk-target os-cpu])
AC_MSG_RESULT([$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
diff --git a/make/conf/javadoc.conf b/make/conf/javadoc.conf
index df25452533a9b380ad1dffae9d23a795f5bd9bfd..6c92e40329afaec04f9a7f303f3b877ef04ba407 100644
--- a/make/conf/javadoc.conf
+++ b/make/conf/javadoc.conf
@@ -28,3 +28,4 @@ BUG_SUBMIT_URL=https://bugreport.java.com/bugreport/
COPYRIGHT_URL=legal/copyright.html
LICENSE_URL=https://www.oracle.com/java/javase/terms/license/java$(VERSION_NUMBER)speclicense.html
REDISTRIBUTION_URL=https://www.oracle.com/technetwork/java/redist-137594.html
+OTHER_JDK_VERSIONS_URL=https://docs.oracle.com/en/java/javase/index.html
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index b0761dd0ba097f46b8777f43b9319c760f434b5a..f1995e76e299644dd27c272b01f4f4eac9ee00df 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -477,7 +477,6 @@ var getJibProfilesProfiles = function (input, common, data) {
dependencies: ["devkit", "gtest", "build_devkit", "pandoc"],
configure_args: [
"--openjdk-target=aarch64-linux-gnu",
- "--disable-jvm-feature-jvmci",
],
},
@@ -1115,7 +1114,7 @@ var getJibProfilesDependencies = function (input, common) {
jmh: {
organization: common.organization,
ext: "tar.gz",
- revision: "1.21+1.0"
+ revision: "1.28+1.0"
},
jcov: {
diff --git a/make/data/characterdata/CharacterData00.java.template b/make/data/characterdata/CharacterData00.java.template
index 89a36023d35b02e4eeb2d030377fc9ad26e018ee..5705297a53ef5d4448c9745a81addcc72a678bb5 100644
--- a/make/data/characterdata/CharacterData00.java.template
+++ b/make/data/characterdata/CharacterData00.java.template
@@ -84,16 +84,6 @@ class CharacterData00 extends CharacterData {
return (props & $$maskType);
}
- boolean isOtherLowercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherLowercase) != 0;
- }
-
- boolean isOtherUppercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherUppercase) != 0;
- }
-
boolean isOtherAlphabetic(int ch) {
int props = getPropertiesEx(ch);
return (props & $$maskOtherAlphabetic) != 0;
@@ -765,13 +755,13 @@ class CharacterData00 extends CharacterData {
}
boolean isLowerCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.LOWERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0;
}
boolean isUpperCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.UPPERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherUppercase) != 0;
}
boolean isWhitespace(int ch) {
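
Taken together, this hunk and the identical ones in the other CharacterData templates below fold the old isOtherLowercase/isOtherUppercase checks directly into isLowerCase/isUpperCase, so java.lang.Character now honors the Unicode Other_Lowercase and Other_Uppercase properties in these queries. A minimal sketch of the observable difference (the Latin-1 code points come from the `// 0xaa, 0xba` comment in the CharacterDataLatin1 hunk further down):

    public class CaseQueryDemo {
        public static void main(String[] args) {
            // U+00AA FEMININE ORDINAL INDICATOR: general category Lo, but it
            // carries the Unicode Other_Lowercase property.
            System.out.println(Character.isLowerCase('\u00AA')); // false before, true after
            // U+2160 ROMAN NUMERAL ONE: general category Nl, Other_Uppercase.
            System.out.println(Character.isUpperCase('\u2160')); // false before, true after
        }
    }
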
diff --git a/make/data/characterdata/CharacterData01.java.template b/make/data/characterdata/CharacterData01.java.template
index 430fde0ae960a9a1f1f924b2d7f8ce9be538481c..a44450b37ea6f5e71b11bf77c95f74fd03b3bd3a 100644
--- a/make/data/characterdata/CharacterData01.java.template
+++ b/make/data/characterdata/CharacterData01.java.template
@@ -83,16 +83,6 @@ class CharacterData01 extends CharacterData {
return (props & $$maskType);
}
- boolean isOtherLowercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherLowercase) != 0;
- }
-
- boolean isOtherUppercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherUppercase) != 0;
- }
-
boolean isOtherAlphabetic(int ch) {
int props = getPropertiesEx(ch);
return (props & $$maskOtherAlphabetic) != 0;
@@ -503,13 +493,13 @@ class CharacterData01 extends CharacterData {
}
boolean isLowerCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.LOWERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0;
}
boolean isUpperCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.UPPERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherUppercase) != 0;
}
boolean isWhitespace(int ch) {
diff --git a/make/data/characterdata/CharacterData02.java.template b/make/data/characterdata/CharacterData02.java.template
index 57289ed36a550d9c19cb599187ae1baedeee1845..739bc9d32ab052f11111f97e15389b24ff44d90a 100644
--- a/make/data/characterdata/CharacterData02.java.template
+++ b/make/data/characterdata/CharacterData02.java.template
@@ -77,16 +77,6 @@ class CharacterData02 extends CharacterData {
return props;
}
- boolean isOtherLowercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherLowercase) != 0;
- }
-
- boolean isOtherUppercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherUppercase) != 0;
- }
-
boolean isOtherAlphabetic(int ch) {
int props = getPropertiesEx(ch);
return (props & $$maskOtherAlphabetic) != 0;
@@ -222,15 +212,16 @@ class CharacterData02 extends CharacterData {
}
boolean isLowerCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.LOWERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0;
}
boolean isUpperCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.UPPERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherUppercase) != 0;
}
+
boolean isWhitespace(int ch) {
return (getProperties(ch) & $$maskIdentifierInfo) == $$valueJavaWhitespace;
}
diff --git a/make/data/characterdata/CharacterData03.java.template b/make/data/characterdata/CharacterData03.java.template
index 730169b029009e95448eb2231b4a16cfa1780f0b..06d4dfbdc2cfb0bb5debd7b7006c228406b7ed95 100644
--- a/make/data/characterdata/CharacterData03.java.template
+++ b/make/data/characterdata/CharacterData03.java.template
@@ -77,16 +77,6 @@ class CharacterData03 extends CharacterData {
return props;
}
- boolean isOtherLowercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherLowercase) != 0;
- }
-
- boolean isOtherUppercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherUppercase) != 0;
- }
-
boolean isOtherAlphabetic(int ch) {
int props = getPropertiesEx(ch);
return (props & $$maskOtherAlphabetic) != 0;
@@ -222,13 +212,13 @@ class CharacterData03 extends CharacterData {
}
boolean isLowerCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.LOWERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0;
}
boolean isUpperCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.UPPERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherUppercase) != 0;
}
boolean isWhitespace(int ch) {
diff --git a/make/data/characterdata/CharacterData0E.java.template b/make/data/characterdata/CharacterData0E.java.template
index d0e2b772525ff51880e470747dcd13234a386d76..aa6db8469a0c04912af97b1b2a697283eb448a2b 100644
--- a/make/data/characterdata/CharacterData0E.java.template
+++ b/make/data/characterdata/CharacterData0E.java.template
@@ -77,16 +77,6 @@ class CharacterData0E extends CharacterData {
return props;
}
- boolean isOtherLowercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherLowercase) != 0;
- }
-
- boolean isOtherUppercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherUppercase) != 0;
- }
-
boolean isOtherAlphabetic(int ch) {
int props = getPropertiesEx(ch);
return (props & $$maskOtherAlphabetic) != 0;
@@ -222,13 +212,13 @@ class CharacterData0E extends CharacterData {
}
boolean isLowerCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.LOWERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0;
}
boolean isUpperCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.UPPERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherUppercase) != 0;
}
boolean isWhitespace(int ch) {
diff --git a/make/data/characterdata/CharacterDataLatin1.java.template b/make/data/characterdata/CharacterDataLatin1.java.template
index 70559bdc346d5862701d307cee593f4ce22adff9..c2ff37321e517e517d7bbee5fe09595eb4ed2cb3 100644
--- a/make/data/characterdata/CharacterDataLatin1.java.template
+++ b/make/data/characterdata/CharacterDataLatin1.java.template
@@ -87,24 +87,13 @@ class CharacterDataLatin1 extends CharacterData {
@IntrinsicCandidate
boolean isLowerCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.LOWERCASE_LETTER;
+ return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER
+ || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0; // 0xaa, 0xba
}
@IntrinsicCandidate
boolean isUpperCase(int ch) {
- int props = getProperties(ch);
- return (props & $$maskType) == Character.UPPERCASE_LETTER;
- }
-
- boolean isOtherLowercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherLowercase) != 0;
- }
-
- boolean isOtherUppercase(int ch) {
- int props = getPropertiesEx(ch);
- return (props & $$maskOtherUppercase) != 0;
+ return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER;
}
boolean isOtherAlphabetic(int ch) {
@@ -290,6 +279,6 @@ class CharacterDataLatin1 extends CharacterData {
static {
$$Initializers
- }
+ }
}
diff --git a/make/devkit/createJMHBundle.sh b/make/devkit/createJMHBundle.sh
index b460ee75311a6a1e0cc656b6525e27a2ccf5fad9..9e0b9c06e4f1fc421f124845751215e44b39a5cd 100644
--- a/make/devkit/createJMHBundle.sh
+++ b/make/devkit/createJMHBundle.sh
@@ -26,7 +26,7 @@
# Create a bundle in the build directory, containing what's needed to
# build and run JMH microbenchmarks from the OpenJDK build.
-JMH_VERSION=1.26
+JMH_VERSION=1.28
COMMONS_MATH3_VERSION=3.2
JOPT_SIMPLE_VERSION=4.6
diff --git a/make/jdk/src/classes/build/tools/taglet/JSpec.java b/make/jdk/src/classes/build/tools/taglet/JSpec.java
index 0d7bfc3e776c23dbbfca838a50534ed8b2098916..1fdd224ae777129031322feb547e71b1c1b58215 100644
--- a/make/jdk/src/classes/build/tools/taglet/JSpec.java
+++ b/make/jdk/src/classes/build/tools/taglet/JSpec.java
@@ -206,7 +206,7 @@ public class JSpec implements Taglet {
private String escape(String s) {
return s.replace("&", "&amp;")
        .replace("<", "&lt;")
-                .replace(">", "&gt");
+                .replace(">", "&gt;");
}
}).visit(trees, new StringBuilder()).toString();
}
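
(The entities in the hunk above were mangled by HTML rendering and are reconstructed here; read this way, the one-line fix adds the missing semicolon to the `&gt` entity.) A hedged sketch of the difference under that assumption:

    // Hypothetical before/after of the escape fix, assuming the old code
    // emitted "&gt" without the trailing semicolon:
    String before = "a > b".replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt");
    String after  = "a > b".replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;");
    // before: "a &gt b"  (malformed entity; browsers may still render it leniently)
    // after:  "a &gt; b" (well-formed)
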
diff --git a/make/test/BuildMicrobenchmark.gmk b/make/test/BuildMicrobenchmark.gmk
index 55e5026eb3869463222659126205e43f696fdeb6..4e1567c27483c328651a88685ae0d884a258ec7e 100644
--- a/make/test/BuildMicrobenchmark.gmk
+++ b/make/test/BuildMicrobenchmark.gmk
@@ -84,6 +84,7 @@ $(eval $(call SetupJavaCompilation, BUILD_INDIFY, \
#### Compile Targets
# Building microbenchmark requires the jdk.unsupported and java.management modules.
+# sun.security.util is required to compile the Cache benchmark
# Build microbenchmark suite for the current JDK
$(eval $(call SetupJavaCompilation, BUILD_JDK_MICROBENCHMARK, \
@@ -93,6 +94,7 @@ $(eval $(call SetupJavaCompilation, BUILD_JDK_MICROBENCHMARK, \
DISABLED_WARNINGS := processing rawtypes cast serial, \
SRC := $(MICROBENCHMARK_SRC), \
BIN := $(MICROBENCHMARK_CLASSES), \
+ JAVAC_FLAGS := --add-exports java.base/sun.security.util=ALL-UNNAMED, \
JAVA_FLAGS := --add-modules jdk.unsupported --limit-modules java.management, \
))
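
The added --add-exports flag is needed because sun.security.util is not exported from java.base. A hypothetical sketch of the kind of benchmark this enables (the actual Cache benchmark added alongside this patch is not shown in this diff; the class and method names here are illustrative):

    package org.openjdk.bench.java.security;

    import org.openjdk.jmh.annotations.Benchmark;
    import org.openjdk.jmh.annotations.Scope;
    import org.openjdk.jmh.annotations.Setup;
    import org.openjdk.jmh.annotations.State;
    import sun.security.util.Cache;

    @State(Scope.Thread)
    public class CacheBench { // hypothetical name
        private Cache<Integer, Integer> cache;

        @Setup
        public void setup() {
            // newSoftMemoryCache is an existing sun.security.util.Cache factory
            cache = Cache.newSoftMemoryCache(1024);
        }

        @Benchmark
        public void put() {
            cache.put(42, 42);
        }
    }
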
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 01915b33e9bbac45eb646a82ac64e742c745310a..f8dd0ee663f07ded7e74d93f168b56697f2410d7 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -11299,8 +11299,7 @@ instruct regI_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_not_reg(iRegINoSp dst,
- iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
- rFlagsReg cr) %{
+ iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
match(Set dst (AndI src1 (XorI src2 m1)));
ins_cost(INSN_COST);
format %{ "bicw $dst, $src1, $src2" %}
@@ -11318,8 +11317,7 @@ instruct AndI_reg_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_not_reg(iRegLNoSp dst,
- iRegL src1, iRegL src2, immL_M1 m1,
- rFlagsReg cr) %{
+ iRegL src1, iRegL src2, immL_M1 m1) %{
match(Set dst (AndL src1 (XorL src2 m1)));
ins_cost(INSN_COST);
format %{ "bic $dst, $src1, $src2" %}
@@ -11337,8 +11335,7 @@ instruct AndL_reg_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_not_reg(iRegINoSp dst,
- iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
- rFlagsReg cr) %{
+ iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
match(Set dst (OrI src1 (XorI src2 m1)));
ins_cost(INSN_COST);
format %{ "ornw $dst, $src1, $src2" %}
@@ -11356,8 +11353,7 @@ instruct OrI_reg_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_not_reg(iRegLNoSp dst,
- iRegL src1, iRegL src2, immL_M1 m1,
- rFlagsReg cr) %{
+ iRegL src1, iRegL src2, immL_M1 m1) %{
match(Set dst (OrL src1 (XorL src2 m1)));
ins_cost(INSN_COST);
format %{ "orn $dst, $src1, $src2" %}
@@ -11375,8 +11371,7 @@ instruct OrL_reg_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_not_reg(iRegINoSp dst,
- iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
- rFlagsReg cr) %{
+ iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
match(Set dst (XorI m1 (XorI src2 src1)));
ins_cost(INSN_COST);
format %{ "eonw $dst, $src1, $src2" %}
@@ -11394,8 +11389,7 @@ instruct XorI_reg_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_not_reg(iRegLNoSp dst,
- iRegL src1, iRegL src2, immL_M1 m1,
- rFlagsReg cr) %{
+ iRegL src1, iRegL src2, immL_M1 m1) %{
match(Set dst (XorL m1 (XorL src2 src1)));
ins_cost(INSN_COST);
format %{ "eon $dst, $src1, $src2" %}
@@ -11412,9 +11406,10 @@ instruct XorL_reg_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val & (-1 ^ (val >>> shift)) ==> bicw
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, immI_M1 src4, rFlagsReg cr) %{
+ immI src3, immI_M1 src4) %{
match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bicw $dst, $src1, $src2, LSR $src3" %}
@@ -11432,9 +11427,10 @@ instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val & (-1 ^ (val >>> shift)) ==> bic
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, immL_M1 src4, rFlagsReg cr) %{
+ immI src3, immL_M1 src4) %{
match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bic $dst, $src1, $src2, LSR $src3" %}
@@ -11452,9 +11448,10 @@ instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val & (-1 ^ (val >> shift)) ==> bicw
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, immI_M1 src4, rFlagsReg cr) %{
+ immI src3, immI_M1 src4) %{
match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bicw $dst, $src1, $src2, ASR $src3" %}
@@ -11472,9 +11469,10 @@ instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val & (-1 ^ (val >> shift)) ==> bic
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, immL_M1 src4, rFlagsReg cr) %{
+ immI src3, immL_M1 src4) %{
match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bic $dst, $src1, $src2, ASR $src3" %}
@@ -11492,9 +11490,52 @@ instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val & (-1 ^ (val ror shift)) ==> bicw
+instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
+ iRegIorL2I src1, iRegIorL2I src2,
+ immI src3, immI_M1 src4) %{
+ match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "bicw $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ bicw(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x1f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val & (-1 ^ (val ror shift)) ==> bic
+instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
+ iRegL src1, iRegL src2,
+ immI src3, immL_M1 src4) %{
+ match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "bic $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ bic(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x3f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
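+// (Annotation, not part of the patch:) the new *_RotateRight_not_reg rules here
+// and the eon/orn forms below let C2 fold a constant rotate feeding a negated
+// logic op into one ROR-shifted instruction. A sketch of the Java idioms they
+// target, assuming C2 intrinsifies Integer/Long.rotateRight into the
+// RotateRight node these rules match:
+//
+//   class RotateNotFusion {
+//       static int bicw(int a, int b) {
+//           return a & ~Integer.rotateRight(b, 13); // AndI a (XorI (RotateRight b 13) -1) -> bicw ..., ROR 13
+//       }
+//       static long eon(long a, long b) {
+//           return a ^ ~Long.rotateRight(b, 17);    // XorL -1 (XorL (RotateRight b 17) a) -> eon ..., ROR 17
+//       }
+//       static int ornw(int a, int b) {
+//           return a | ~Integer.rotateRight(b, 5);  // OrI a (XorI (RotateRight b 5) -1) -> ornw ..., ROR 5
+//       }
+//   }
+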
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val & (-1 ^ (val << shift)) ==> bicw
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, immI_M1 src4, rFlagsReg cr) %{
+ immI src3, immI_M1 src4) %{
match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bicw $dst, $src1, $src2, LSL $src3" %}
@@ -11512,9 +11553,10 @@ instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val & (-1 ^ (val << shift)) ==> bic
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, immL_M1 src4, rFlagsReg cr) %{
+ immI src3, immL_M1 src4) %{
match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bic $dst, $src1, $src2, LSL $src3" %}
@@ -11532,9 +11574,10 @@ instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val ^ (-1 ^ (val >>> shift)) ==> eonw
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, immI_M1 src4, rFlagsReg cr) %{
+ immI src3, immI_M1 src4) %{
match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eonw $dst, $src1, $src2, LSR $src3" %}
@@ -11552,9 +11595,10 @@ instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val ^ (-1 ^ (val >>> shift)) ==> eon
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, immL_M1 src4, rFlagsReg cr) %{
+ immI src3, immL_M1 src4) %{
match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eon $dst, $src1, $src2, LSR $src3" %}
@@ -11572,9 +11616,10 @@ instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val ^ (-1 ^ (val >> shift)) ==> eonw
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, immI_M1 src4, rFlagsReg cr) %{
+ immI src3, immI_M1 src4) %{
match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eonw $dst, $src1, $src2, ASR $src3" %}
@@ -11592,9 +11637,10 @@ instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val ^ (-1 ^ (val >> shift)) ==> eon
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, immL_M1 src4, rFlagsReg cr) %{
+ immI src3, immL_M1 src4) %{
match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eon $dst, $src1, $src2, ASR $src3" %}
@@ -11612,9 +11658,52 @@ instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val ^ (-1 ^ (val ror shift)) ==> eonw
+instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
+ iRegIorL2I src1, iRegIorL2I src2,
+ immI src3, immI_M1 src4) %{
+ match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "eonw $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ eonw(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x1f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val ^ (-1 ^ (val ror shift)) ==> eon
+instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
+ iRegL src1, iRegL src2,
+ immI src3, immL_M1 src4) %{
+ match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "eon $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ eon(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x3f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val ^ (-1 ^ (val << shift)) ==> eonw
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, immI_M1 src4, rFlagsReg cr) %{
+ immI src3, immI_M1 src4) %{
match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eonw $dst, $src1, $src2, LSL $src3" %}
@@ -11632,9 +11721,10 @@ instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val ^ (-1 ^ (val << shift)) ==> eon
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, immL_M1 src4, rFlagsReg cr) %{
+ immI src3, immL_M1 src4) %{
match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eon $dst, $src1, $src2, LSL $src3" %}
@@ -11652,9 +11742,10 @@ instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val | (-1 ^ (val >>> shift)) ==> ornw
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, immI_M1 src4, rFlagsReg cr) %{
+ immI src3, immI_M1 src4) %{
match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "ornw $dst, $src1, $src2, LSR $src3" %}
@@ -11672,9 +11763,10 @@ instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val | (-1 ^ (val >>> shift)) ==> orn
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, immL_M1 src4, rFlagsReg cr) %{
+ immI src3, immL_M1 src4) %{
match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "orn $dst, $src1, $src2, LSR $src3" %}
@@ -11692,9 +11784,10 @@ instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val | (-1 ^ (val >> shift)) ==> ornw
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, immI_M1 src4, rFlagsReg cr) %{
+ immI src3, immI_M1 src4) %{
match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "ornw $dst, $src1, $src2, ASR $src3" %}
@@ -11712,9 +11805,10 @@ instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val | (-1 ^ (val >> shift)) ==> orn
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, immL_M1 src4, rFlagsReg cr) %{
+ immI src3, immL_M1 src4) %{
match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "orn $dst, $src1, $src2, ASR $src3" %}
@@ -11732,9 +11826,52 @@ instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val | (-1 ^ (val ror shift)) ==> ornw
+instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
+ iRegIorL2I src1, iRegIorL2I src2,
+ immI src3, immI_M1 src4) %{
+ match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "ornw $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ ornw(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x1f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val | (-1 ^ (val ror shift)) ==> orn
+instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
+ iRegL src1, iRegL src2,
+ immI src3, immL_M1 src4) %{
+ match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
+ ins_cost(1.9 * INSN_COST);
+ format %{ "orn $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ orn(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x3f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val | (-1 ^ (val << shift)) ==> ornw
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, immI_M1 src4, rFlagsReg cr) %{
+ immI src3, immI_M1 src4) %{
match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "ornw $dst, $src1, $src2, LSL $src3" %}
@@ -11752,9 +11889,10 @@ instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val | (-1 ^ (val << shift)) ==> orn
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, immL_M1 src4, rFlagsReg cr) %{
+ immI src3, immL_M1 src4) %{
match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "orn $dst, $src1, $src2, LSL $src3" %}
@@ -11774,7 +11912,7 @@ instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_URShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AndI src1 (URShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -11795,7 +11933,7 @@ instruct AndI_reg_URShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AndL src1 (URShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -11816,7 +11954,7 @@ instruct AndL_reg_URShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AndI src1 (RShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -11837,7 +11975,7 @@ instruct AndI_reg_RShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AndL src1 (RShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -11858,7 +11996,7 @@ instruct AndL_reg_RShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_LShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AndI src1 (LShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -11879,7 +12017,7 @@ instruct AndI_reg_LShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AndL src1 (LShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -11896,11 +12034,53 @@ instruct AndL_reg_LShift_reg(iRegLNoSp dst,
ins_pipe(ialu_reg_reg_shift);
%}
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
+ iRegIorL2I src1, iRegIorL2I src2,
+ immI src3) %{
+ match(Set dst (AndI src1 (RotateRight src2 src3)));
+
+ ins_cost(1.9 * INSN_COST);
+ format %{ "andw $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ andw(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x1f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
+ iRegL src1, iRegL src2,
+ immI src3) %{
+ match(Set dst (AndL src1 (RotateRight src2 src3)));
+
+ ins_cost(1.9 * INSN_COST);
+ format %{ "andr $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ andr(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x3f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
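+// (Annotation, not part of the patch:) the non-inverted rotate rules are the
+// direct analogues: a rotate by constant feeding and/xor/or becomes a single
+// ROR-shifted ALU op. Sketch, under the same intrinsic assumption as above:
+//
+//   class RotateFusion {
+//       static int  andw(int a, int b)  { return a & Integer.rotateRight(b, 3);  } // andw ..., ROR 3
+//       static long eor(long a, long b) { return a ^ Long.rotateRight(b, 9);     } // eor  ..., ROR 9
+//       static int  orrw(int a, int b)  { return a | Integer.rotateRight(b, 21); } // orrw ..., ROR 21
+//   }
+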
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_URShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (XorI src1 (URShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -11921,7 +12101,7 @@ instruct XorI_reg_URShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (XorL src1 (URShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -11942,7 +12122,7 @@ instruct XorL_reg_URShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (XorI src1 (RShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -11963,7 +12143,7 @@ instruct XorI_reg_RShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (XorL src1 (RShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -11984,7 +12164,7 @@ instruct XorL_reg_RShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_LShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (XorI src1 (LShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12005,7 +12185,7 @@ instruct XorI_reg_LShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (XorL src1 (LShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12022,11 +12202,53 @@ instruct XorL_reg_LShift_reg(iRegLNoSp dst,
ins_pipe(ialu_reg_reg_shift);
%}
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
+ iRegIorL2I src1, iRegIorL2I src2,
+ immI src3) %{
+ match(Set dst (XorI src1 (RotateRight src2 src3)));
+
+ ins_cost(1.9 * INSN_COST);
+ format %{ "eorw $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ eorw(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x1f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
+ iRegL src1, iRegL src2,
+ immI src3) %{
+ match(Set dst (XorL src1 (RotateRight src2 src3)));
+
+ ins_cost(1.9 * INSN_COST);
+ format %{ "eor $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ eor(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x3f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_URShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (OrI src1 (URShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12047,7 +12269,7 @@ instruct OrI_reg_URShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (OrL src1 (URShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12068,7 +12290,7 @@ instruct OrL_reg_URShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_RShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (OrI src1 (RShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12089,7 +12311,7 @@ instruct OrI_reg_RShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (OrL src1 (RShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12110,7 +12332,7 @@ instruct OrL_reg_RShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_LShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (OrI src1 (LShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12131,7 +12353,7 @@ instruct OrI_reg_LShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (OrL src1 (LShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12148,11 +12370,53 @@ instruct OrL_reg_LShift_reg(iRegLNoSp dst,
ins_pipe(ialu_reg_reg_shift);
%}
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
+ iRegIorL2I src1, iRegIorL2I src2,
+ immI src3) %{
+ match(Set dst (OrI src1 (RotateRight src2 src3)));
+
+ ins_cost(1.9 * INSN_COST);
+ format %{ "orrw $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ orrw(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x1f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
+ iRegL src1, iRegL src2,
+ immI src3) %{
+ match(Set dst (OrL src1 (RotateRight src2 src3)));
+
+ ins_cost(1.9 * INSN_COST);
+ format %{ "orr $dst, $src1, $src2, ROR $src3" %}
+
+ ins_encode %{
+ __ orr(as_Register($dst$$reg),
+ as_Register($src1$$reg),
+ as_Register($src2$$reg),
+ Assembler::ROR,
+ $src3$$constant & 0x3f);
+ %}
+
+ ins_pipe(ialu_reg_reg_shift);
+%}
+
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_URShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AddI src1 (URShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12173,7 +12437,7 @@ instruct AddI_reg_URShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AddL src1 (URShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12194,7 +12458,7 @@ instruct AddL_reg_URShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_RShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AddI src1 (RShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12215,7 +12479,7 @@ instruct AddI_reg_RShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AddL src1 (RShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12236,7 +12500,7 @@ instruct AddL_reg_RShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_LShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AddI src1 (LShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12257,7 +12521,7 @@ instruct AddI_reg_LShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (AddL src1 (LShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12278,7 +12542,7 @@ instruct AddL_reg_LShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_URShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (SubI src1 (URShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12299,7 +12563,7 @@ instruct SubI_reg_URShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (SubL src1 (URShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12320,7 +12584,7 @@ instruct SubL_reg_URShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_RShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (SubI src1 (RShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12341,7 +12605,7 @@ instruct SubI_reg_RShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (SubL src1 (RShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12362,7 +12626,7 @@ instruct SubL_reg_RShift_reg(iRegLNoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_LShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (SubI src1 (LShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12383,7 +12647,7 @@ instruct SubI_reg_LShift_reg(iRegINoSp dst,
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
- immI src3, rFlagsReg cr) %{
+ immI src3) %{
match(Set dst (SubL src1 (LShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
@@ -12400,7 +12664,6 @@ instruct SubL_reg_LShift_reg(iRegLNoSp dst,
ins_pipe(ialu_reg_reg_shift);
%}
-
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
diff --git a/src/hotspot/cpu/aarch64/aarch64_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_ad.m4
index 97b4b9bc71c153cc9968d0f2455598f3b35c7952..a76be239b556f8329de82ae98fa0cb1967d91c22 100644
--- a/src/hotspot/cpu/aarch64/aarch64_ad.m4
+++ b/src/hotspot/cpu/aarch64/aarch64_ad.m4
@@ -35,8 +35,8 @@ define(`BASE_SHIFT_INSN',
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
- immI src3, rFlagsReg cr) %{
- match(Set dst ($2$1 src1 ($4$1 src2 src3)));
+ immI src3) %{
+ match(Set dst ($2$1 src1 (ifelse($4, RotateRight, $4, $4$1) src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "$3 $dst, $src1, $src2, $5 $src3" %}
@@ -56,8 +56,7 @@ define(`BASE_INVERTED_INSN',
`// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
- iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1,
- rFlagsReg cr) %{
+ iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
ifelse($2,Xor,
@@ -79,14 +78,15 @@ dnl into this canonical form.
define(`INVERTED_SHIFT_INSN',
`// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+// val ifelse($2, Xor, ^, $2, And, &, |) (-1 ^ (val ifelse($4, RShift, >>, $4, LShift, <<, $4, URShift, >>>, ror) shift)) ==> $3
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
- immI src3, imm$1_M1 src4, rFlagsReg cr) %{
+ immI src3, imm$1_M1 src4) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
ifelse($2,Xor,
- match(Set dst ($2$1 src4 (Xor$1($4$1 src2 src3) src1)));,
- match(Set dst ($2$1 src1 (Xor$1($4$1 src2 src3) src4)));)
+ match(Set dst ($2$1 src4 (Xor$1(ifelse($4, RotateRight, $4, $4$1) src2 src3) src1)));,
+ match(Set dst ($2$1 src1 (Xor$1(ifelse($4, RotateRight, $4, $4$1) src2 src3) src4)));)
ins_cost(1.9 * INSN_COST);
format %{ "$3 $dst, $src1, $src2, $5 $src3" %}
@@ -131,17 +131,22 @@ define(`BOTH_INVERTED_INSNS',
BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
dnl
define(`BOTH_INVERTED_SHIFT_INSNS',
-`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int)
-INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, jlong)')dnl
+`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4)
+INVERTED_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
dnl
-define(`ALL_SHIFT_KINDS',
+define(`ALL_SHIFT_KINDS_WITHOUT_ROR',
`BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
+define(`ALL_SHIFT_KINDS',
+`ALL_SHIFT_KINDS_WITHOUT_ROR($1, $2)
+BOTH_SHIFT_INSNS($1, $2, RotateRight, ROR)')dnl
+dnl
define(`ALL_INVERTED_SHIFT_KINDS',
`BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR)
+BOTH_INVERTED_SHIFT_INSNS($1, $2, RotateRight, ROR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
NOT_INSN(L, eon)
@@ -155,8 +160,8 @@ ALL_INVERTED_SHIFT_KINDS(Or, orn)
ALL_SHIFT_KINDS(And, andr)
ALL_SHIFT_KINDS(Xor, eor)
ALL_SHIFT_KINDS(Or, orr)
-ALL_SHIFT_KINDS(Add, add)
-ALL_SHIFT_KINDS(Sub, sub)
+ALL_SHIFT_KINDS_WITHOUT_ROR(Add, add)
+ALL_SHIFT_KINDS_WITHOUT_ROR(Sub, sub)
dnl
dnl EXTEND mode, rshift_op, src, lshift_count, rshift_count
define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)') dnl
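
Note the deliberate asymmetry in the m4 change above: Add and Sub switch to ALL_SHIFT_KINDS_WITHOUT_ROR because the AArch64 add/sub (shifted register) encodings support only LSL/LSR/ASR, not ROR; only the logical ops gain rotate operands. Under the same intrinsic assumption as the earlier sketches, the first method below can fuse while the second cannot:

    class RorOperandLimit {
        static int fused(int a, int b)    { return a & Integer.rotateRight(b, 7); } // single andw ..., ROR 7
        static int notFused(int a, int b) { return a + Integer.rotateRight(b, 7); } // rotate stays a separate instruction before the add
    }
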
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
index b9188737faf8dde041172abe278ccde4a0d7e90f..915bc6b81d703c77d6165b77e66c1e562d680c0e 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2657,16 +2657,18 @@ public:
f(sidx<<(int)T, 14, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0);
}
-#define INSN(NAME, op) \
- void NAME(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) { \
- starti; \
- f(0, 31), f(T==D ? 1:0, 30), f(0b001110000, 29, 21); \
- f(((idx<<1)|1)<<(int)T, 20, 16), f(op, 15, 10); \
- rf(Vn, 5), rf(Rd, 0); \
+#define INSN(NAME, cond, op1, op2) \
+ void NAME(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) { \
+ starti; \
+ assert(cond, "invalid register variant"); \
+ f(0, 31), f(op1, 30), f(0b001110000, 29, 21); \
+ f(((idx << 1) | 1) << (int)T, 20, 16), f(op2, 15, 10); \
+ rf(Vn, 5), rf(Rd, 0); \
}
- INSN(umov, 0b001111);
- INSN(smov, 0b001011);
+ INSN(umov, (T != Q), (T == D ? 1 : 0), 0b001111);
+ INSN(smov, (T < D), 1, 0b001011);
+
#undef INSN
#define INSN(NAME, opc, opc2, isSHR) \
@@ -2685,6 +2687,7 @@ public:
* 1xxx xxx 1D/2D, shift = UInt(immh:immb) - 64 \
* (1D is RESERVED) \
*/ \
+ assert(!isSHR || (isSHR && (shift != 0)), "Zero right shift"); \
assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value"); \
int cVal = (1 << (((T >> 1) + 3) + (isSHR ? 1 : 0))); \
int encodedShift = isSHR ? cVal - shift : cVal + shift; \
diff --git a/src/hotspot/cpu/aarch64/atomic_aarch64.hpp b/src/hotspot/cpu/aarch64/atomic_aarch64.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..ac12ba9e23d7d1e4657b671d782f42457fd2fde3
--- /dev/null
+++ b/src/hotspot/cpu/aarch64/atomic_aarch64.hpp
@@ -0,0 +1,49 @@
+/* Copyright (c) 2021, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_ATOMIC_AARCH64_HPP
+#define CPU_AARCH64_ATOMIC_AARCH64_HPP
+
+// Atomic stub implementation.
+// Default implementations are in atomic_linux_aarch64.S
+//
+// All stubs pass arguments the same way:
+// x0: src/dest address
+// x1: arg1
+// x2: arg2 (optional)
+// x3, x8, x9: scratch
+typedef uint64_t (*aarch64_atomic_stub_t)(volatile void *ptr, uint64_t arg1, uint64_t arg2);
+
+// Pointers to stubs
+extern aarch64_atomic_stub_t aarch64_atomic_fetch_add_4_impl;
+extern aarch64_atomic_stub_t aarch64_atomic_fetch_add_8_impl;
+extern aarch64_atomic_stub_t aarch64_atomic_xchg_4_impl;
+extern aarch64_atomic_stub_t aarch64_atomic_xchg_8_impl;
+extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_1_impl;
+extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_impl;
+extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_8_impl;
+extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_1_relaxed_impl;
+extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_relaxed_impl;
+extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_8_relaxed_impl;
+
+#endif // CPU_AARCH64_ATOMIC_AARCH64_HPP
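
A self-contained sketch of how these pointers are meant to be used. The stand-in default below is an assumption for illustration only; the real defaults are hand-written assembly in atomic_linux_aarch64.S, and the LSE replacements are installed by the stub generator.

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t (*stub_t)(volatile void *ptr, uint64_t arg1, uint64_t arg2);

    // Placeholder default (NOT atomic): models the role of the .S implementation.
    static uint64_t fetch_add_4_default(volatile void *ptr, uint64_t arg1, uint64_t) {
      volatile uint32_t *p = static_cast<volatile uint32_t *>(ptr);
      uint64_t old = *p;
      *p = static_cast<uint32_t>(old + arg1);
      return old;                     // the stubs return the previous value in x0
    }

    static stub_t fetch_add_4_impl = fetch_add_4_default;

    int main() {
      volatile uint32_t counter = 41;
      uint64_t prev = fetch_add_4_impl(&counter, 1, 0);  // x0=addr, x1=addend
      printf("%llu %u\n", (unsigned long long)prev, (unsigned)counter);  // 41 42
      return 0;
    }
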
diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
index d4d74ac1f4fbcc03b8b3d57b5460972705808a89..7acc1bf19f2e17b9ba3594f47f5f5b5f0296053a 100644
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
@@ -147,7 +147,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
arg2 == c_rarg1 || arg2 == c_rarg3 ||
arg3 == c_rarg1 || arg3 == c_rarg2) {
- stp(arg3, arg2, Address(pre(sp, 2 * wordSize)));
+ stp(arg3, arg2, Address(pre(sp, -2 * wordSize)));
stp(arg1, zr, Address(pre(sp, -2 * wordSize)));
ldp(c_rarg1, zr, Address(post(sp, 2 * wordSize)));
ldp(c_rarg3, c_rarg2, Address(post(sp, 2 * wordSize)));
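
The one-character fix above matters because AArch64 pre-indexed stores adjust the base register before the access, and the stack grows downward; a +16 pre-index would store above sp into the caller's frame. A small sketch of the now-balanced adjustments, assuming wordSize == 8:

    #include <cassert>

    int main() {
      const long wordSize = 8;
      long sp = 0x1000;
      sp += -2 * wordSize;  // stp arg3, arg2, [sp, #-16]!   (the corrected form)
      sp += -2 * wordSize;  // stp arg1, zr,   [sp, #-16]!
      sp +=  2 * wordSize;  // ldp c_rarg1, zr,      [sp], #16
      sp +=  2 * wordSize;  // ldp c_rarg3, c_rarg2, [sp], #16
      assert(sp == 0x1000); // the pushes and pops balance
      return 0;
    }
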
diff --git a/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp
index ab4f16e9c180718de0453f57e9b0363e66d46eb3..50bbbd786d93707eeab5e525bcc5ed13d71ed3d9 100644
--- a/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp
@@ -52,7 +52,6 @@ define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
-define_pd_global(uintx, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
diff --git a/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp
index e1bfeb3548638eb2fa4155489cd182359adb32ff..f15b6faa79d06a8d1cac9b17ac3dae3d8899b15e 100644
--- a/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp
@@ -77,9 +77,6 @@ define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(uintx, CodeCacheMinBlockLength, 6);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
-// Heap related flags
-define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
-
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp
index 255477f6a49b5aabf9a3cf6626742849b3baea02..939669ef3f11fe9814a12cdbf4e8db2319214ce9 100644
--- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp
@@ -355,10 +355,6 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
vmassert(jfa->last_Java_pc() != NULL, "not walkable");
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
- if (jfa->saved_fp_address()) {
- update_map_with_saved_link(map, jfa->saved_fp_address());
- }
-
return fr;
}
diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
index 2841e68cf56589bd0f078264fa1ec2286ae423ba..777448c6a8ce4794733f827d8fd48f36f09592fc 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
diff --git a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp
index 9c0a66b255adc9004dad9568fc79b5d174196ccd..fb677828e20e640df27cc874e6183f7a59036eed 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp
@@ -38,9 +38,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
BarrierSet* bs = BarrierSet::barrier_set();
assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind");
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
- CardTable* ct = ctbs->card_table();
-
__ lsr(obj, obj, CardTable::card_shift);
assert(CardTable::dirty_card_val() == 0, "must be");
@@ -49,25 +46,17 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
if (UseCondCardMark) {
Label L_already_dirty;
- __ membar(Assembler::StoreLoad);
__ ldrb(rscratch2, Address(obj, rscratch1));
__ cbz(rscratch2, L_already_dirty);
__ strb(zr, Address(obj, rscratch1));
__ bind(L_already_dirty);
} else {
- if (ct->scanned_concurrently()) {
- __ membar(Assembler::StoreStore);
- }
__ strb(zr, Address(obj, rscratch1));
}
}
void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register start, Register count, Register scratch, RegSet saved_regs) {
- BarrierSet* bs = BarrierSet::barrier_set();
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
- CardTable* ct = ctbs->card_table();
-
Label L_loop, L_done;
const Register end = count;
@@ -81,9 +70,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ load_byte_map_base(scratch);
__ add(start, start, scratch);
- if (ct->scanned_concurrently()) {
- __ membar(__ StoreStore);
- }
__ bind(L_loop);
__ strb(zr, Address(start, count));
__ subs(count, count, 1);
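
What the generated card-mark code computes, modeled in C++ (assumptions: 512-byte cards so card_shift == 9, dirty_card_val() == 0 as asserted above, and the byte-map base handling simplified away):

    #include <cstdint>

    static const int card_shift = 9;            // 512-byte cards (assumed)
    static uint8_t card_table[1 << 10];         // toy byte map

    // Equivalent of "strb zr, [base, obj, lsr #card_shift]": mark the card dirty.
    void store_check(uintptr_t obj) {
      card_table[obj >> card_shift] = 0;        // dirty_card_val() == 0
    }

    int main() {
      uintptr_t obj = 0x1234;
      card_table[obj >> card_shift] = 1;        // pretend the card was clean
      store_check(obj);
      return card_table[obj >> card_shift];     // 0: card is now dirty
    }
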
diff --git a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp
index 8a89fb56a83893f68ce6002b8b83caceca89c14e..731e45643aa03aacf51ffeb1809d6342cbf2cd22 100644
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp
@@ -43,215 +43,103 @@ Register InterpreterRuntime::SignatureHandlerGenerator::from() { return rlocals;
Register InterpreterRuntime::SignatureHandlerGenerator::to() { return sp; }
Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return rscratch1; }
+Register InterpreterRuntime::SignatureHandlerGenerator::next_gpr() {
+ if (_num_reg_int_args < Argument::n_int_register_parameters_c-1) {
+ return as_Register(_num_reg_int_args++ + c_rarg1->encoding());
+ }
+ return noreg;
+}
+
+FloatRegister InterpreterRuntime::SignatureHandlerGenerator::next_fpr() {
+ if (_num_reg_fp_args < Argument::n_float_register_parameters_c) {
+ return as_FloatRegister(_num_reg_fp_args++);
+ }
+ return fnoreg;
+}
+
+int InterpreterRuntime::SignatureHandlerGenerator::next_stack_offset() {
+ int ret = _stack_offset;
+ _stack_offset += wordSize;
+ return ret;
+}
+
InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
- _num_int_args = (method->is_static() ? 1 : 0);
- _num_fp_args = 0;
+ _num_reg_int_args = (method->is_static() ? 1 : 0);
+ _num_reg_fp_args = 0;
_stack_offset = 0;
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
- switch (_num_int_args) {
- case 0:
- __ ldr(c_rarg1, src);
- _num_int_args++;
- break;
- case 1:
- __ ldr(c_rarg2, src);
- _num_int_args++;
- break;
- case 2:
- __ ldr(c_rarg3, src);
- _num_int_args++;
- break;
- case 3:
- __ ldr(c_rarg4, src);
- _num_int_args++;
- break;
- case 4:
- __ ldr(c_rarg5, src);
- _num_int_args++;
- break;
- case 5:
- __ ldr(c_rarg6, src);
- _num_int_args++;
- break;
- case 6:
- __ ldr(c_rarg7, src);
- _num_int_args++;
- break;
- default:
- __ ldr(r0, src);
- __ str(r0, Address(to(), _stack_offset));
- _stack_offset += wordSize;
- _num_int_args++;
- break;
+ Register reg = next_gpr();
+ if (reg != noreg) {
+ __ ldr(reg, src);
+ } else {
+ __ ldrw(r0, src);
+ __ strw(r0, Address(to(), next_stack_offset()));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1));
- switch (_num_int_args) {
- case 0:
- __ ldr(c_rarg1, src);
- _num_int_args++;
- break;
- case 1:
- __ ldr(c_rarg2, src);
- _num_int_args++;
- break;
- case 2:
- __ ldr(c_rarg3, src);
- _num_int_args++;
- break;
- case 3:
- __ ldr(c_rarg4, src);
- _num_int_args++;
- break;
- case 4:
- __ ldr(c_rarg5, src);
- _num_int_args++;
- break;
- case 5:
- __ ldr(c_rarg6, src);
- _num_int_args++;
- break;
- case 6:
- __ ldr(c_rarg7, src);
- _num_int_args++;
- break;
- default:
+ Register reg = next_gpr();
+ if (reg != noreg) {
+ __ ldr(reg, src);
+ } else {
__ ldr(r0, src);
- __ str(r0, Address(to(), _stack_offset));
- _stack_offset += wordSize;
- _num_int_args++;
- break;
+ __ str(r0, Address(to(), next_stack_offset()));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
- if (_num_fp_args < Argument::n_float_register_parameters_c) {
- __ ldrs(as_FloatRegister(_num_fp_args++), src);
+ FloatRegister reg = next_fpr();
+ if (reg != fnoreg) {
+ __ ldrs(reg, src);
} else {
__ ldrw(r0, src);
- __ strw(r0, Address(to(), _stack_offset));
- _stack_offset += wordSize;
- _num_fp_args++;
+ __ strw(r0, Address(to(), next_stack_offset()));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1));
- if (_num_fp_args < Argument::n_float_register_parameters_c) {
- __ ldrd(as_FloatRegister(_num_fp_args++), src);
+ FloatRegister reg = next_fpr();
+ if (reg != fnoreg) {
+ __ ldrd(reg, src);
} else {
__ ldr(r0, src);
- __ str(r0, Address(to(), _stack_offset));
- _stack_offset += wordSize;
- _num_fp_args++;
+ __ str(r0, Address(to(), next_stack_offset()));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
-
- switch (_num_int_args) {
- case 0:
+ Register reg = next_gpr();
+ if (reg == c_rarg1) {
assert(offset() == 0, "argument register 1 can only be (non-null) receiver");
__ add(c_rarg1, from(), Interpreter::local_offset_in_bytes(offset()));
- _num_int_args++;
- break;
- case 1:
- {
- __ add(r0, from(), Interpreter::local_offset_in_bytes(offset()));
- __ mov(c_rarg2, 0);
- __ ldr(temp(), r0);
- Label L;
- __ cbz(temp(), L);
- __ mov(c_rarg2, r0);
- __ bind(L);
- _num_int_args++;
- break;
- }
- case 2:
- {
- __ add(r0, from(), Interpreter::local_offset_in_bytes(offset()));
- __ mov(c_rarg3, 0);
- __ ldr(temp(), r0);
- Label L;
- __ cbz(temp(), L);
- __ mov(c_rarg3, r0);
- __ bind(L);
- _num_int_args++;
- break;
- }
- case 3:
- {
- __ add(r0, from(), Interpreter::local_offset_in_bytes(offset()));
- __ mov(c_rarg4, 0);
- __ ldr(temp(), r0);
- Label L;
- __ cbz(temp(), L);
- __ mov(c_rarg4, r0);
- __ bind(L);
- _num_int_args++;
- break;
- }
- case 4:
- {
- __ add(r0, from(), Interpreter::local_offset_in_bytes(offset()));
- __ mov(c_rarg5, 0);
- __ ldr(temp(), r0);
- Label L;
- __ cbz(temp(), L);
- __ mov(c_rarg5, r0);
- __ bind(L);
- _num_int_args++;
- break;
- }
- case 5:
- {
- __ add(r0, from(), Interpreter::local_offset_in_bytes(offset()));
- __ mov(c_rarg6, 0);
- __ ldr(temp(), r0);
- Label L;
- __ cbz(temp(), L);
- __ mov(c_rarg6, r0);
- __ bind(L);
- _num_int_args++;
- break;
- }
- case 6:
- {
- __ add(r0, from(), Interpreter::local_offset_in_bytes(offset()));
- __ mov(c_rarg7, 0);
- __ ldr(temp(), r0);
- Label L;
- __ cbz(temp(), L);
- __ mov(c_rarg7, r0);
- __ bind(L);
- _num_int_args++;
- break;
- }
- default:
- {
- __ add(r0, from(), Interpreter::local_offset_in_bytes(offset()));
- __ ldr(temp(), r0);
- Label L;
- __ cbnz(temp(), L);
- __ mov(r0, zr);
- __ bind(L);
- __ str(r0, Address(to(), _stack_offset));
- _stack_offset += wordSize;
- _num_int_args++;
- break;
- }
+ } else if (reg != noreg) {
+ __ add(r0, from(), Interpreter::local_offset_in_bytes(offset()));
+ __ mov(reg, 0);
+ __ ldr(temp(), r0);
+ Label L;
+ __ cbz(temp(), L);
+ __ mov(reg, r0);
+ __ bind(L);
+ } else {
+ __ add(r0, from(), Interpreter::local_offset_in_bytes(offset()));
+ __ ldr(temp(), r0);
+ Label L;
+ __ cbnz(temp(), L);
+ __ mov(r0, zr);
+ __ bind(L);
+ __ str(r0, Address(to(), next_stack_offset()));
}
}
@@ -280,77 +168,77 @@ class SlowSignatureHandler
intptr_t* _int_args;
intptr_t* _fp_args;
intptr_t* _fp_identifiers;
- unsigned int _num_int_args;
- unsigned int _num_fp_args;
+ unsigned int _num_reg_int_args;
+ unsigned int _num_reg_fp_args;
- virtual void pass_int()
- {
- jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
+ intptr_t* single_slot_addr() {
+ intptr_t* from_addr = (intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
_from -= Interpreter::stackElementSize;
-
- if (_num_int_args < Argument::n_int_register_parameters_c-1) {
- *_int_args++ = from_obj;
- _num_int_args++;
- } else {
- *_to++ = from_obj;
- _num_int_args++;
- }
+ return from_addr;
}
- virtual void pass_long()
- {
- intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
+ intptr_t* double_slot_addr() {
+ intptr_t* from_addr = (intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_from -= 2*Interpreter::stackElementSize;
+ return from_addr;
+ }
- if (_num_int_args < Argument::n_int_register_parameters_c-1) {
- *_int_args++ = from_obj;
- _num_int_args++;
- } else {
- *_to++ = from_obj;
- _num_int_args++;
+ int pass_gpr(intptr_t value) {
+ if (_num_reg_int_args < Argument::n_int_register_parameters_c-1) {
+ *_int_args++ = value;
+ return _num_reg_int_args++;
}
+ return -1;
}
- virtual void pass_object()
- {
- intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
- _from -= Interpreter::stackElementSize;
+ int pass_fpr(intptr_t value) {
+ if (_num_reg_fp_args < Argument::n_float_register_parameters_c) {
+ *_fp_args++ = value;
+ return _num_reg_fp_args++;
+ }
+ return -1;
+ }
- if (_num_int_args < Argument::n_int_register_parameters_c-1) {
- *_int_args++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr;
- _num_int_args++;
- } else {
- *_to++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
- _num_int_args++;
+ void pass_stack(intptr_t value) {
+ *_to++ = value;
+ }
+
+ virtual void pass_int() {
+ jint value = *(jint*)single_slot_addr();
+ if (pass_gpr(value) < 0) {
+ pass_stack(value);
}
}
- virtual void pass_float()
- {
- jint from_obj = *(jint*)(_from+Interpreter::local_offset_in_bytes(0));
- _from -= Interpreter::stackElementSize;
+ virtual void pass_long() {
+ intptr_t value = *double_slot_addr();
+ if (pass_gpr(value) < 0) {
+ pass_stack(value);
+ }
+ }
- if (_num_fp_args < Argument::n_float_register_parameters_c) {
- *_fp_args++ = from_obj;
- _num_fp_args++;
- } else {
- *_to++ = from_obj;
- _num_fp_args++;
+ virtual void pass_object() {
+ intptr_t* addr = single_slot_addr();
+ intptr_t value = *addr == 0 ? NULL : (intptr_t)addr;
+ if (pass_gpr(value) < 0) {
+ pass_stack(value);
}
}
- virtual void pass_double()
- {
- intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
- _from -= 2*Interpreter::stackElementSize;
+ virtual void pass_float() {
+ jint value = *(jint*)single_slot_addr();
+ if (pass_fpr(value) < 0) {
+ pass_stack(value);
+ }
+ }
- if (_num_fp_args < Argument::n_float_register_parameters_c) {
- *_fp_args++ = from_obj;
- *_fp_identifiers |= (1ull << _num_fp_args); // mark as double
- _num_fp_args++;
+ virtual void pass_double() {
+ intptr_t value = *double_slot_addr();
+ int arg = pass_fpr(value);
+ if (0 <= arg) {
+ *_fp_identifiers |= (1ull << arg); // mark as double
} else {
- *_to++ = from_obj;
- _num_fp_args++;
+ pass_stack(value);
}
}
@@ -365,8 +253,8 @@ class SlowSignatureHandler
_fp_args = to - 8;
_fp_identifiers = to - 9;
*(int*) _fp_identifiers = 0;
- _num_int_args = (method->is_static() ? 1 : 0);
- _num_fp_args = 0;
+ _num_reg_int_args = (method->is_static() ? 1 : 0);
+ _num_reg_fp_args = 0;
}
};
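
A toy model of the register walk this refactoring centralizes (assumptions: 8 integer argument registers with c_rarg0 reserved for JNIEnv*, so seven remain for Java arguments, and an 8-byte stack word):

    #include <cstdio>

    static const int n_int_register_parameters_c = 8;
    static int num_reg_int_args = 0;
    static int stack_offset = 0;

    int next_gpr() {  // returns register index 1..7, or -1 for "spill to stack"
      if (num_reg_int_args < n_int_register_parameters_c - 1) {
        return 1 + num_reg_int_args++;          // c_rarg1 + n
      }
      return -1;                                // noreg: caller uses the stack
    }

    int next_stack_offset() {                   // post-incremented by wordSize
      int ret = stack_offset;
      stack_offset += 8;
      return ret;
    }

    int main() {
      for (int i = 0; i < 9; i++) {
        int r = next_gpr();
        if (r >= 0) printf("arg %d -> c_rarg%d\n", i, r);
        else        printf("arg %d -> [sp, #%d]\n", i, next_stack_offset());
      }
      return 0;
    }
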
diff --git a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp
index ee7c2d1bf76774ae6d7bf943220b60e358c15e46..023760a469f286e1dceb46b00cba2d04e5f3b8b4 100644
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp
@@ -34,8 +34,8 @@
class SignatureHandlerGenerator: public NativeSignatureIterator {
private:
MacroAssembler* _masm;
- unsigned int _num_fp_args;
- unsigned int _num_int_args;
+ unsigned int _num_reg_fp_args;
+ unsigned int _num_reg_int_args;
int _stack_offset;
void pass_int();
@@ -44,6 +44,10 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
void pass_double();
void pass_object();
+ Register next_gpr();
+ FloatRegister next_fpr();
+ int next_stack_offset();
+
public:
// Creation
SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer);
diff --git a/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp b/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp
index 2d5b9a62b455f22dafc18ddf6df7cefbbef8eaff..6ff3c037407e67a47a1122b1e4310a3a8b572ede 100644
--- a/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp
@@ -31,9 +31,6 @@ private:
// FP value associated with _last_Java_sp:
intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to
- // (Optional) location of saved FP register, which GCs want to inspect
- intptr_t** volatile _saved_fp_address;
-
public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:
@@ -47,7 +44,6 @@ public:
OrderAccess::release();
_last_Java_fp = NULL;
_last_Java_pc = NULL;
- _saved_fp_address = NULL;
}
void copy(JavaFrameAnchor* src) {
@@ -66,8 +62,6 @@ public:
_last_Java_pc = src->_last_Java_pc;
// Must be last so profiler will always see valid frame if has_last_frame() is true
_last_Java_sp = src->_last_Java_sp;
-
- _saved_fp_address = src->_saved_fp_address;
}
bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
@@ -78,12 +72,9 @@ public:
address last_Java_pc(void) { return _last_Java_pc; }
- intptr_t** saved_fp_address(void) const { return _saved_fp_address; }
-
private:
static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }
- static ByteSize saved_fp_address_offset() { return byte_offset_of(JavaFrameAnchor, _saved_fp_address); }
public:
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index b742edb47dcefaf76ad445c71542de27a8397351..c577d6ada841bcfc730145131350cd70b1ac5447 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -320,8 +320,6 @@ void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
// Always clear the pc because it could have been set by make_walkable()
str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
-
- str(zr, Address(rthread, JavaThread::saved_fp_address_offset()));
}
// Calls to C land
@@ -2567,6 +2565,8 @@ void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) {
ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
+ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
+ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
@@ -5266,10 +5266,14 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
// by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
// the call setup code.
//
-// aarch64_get_thread_helper() clobbers only r0, r1, and flags.
+// On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags.
+// On other systems, the helper is a usual C function.
//
void MacroAssembler::get_thread(Register dst) {
- RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
+ RegSet saved_regs =
+ LINUX_ONLY(RegSet::range(r0, r1) + lr - dst)
+ NOT_LINUX (RegSet::range(r0, r17) + lr - dst);
+
push(saved_regs, sp);
mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index 4ffc97bb377197ec615c1a9599b6f196abf12b4c..fd170d405262b152accd5b994d001da644716c7b 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -527,6 +527,33 @@ public:
orr(Vd, T, Vn, Vn);
}
+ // AdvSIMD shift by immediate.
+ // These are "user friendly" variants which allow a shift count of 0.
+#define WRAP(INSN) \
+ void INSN(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift) { \
+ if (shift == 0) { \
+ SIMD_Arrangement arrange = (T & 1) == 0 ? T8B : T16B; \
+ Assembler::orr(Vd, arrange, Vn, Vn); \
+ } else { \
+ Assembler::INSN(Vd, T, Vn, shift); \
+ } \
+ } \
+
+ WRAP(shl) WRAP(sshr) WRAP(ushr)
+#undef WRAP
+
+#define WRAP(INSN) \
+ void INSN(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift) { \
+ if (shift == 0) { \
+ Assembler::addv(Vd, T, Vd, Vn); \
+ } else { \
+ Assembler::INSN(Vd, T, Vn, shift); \
+ } \
+ } \
+
+ WRAP(usra) WRAP(ssra)
+#undef WRAP
+
public:
// Generalized Test Bit And Branch, including a "far" variety which
@@ -1039,6 +1066,8 @@ public:
void atomic_xchg(Register prev, Register newv, Register addr);
void atomic_xchgw(Register prev, Register newv, Register addr);
+ void atomic_xchgl(Register prev, Register newv, Register addr);
+ void atomic_xchglw(Register prev, Register newv, Register addr);
void atomic_xchgal(Register prev, Register newv, Register addr);
void atomic_xchgalw(Register prev, Register newv, Register addr);
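
A scalar model of the shift-by-zero wrappers defined above, with one 64-bit lane standing in for a SIMD register (illustrative only):

    #include <cstdint>
    #include <cstdio>

    uint64_t ushr(uint64_t vn, int shift) {     // plain shift: 0 means "copy"
      return shift == 0 ? vn                    // orr vd, vn, vn
                        : vn >> shift;          // real ushr needs shift > 0
    }

    uint64_t usra(uint64_t vd, uint64_t vn, int shift) {  // shift-right-accumulate
      return shift == 0 ? vd + vn               // addv: accumulate unshifted
                        : vd + (vn >> shift);
    }

    int main() {
      printf("%llu %llu\n",
             (unsigned long long)ushr(8, 0),    // 8: degenerate shift is a move
             (unsigned long long)usra(1, 8, 3)); // 2
      return 0;
    }
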
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index c1a86c025953a165082afec419811ef0c45710ad..a478fd0236154040aa7d7a03b9ee636b6e1852f3 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -240,15 +240,6 @@ bool SharedRuntime::is_wide_vector(int size) {
return size > 8;
}
-size_t SharedRuntime::trampoline_size() {
- return 16;
-}
-
-void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
- __ mov(rscratch1, destination);
- __ br(rscratch1);
-}
-
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
@@ -3072,7 +3063,6 @@ void OptoRuntime::generate_exception_blob() {
// Set exception blob
_exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}
-#endif // COMPILER2
// ---------------------------------------------------------------
@@ -3082,6 +3072,10 @@ class NativeInvokerGenerator : public StubCodeGenerator {
const GrowableArray<VMReg>& _input_registers;
const GrowableArray<VMReg>& _output_registers;
+
+ int _frame_complete;
+ int _framesize;
+ OopMapSet* _oop_maps;
public:
NativeInvokerGenerator(CodeBuffer* buffer,
address call_target,
@@ -3092,9 +3086,90 @@ public:
_call_target(call_target),
_shadow_space_bytes(shadow_space_bytes),
_input_registers(input_registers),
- _output_registers(output_registers) {}
+ _output_registers(output_registers),
+ _frame_complete(0),
+ _framesize(0),
+ _oop_maps(NULL) {
+ assert(_output_registers.length() <= 1
+ || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
+ }
+
void generate();
+ int spill_size_in_bytes() const {
+ if (_output_registers.length() == 0) {
+ return 0;
+ }
+ VMReg reg = _output_registers.at(0);
+ assert(reg->is_reg(), "must be a register");
+ if (reg->is_Register()) {
+ return 8;
+ } else if (reg->is_FloatRegister()) {
+ bool use_sve = Matcher::supports_scalable_vector();
+ if (use_sve) {
+ return Matcher::scalable_vector_reg_size(T_BYTE);
+ }
+ return 16;
+ } else {
+ ShouldNotReachHere();
+ }
+ return 0;
+ }
+
+ void spill_output_registers() {
+ if (_output_registers.length() == 0) {
+ return;
+ }
+ VMReg reg = _output_registers.at(0);
+ assert(reg->is_reg(), "must be a register");
+ MacroAssembler* masm = _masm;
+ if (reg->is_Register()) {
+ __ spill(reg->as_Register(), true, 0);
+ } else if (reg->is_FloatRegister()) {
+ bool use_sve = Matcher::supports_scalable_vector();
+ if (use_sve) {
+ __ spill_sve_vector(reg->as_FloatRegister(), 0, Matcher::scalable_vector_reg_size(T_BYTE));
+ } else {
+ __ spill(reg->as_FloatRegister(), __ Q, 0);
+ }
+ } else {
+ ShouldNotReachHere();
+ }
+ }
+
+ void fill_output_registers() {
+ if (_output_registers.length() == 0) {
+ return;
+ }
+ VMReg reg = _output_registers.at(0);
+ assert(reg->is_reg(), "must be a register");
+ MacroAssembler* masm = _masm;
+ if (reg->is_Register()) {
+ __ unspill(reg->as_Register(), true, 0);
+ } else if (reg->is_FloatRegister()) {
+ bool use_sve = Matcher::supports_scalable_vector();
+ if (use_sve) {
+ __ unspill_sve_vector(reg->as_FloatRegister(), 0, Matcher::scalable_vector_reg_size(T_BYTE));
+ } else {
+ __ unspill(reg->as_FloatRegister(), __ Q, 0);
+ }
+ } else {
+ ShouldNotReachHere();
+ }
+ }
+
+ int frame_complete() const {
+ return _frame_complete;
+ }
+
+ int framesize() const {
+ return (_framesize >> (LogBytesPerWord - LogBytesPerInt));
+ }
+
+ OopMapSet* oop_maps() const {
+ return _oop_maps;
+ }
+
private:
#ifdef ASSERT
bool target_uses_register(VMReg reg) {
@@ -3105,21 +3180,23 @@ private:
static const int native_invoker_code_size = 1024;
-BufferBlob* SharedRuntime::make_native_invoker(address call_target,
- int shadow_space_bytes,
- const GrowableArray<VMReg>& input_registers,
- const GrowableArray<VMReg>& output_registers) {
- BufferBlob* _invoke_native_blob =
- BufferBlob::create("nep_invoker_blob", native_invoker_code_size);
- if (_invoke_native_blob == NULL)
- return NULL; // allocation failure
-
- CodeBuffer code(_invoke_native_blob);
+RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
+ int shadow_space_bytes,
+ const GrowableArray<VMReg>& input_registers,
+ const GrowableArray<VMReg>& output_registers) {
+ int locs_size = 64;
+ CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers);
g.generate();
code.log_section_sizes("nep_invoker_blob");
- return _invoke_native_blob;
+ RuntimeStub* stub =
+ RuntimeStub::new_runtime_stub("nep_invoker_blob",
+ &code,
+ g.frame_complete(),
+ g.framesize(),
+ g.oop_maps(), false);
+ return stub;
}
void NativeInvokerGenerator::generate() {
@@ -3128,26 +3205,40 @@ void NativeInvokerGenerator::generate() {
|| target_uses_register(rthread->as_VMReg())),
"Register conflict");
+ enum layout {
+ rbp_off,
+ rbp_off2,
+ return_off,
+ return_off2,
+ framesize // inclusive of return address
+ };
+
+ assert(_shadow_space_bytes == 0, "not expecting shadow space on AArch64");
+ _framesize = align_up(framesize + (spill_size_in_bytes() >> LogBytesPerInt), 4);
+ assert(is_even(_framesize/2), "sp not 16-byte aligned");
+
+ _oop_maps = new OopMapSet();
MacroAssembler* masm = _masm;
- __ set_last_Java_frame(sp, noreg, lr, rscratch1);
+ address start = __ pc();
__ enter();
- // Store a pointer to the previous R29 (RFP) saved on the stack as it
- // may contain an oop if PreserveFramePointer is off. This value is
- // retrieved later by frame::sender_for_entry_frame() when the stack
- // is walked.
- __ mov(rscratch1, sp);
- __ str(rscratch1, Address(rthread, JavaThread::saved_fp_address_offset()));
+ // lr and fp are already in place
+ __ sub(sp, rfp, ((unsigned)_framesize-4) << LogBytesPerInt); // prolog
+
+ _frame_complete = __ pc() - start;
+
+ address the_pc = __ pc();
+ __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
+ OopMap* map = new OopMap(_framesize, 0);
+ _oop_maps->add_gc_map(the_pc - start, map);
// State transition
__ mov(rscratch1, _thread_in_native);
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
__ stlrw(rscratch1, rscratch2);
- assert(_shadow_space_bytes == 0, "not expecting shadow space on AArch64");
-
rt_call(masm, _call_target);
__ mov(rscratch1, _thread_in_native_trans);
@@ -3193,27 +3284,14 @@ void NativeInvokerGenerator::generate() {
__ bind(L_safepoint_poll_slow_path);
// Need to save the native result registers around any runtime calls.
- RegSet spills;
- FloatRegSet fp_spills;
- for (int i = 0; i < _output_registers.length(); i++) {
- VMReg output = _output_registers.at(i);
- if (output->is_Register()) {
- spills += RegSet::of(output->as_Register());
- } else if (output->is_FloatRegister()) {
- fp_spills += FloatRegSet::of(output->as_FloatRegister());
- }
- }
-
- __ push(spills, sp);
- __ push_fp(fp_spills, sp);
+ spill_output_registers();
__ mov(c_rarg0, rthread);
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
__ blr(rscratch1);
- __ pop_fp(fp_spills, sp);
- __ pop(spills, sp);
+ fill_output_registers();
__ b(L_after_safepoint_poll);
__ block_comment("} L_safepoint_poll_slow_path");
@@ -3223,13 +3301,11 @@ void NativeInvokerGenerator::generate() {
__ block_comment("{ L_reguard");
__ bind(L_reguard);
- __ push(spills, sp);
- __ push_fp(fp_spills, sp);
+ spill_output_registers();
rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
- __ pop_fp(fp_spills, sp);
- __ pop(spills, sp);
+ fill_output_registers();
__ b(L_after_reguard);
@@ -3239,3 +3315,4 @@ void NativeInvokerGenerator::generate() {
__ flush();
}
+#endif // COMPILER2
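
A worked instance of the frame-size arithmetic in generate() above, assuming one integer output register (so spill_size_in_bytes() == 8) and 32-bit stack slots:

    #include <cassert>

    int align_up(int x, int a) { return (x + a - 1) & -a; }

    int main() {
      const int LogBytesPerInt = 2;
      int fixed_slots = 4;                    // rbp_off..return_off2 in the enum
      int spill_slots = 8 >> LogBytesPerInt;  // 8 bytes of spill -> 2 slots
      int framesize = align_up(fixed_slots + spill_slots, 4);
      assert(framesize == 8);                 // 8 slots = 32 bytes; _framesize/2
      assert((framesize / 2) % 2 == 0);       // is even, so sp stays 16-byte aligned
      return 0;
    }
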
diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
index 1cd6e171c6e91c7c648144ef9e0119b459d236c8..6326b29ed6d7697a2baa0491ab2d25cb7f3e8962 100644
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "atomic_aarch64.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/gc_globals.hpp"
@@ -38,6 +39,7 @@
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -1361,7 +1363,7 @@ class StubGenerator: public StubCodeGenerator {
//
// If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
// the hardware handle it. The two dwords within qwords that span
- // cache line boundaries will still be loaded and stored atomicly.
+ // cache line boundaries will still be loaded and stored atomically.
//
// Side Effects:
// disjoint_int_copy_entry is set to the no-overlap entry point
@@ -1431,7 +1433,7 @@ class StubGenerator: public StubCodeGenerator {
//
// If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
// the hardware handle it. The two dwords within qwords that span
- // cache line boundaries will still be loaded and stored atomicly.
+ // cache line boundaries will still be loaded and stored atomically.
//
address generate_conjoint_copy(int size, bool aligned, bool is_oop, address nooverlap_target,
address *entry, const char *name,
@@ -1596,7 +1598,7 @@ class StubGenerator: public StubCodeGenerator {
//
// If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
// the hardware handle it. The two dwords within qwords that span
- // cache line boundaries will still be loaded and stored atomicly.
+ // cache line boundaries will still be loaded and stored atomically.
//
// Side Effects:
// disjoint_int_copy_entry is set to the no-overlap entry point
@@ -1620,7 +1622,7 @@ class StubGenerator: public StubCodeGenerator {
//
// If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
// the hardware handle it. The two dwords within qwords that span
- // cache line boundaries will still be loaded and stored atomicly.
+ // cache line boundaries will still be loaded and stored atomically.
//
address generate_conjoint_int_copy(bool aligned, address nooverlap_target,
address *entry, const char *name,
@@ -5571,6 +5573,171 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
+#ifdef LINUX
+
+ // ARMv8.1 LSE versions of the atomic stubs used by Atomic::PlatformXX.
+ //
+ // If LSE is in use, generate LSE versions of all the stubs. The
+ // non-LSE versions are in atomic_linux_aarch64.S.
+
+ // class AtomicStubMark records the entry point of a stub and the
+ // stub pointer which will point to it. The stub pointer is set to
+ // the entry point when ~AtomicStubMark() is called, which must be
+ // after ICache::invalidate_range. This ensures safe publication of
+ // the generated code.
+ class AtomicStubMark {
+ address _entry_point;
+ aarch64_atomic_stub_t *_stub;
+ MacroAssembler *_masm;
+ public:
+ AtomicStubMark(MacroAssembler *masm, aarch64_atomic_stub_t *stub) {
+ _masm = masm;
+ __ align(32);
+ _entry_point = __ pc();
+ _stub = stub;
+ }
+ ~AtomicStubMark() {
+ *_stub = (aarch64_atomic_stub_t)_entry_point;
+ }
+ };
+
+ // NB: For memory_order_conservative we need a trailing membar after
+ // LSE atomic operations but not a leading membar.
+ //
+ // We don't need a leading membar because a clause in the Arm ARM
+ // says:
+ //
+ // Barrier-ordered-before
+ //
+ // Barrier instructions order prior Memory effects before subsequent
+ // Memory effects generated by the same Observer. A read or a write
+ // RW1 is Barrier-ordered-before a read or a write RW2 from the same
+ // Observer if and only if RW1 appears in program order before RW2
+ // and [ ... ] at least one of RW1 and RW2 is generated by an atomic
+ // instruction with both Acquire and Release semantics.
+ //
+ // All the atomic instructions {ldaddal, swapal, casal} have Acquire
+ // and Release semantics, therefore we don't need a leading
+ // barrier. However, there is no corresponding Barrier-ordered-after
+ // relationship, therefore we need a trailing membar to prevent a
+ // later store or load from being reordered with the store in an
+ // atomic instruction.
+ //
+ // This was checked by using the herd7 consistency model simulator
+ // (http://diy.inria.fr/) with this test case:
+ //
+ // AArch64 LseCas
+ // { 0:X1=x; 0:X2=y; 1:X1=x; 1:X2=y; }
+ // P0 | P1;
+ // LDR W4, [X2] | MOV W3, #0;
+ // DMB LD | MOV W4, #1;
+ // LDR W3, [X1] | CASAL W3, W4, [X1];
+ // | DMB ISH;
+ // | STR W4, [X2];
+ // exists
+ // (0:X3=0 /\ 0:X4=1)
+ //
+ // If X3 == 0 && X4 == 1, the store to y in P1 has been reordered
+ // with the store to x in P1. Without the DMB in P1 this may happen.
+ //
+ // At the time of writing we don't know of any AArch64 hardware that
+ // reorders stores in this way, but the Reference Manual permits it.
+
+ void gen_cas_entry(Assembler::operand_size size,
+ atomic_memory_order order) {
+ Register prev = r3, ptr = c_rarg0, compare_val = c_rarg1,
+ exchange_val = c_rarg2;
+ bool acquire, release;
+ switch (order) {
+ case memory_order_relaxed:
+ acquire = false;
+ release = false;
+ break;
+ default:
+ acquire = true;
+ release = true;
+ break;
+ }
+ __ mov(prev, compare_val);
+ __ lse_cas(prev, exchange_val, ptr, size, acquire, release, /*not_pair*/true);
+ if (order == memory_order_conservative) {
+ __ membar(Assembler::StoreStore|Assembler::StoreLoad);
+ }
+ if (size == Assembler::xword) {
+ __ mov(r0, prev);
+ } else {
+ __ movw(r0, prev);
+ }
+ __ ret(lr);
+ }
+
+ void gen_ldaddal_entry(Assembler::operand_size size) {
+ Register prev = r2, addr = c_rarg0, incr = c_rarg1;
+ __ ldaddal(size, incr, prev, addr);
+ __ membar(Assembler::StoreStore|Assembler::StoreLoad);
+ if (size == Assembler::xword) {
+ __ mov(r0, prev);
+ } else {
+ __ movw(r0, prev);
+ }
+ __ ret(lr);
+ }
+
+ void gen_swpal_entry(Assembler::operand_size size) {
+ Register prev = r2, addr = c_rarg0, incr = c_rarg1;
+ __ swpal(size, incr, prev, addr);
+ __ membar(Assembler::StoreStore|Assembler::StoreLoad);
+ if (size == Assembler::xword) {
+ __ mov(r0, prev);
+ } else {
+ __ movw(r0, prev);
+ }
+ __ ret(lr);
+ }
+
+ void generate_atomic_entry_points() {
+ if (! UseLSE) {
+ return;
+ }
+
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", "atomic entry points");
+ address first_entry = __ pc();
+
+ // All memory_order_conservative
+ AtomicStubMark mark_fetch_add_4(_masm, &aarch64_atomic_fetch_add_4_impl);
+ gen_ldaddal_entry(Assembler::word);
+ AtomicStubMark mark_fetch_add_8(_masm, &aarch64_atomic_fetch_add_8_impl);
+ gen_ldaddal_entry(Assembler::xword);
+
+ AtomicStubMark mark_xchg_4(_masm, &aarch64_atomic_xchg_4_impl);
+ gen_swpal_entry(Assembler::word);
+ AtomicStubMark mark_xchg_8_impl(_masm, &aarch64_atomic_xchg_8_impl);
+ gen_swpal_entry(Assembler::xword);
+
+ // CAS, memory_order_conservative
+ AtomicStubMark mark_cmpxchg_1(_masm, &aarch64_atomic_cmpxchg_1_impl);
+ gen_cas_entry(MacroAssembler::byte, memory_order_conservative);
+ AtomicStubMark mark_cmpxchg_4(_masm, &aarch64_atomic_cmpxchg_4_impl);
+ gen_cas_entry(MacroAssembler::word, memory_order_conservative);
+ AtomicStubMark mark_cmpxchg_8(_masm, &aarch64_atomic_cmpxchg_8_impl);
+ gen_cas_entry(MacroAssembler::xword, memory_order_conservative);
+
+ // CAS, memory_order_relaxed
+ AtomicStubMark mark_cmpxchg_1_relaxed
+ (_masm, &aarch64_atomic_cmpxchg_1_relaxed_impl);
+ gen_cas_entry(MacroAssembler::byte, memory_order_relaxed);
+ AtomicStubMark mark_cmpxchg_4_relaxed
+ (_masm, &aarch64_atomic_cmpxchg_4_relaxed_impl);
+ gen_cas_entry(MacroAssembler::word, memory_order_relaxed);
+ AtomicStubMark mark_cmpxchg_8_relaxed
+ (_masm, &aarch64_atomic_cmpxchg_8_relaxed_impl);
+ gen_cas_entry(MacroAssembler::xword, memory_order_relaxed);
+
+ ICache::invalidate_range(first_entry, __ pc() - first_entry);
+ }
+#endif // LINUX
+
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
@@ -6683,6 +6850,12 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32();
}
+#ifdef LINUX
+
+ generate_atomic_entry_points();
+
+#endif // LINUX
+
StubRoutines::aarch64::set_completed();
}
@@ -6703,3 +6876,30 @@ void StubGenerator_generate(CodeBuffer* code, bool all) {
}
StubGenerator g(code, all);
}
+
+
+#ifdef LINUX
+
+// Define pointers to atomic stubs and initialize them to point to the
+// code in atomic_linux_aarch64.S.
+
+#define DEFAULT_ATOMIC_OP(OPNAME, SIZE, RELAXED) \
+ extern "C" uint64_t aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _default_impl \
+ (volatile void *ptr, uint64_t arg1, uint64_t arg2); \
+ aarch64_atomic_stub_t aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _impl \
+ = aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _default_impl;
+
+DEFAULT_ATOMIC_OP(fetch_add, 4, )
+DEFAULT_ATOMIC_OP(fetch_add, 8, )
+DEFAULT_ATOMIC_OP(xchg, 4, )
+DEFAULT_ATOMIC_OP(xchg, 8, )
+DEFAULT_ATOMIC_OP(cmpxchg, 1, )
+DEFAULT_ATOMIC_OP(cmpxchg, 4, )
+DEFAULT_ATOMIC_OP(cmpxchg, 8, )
+DEFAULT_ATOMIC_OP(cmpxchg, 1, _relaxed)
+DEFAULT_ATOMIC_OP(cmpxchg, 4, _relaxed)
+DEFAULT_ATOMIC_OP(cmpxchg, 8, _relaxed)
+
+#undef DEFAULT_ATOMIC_OP
+
+#endif // LINUX
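
For reference, the expansion of one instantiation, DEFAULT_ATOMIC_OP(cmpxchg, 4, _relaxed) — a mechanical expansion of the macro above, shown for readability; it assumes atomic_aarch64.hpp is in scope for the typedef and links against the assembly default:

    #include "atomic_aarch64.hpp"   // for aarch64_atomic_stub_t

    extern "C" uint64_t aarch64_atomic_cmpxchg_4_relaxed_default_impl
        (volatile void *ptr, uint64_t arg1, uint64_t arg2);
    aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_relaxed_impl
        = aarch64_atomic_cmpxchg_4_relaxed_default_impl;
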
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index 5c13f3e6fb268089a26da258e406fb99cf32f9af..8cae18ad5a6a00f258e9027fec09da7ce06ad1cb 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -95,6 +95,9 @@ void VM_Version::initialize() {
SoftwarePrefetchHintDistance &= ~7;
}
+ if (FLAG_IS_DEFAULT(ContendedPaddingWidth) && (dcache_line > ContendedPaddingWidth)) {
+ ContendedPaddingWidth = dcache_line;
+ }
if (os::supports_map_sync()) {
// if dcpop is available publish data cache line flush size via
diff --git a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
index 2f7a8224ff8e1b0fe325f0663518b1383a1a7bbc..4a2285b392f672d6d0623ae39023bb87db15886b 100644
--- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -70,7 +70,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
#if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) {
__ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
- __ incrementw(Address(r16));
+ __ increment(Address(r16));
}
#endif
@@ -145,6 +145,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
if (s == NULL) {
return NULL;
}
+
// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs.
@@ -159,7 +160,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) {
__ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
- __ incrementw(Address(r10));
+ __ increment(Address(r10));
}
#endif
diff --git a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
index 5cba07805b9bfee32379014b8581477e2ed26030..f29d4c0744cf394ee5bb434668fd6284afe00172 100644
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
@@ -366,9 +366,6 @@ void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
assert(addr->is_register(), "must be a register at this point");
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
-
LIR_Opr tmp = FrameMap::LR_ptr_opr;
bool load_card_table_base_const = VM_Version::supports_movw();
@@ -382,9 +379,6 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
// byte instruction does not support the addressing mode we need.
LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
if (UseCondCardMark) {
- if (ct->scanned_concurrently()) {
- __ membar_storeload();
- }
LIR_Opr cur_value = new_register(T_INT);
__ move(card_addr, cur_value);
@@ -394,9 +388,6 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
set_card(tmp, card_addr);
__ branch_destination(L_already_dirty->label());
} else {
- if (ct->scanned_concurrently()) {
- __ membar_storestore();
- }
set_card(tmp, card_addr);
}
}
diff --git a/src/hotspot/cpu/arm/c1_globals_arm.hpp b/src/hotspot/cpu/arm/c1_globals_arm.hpp
index 7077a87092c28e5a6347a34782112b51839a6837..8f196bc5e6abb6f557157b824a9ddb82fdc70bd1 100644
--- a/src/hotspot/cpu/arm/c1_globals_arm.hpp
+++ b/src/hotspot/cpu/arm/c1_globals_arm.hpp
@@ -53,7 +53,6 @@ define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
-define_pd_global(size_t, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
diff --git a/src/hotspot/cpu/arm/c2_globals_arm.hpp b/src/hotspot/cpu/arm/c2_globals_arm.hpp
index 525af8b1edc9e2dc521bc81bbf9b0be90e6a0c7b..7754001dd0af8f65f2a8ea45b0a31cbf9d606487 100644
--- a/src/hotspot/cpu/arm/c2_globals_arm.hpp
+++ b/src/hotspot/cpu/arm/c2_globals_arm.hpp
@@ -100,9 +100,6 @@ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed
-// Heap related flags
-define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M));
-
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
diff --git a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
index fc32418daa80a0b678d984bce6bb301966f49289..86f43597e220bb1b7f302dc59422cf0320e36de1 100644
--- a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
@@ -128,16 +128,10 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
- CardTable* ct = ctbs->card_table();
-
assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
if (UseCondCardMark) {
- if (ct->scanned_concurrently()) {
- __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), noreg);
- }
Label already_dirty;
__ ldrb(tmp, card_table_addr);
@@ -147,9 +141,6 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
__ bind(already_dirty);
} else {
- if (ct->scanned_concurrently()) {
- __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore), noreg);
- }
set_card(masm, card_table_base, card_table_addr, tmp);
}
}
diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
index b19bea1b2a12692d0f455b586ae3fa346c1d1264..66307e232b4be05067c206365e541e45952019bd 100644
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
@@ -251,16 +251,6 @@ bool SharedRuntime::is_wide_vector(int size) {
return false;
}
-size_t SharedRuntime::trampoline_size() {
- return 16;
-}
-
-void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
- InlinedAddress dest(destination);
- __ indirect_jump(dest, Rtemp);
- __ bind_literal(dest);
-}
-
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
@@ -1898,10 +1888,12 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
-BufferBlob* SharedRuntime::make_native_invoker(address call_target,
- int shadow_space_bytes,
- const GrowableArray<VMReg>& input_registers,
- const GrowableArray<VMReg>& output_registers) {
+#ifdef COMPILER2
+RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
+ int shadow_space_bytes,
+ const GrowableArray<VMReg>& input_registers,
+ const GrowableArray<VMReg>& output_registers) {
Unimplemented();
return nullptr;
}
+#endif
diff --git a/src/hotspot/cpu/ppc/c1_globals_ppc.hpp b/src/hotspot/cpu/ppc/c1_globals_ppc.hpp
index 84cf043b22d8f1969ee9d9c6f75ce30e422b8a01..af6db7555dd13d2a928d6cfca6c748265c463e58 100644
--- a/src/hotspot/cpu/ppc/c1_globals_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c1_globals_ppc.hpp
@@ -51,7 +51,6 @@ define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
-define_pd_global(size_t, MetaspaceSize, 12*M);
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
diff --git a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
index 88377001bcfecaa521156864d6e7f6a2b1c7341b..bb103cdf6091b8b05f29a9c6ae38efc8083f66b4 100644
--- a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
@@ -93,9 +93,6 @@ define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, true);
-// Heap related flags
-define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M));
-
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
diff --git a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
index cc78b0191539aaf75546bbffffd32285a1a797a6..800b34e4ba73663e206807a0c0ff7b489612c296 100644
--- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
@@ -26,6 +26,7 @@
#include "nativeInst_ppc.hpp"
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interp_masm.hpp"
diff --git a/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
index fd0c4c6a54087212d928a14ff478bbbd55df1ee0..8337317e3f2cc1a431aa8832d87005a72c222381 100644
--- a/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
@@ -49,8 +49,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
Label Lskip_loop, Lstore_loop;
- if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
-
__ sldi_(count, count, LogBytesPerHeapOop);
__ beq(CCR0, Lskip_loop); // zero length
__ addi(count, count, -BytesPerHeapOop);
@@ -74,13 +72,10 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
CardTable::CardValue* byte_map_base,
Register tmp, Register obj) {
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
assert_different_registers(obj, tmp, R0);
__ load_const_optimized(tmp, (address)byte_map_base, R0);
__ srdi(obj, obj, CardTable::card_shift);
__ li(R0, CardTable::dirty_card_val());
- if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
__ stbx(R0, tmp, obj);
}
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 0c3b96fa9ba96994728377f5ce6b7f090944d442..a1642b0ad169eb0ee9b54ade15b95d25a007e5ed 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -3035,36 +3035,6 @@ encode %{
__ stfd($src$$FloatRegister, Idisp, $mem$$base$$Register);
%}
- // Use release_store for card-marking to ensure that previous
- // oop-stores are visible before the card-mark change.
- enc_class enc_cms_card_mark(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{
- // FIXME: Implement this as a cmove and use a fixed condition code
- // register which is written on every transition to compiled code,
- // e.g. in call-stub and when returning from runtime stubs.
- //
- // Proposed code sequence for the cmove implementation:
- //
- // Label skip_release;
- // __ beq(CCRfixed, skip_release);
- // __ release();
- // __ bind(skip_release);
- // __ stb(card mark);
-
- C2_MacroAssembler _masm(&cbuf);
- Label skip_storestore;
-
- __ li(R0, 0);
- __ membar(Assembler::StoreStore);
-
- // Do the store.
- if ($mem$$index == 0) {
- __ stb(R0, $mem$$disp, $mem$$base$$Register);
- } else {
- assert(0 == $mem$$disp, "no displacement possible with indexed load/stores on ppc");
- __ stbx(R0, $mem$$base$$Register, $mem$$index$$Register);
- }
- %}
-
enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{
if (VM_Version::has_isel()) {
@@ -6601,37 +6571,15 @@ instruct storeD(memory mem, regD src) %{
//----------Store Instructions With Zeros--------------------------------------
-// Card-mark for CMS garbage collection.
-// This cardmark does an optimization so that it must not always
-// do a releasing store. For this, it gets the address of
-// CMSCollectorCardTableBarrierSetBSExt::_requires_release as input.
-// (Using releaseFieldAddr in the match rule is a hack.)
-instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{
- match(Set mem (StoreCM mem releaseFieldAddr));
- effect(TEMP crx);
- predicate(false);
- ins_cost(MEMORY_REF_COST);
-
- // See loadConP.
- ins_cannot_rematerialize(true);
-
- format %{ "STB #0, $mem \t// CMS card-mark byte (must be 0!), checking requires_release in [$releaseFieldAddr]" %}
- ins_encode( enc_cms_card_mark(mem, releaseFieldAddr, crx) );
- ins_pipe(pipe_class_memory);
-%}
-
-instruct storeCM_G1(memory mem, immI_0 zero) %{
+instruct storeCM(memory mem, immI_0 zero) %{
match(Set mem (StoreCM mem zero));
- predicate(UseG1GC);
ins_cost(MEMORY_REF_COST);
- ins_cannot_rematerialize(true);
-
- format %{ "STB #0, $mem \t// CMS card-mark byte store (G1)" %}
+ format %{ "STB #0, $mem \t// CMS card-mark byte store" %}
size(8);
ins_encode %{
__ li(R0, 0);
- //__ release(); // G1: oops are allowed to get visible after dirty marking
+ // No release barrier: Oops are allowed to get visible after marking.
guarantee($mem$$base$$Register != R1_SP, "use frame_slots_bias");
__ stb(R0, $mem$$disp, $mem$$base$$Register);
%}
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 1e5cd5a5903501e1e7e2d84794b98ac1726207ac..32e6eb3c341a2d114aa29728eb2a387d975d565d 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -564,17 +564,6 @@ bool SharedRuntime::is_wide_vector(int size) {
return size > 8;
}
-size_t SharedRuntime::trampoline_size() {
- return Assembler::load_const_size + 8;
-}
-
-void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
- Register Rtemp = R12;
- __ load_const(Rtemp, destination);
- __ mtctr(Rtemp);
- __ bctr();
-}
-
static int reg2slot(VMReg r) {
return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}
@@ -3442,10 +3431,12 @@ void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
reverse_words(m, (unsigned long *)m_ints, longwords);
}
-BufferBlob* SharedRuntime::make_native_invoker(address call_target,
- int shadow_space_bytes,
- const GrowableArray<VMReg>& input_registers,
- const GrowableArray<VMReg>& output_registers) {
+#ifdef COMPILER2
+RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
+ int shadow_space_bytes,
+ const GrowableArray<VMReg>& input_registers,
+ const GrowableArray<VMReg>& output_registers) {
Unimplemented();
return nullptr;
}
+#endif
diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
index f762f62e226a02bd29ec91437e436ced179b5529..e632ec55e0b236d181341a60543a7e25868177d7 100644
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
@@ -2598,7 +2598,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ function_entry();
- Label L_doLast;
+ Label L_doLast, L_error;
Register from = R3_ARG1; // source array address
Register to = R4_ARG2; // destination array address
@@ -2628,7 +2628,7 @@ class StubGenerator: public StubCodeGenerator {
__ li (fifteen, 15);
- // load unaligned from[0-15] to vsRet
+ // load unaligned from[0-15] to vRet
__ lvx (vRet, from);
__ lvx (vTmp1, fifteen, from);
__ lvsl (fromPerm, from);
@@ -2743,6 +2743,11 @@ class StubGenerator: public StubCodeGenerator {
__ cmpwi (CCR0, keylen, 52);
__ beq (CCR0, L_doLast);
+#ifdef ASSERT
+ __ cmpwi (CCR0, keylen, 60);
+ __ bne (CCR0, L_error);
+#endif
+
// 12th - 13th rounds
__ vcipher (vRet, vRet, vKey1);
__ vcipher (vRet, vRet, vKey2);
@@ -2763,29 +2768,30 @@ class StubGenerator: public StubCodeGenerator {
__ vcipher (vRet, vRet, vKey1);
__ vcipherlast (vRet, vRet, vKey2);
- // store result (unaligned)
#ifdef VM_LITTLE_ENDIAN
- __ lvsl (toPerm, to);
-#else
- __ lvsr (toPerm, to);
-#endif
- __ vspltisb (vTmp3, -1);
- __ vspltisb (vTmp4, 0);
- __ lvx (vTmp1, to);
- __ lvx (vTmp2, fifteen, to);
-#ifdef VM_LITTLE_ENDIAN
- __ vperm (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
- __ vxor (toPerm, toPerm, fSplt); // swap bytes
-#else
- __ vperm (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
+ // toPerm = 0x0F0E0D0C0B0A09080706050403020100
+ __ lvsl (toPerm, keypos); // keypos is a multiple of 16
+ __ vxor (toPerm, toPerm, fSplt);
+
+ // Swap bytes
+ __ vperm (vRet, vRet, vRet, toPerm);
#endif
- __ vperm (vTmp4, vRet, vRet, toPerm); // rotate data
- __ vsel (vTmp2, vTmp4, vTmp2, vTmp3);
- __ vsel (vTmp1, vTmp1, vTmp4, vTmp3);
- __ stvx (vTmp2, fifteen, to); // store this one first (may alias)
- __ stvx (vTmp1, to);
+
+ // store result (unaligned)
+ // Note: We can't use a read-modify-write sequence that touches additional bytes.
+ Register lo = temp, hi = fifteen; // Reuse
+ __ vsldoi (vTmp1, vRet, vRet, 8);
+ __ mfvrd (hi, vRet);
+ __ mfvrd (lo, vTmp1);
+ __ std (hi, 0 LITTLE_ENDIAN_ONLY(+ 8), to);
+ __ std (lo, 0 BIG_ENDIAN_ONLY(+ 8), to);
__ blr();
+
+#ifdef ASSERT
+ __ bind(L_error);
+ __ stop("aescrypt_encryptBlock: invalid key length");
+#endif
return start;
}
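
The rewritten epilogue replaces the old lvx/vsel/stvx read-modify-write, which could load and store bytes outside the 16-byte destination, with two plain 8-byte stores. A C++ sketch of the equivalent store, assuming hi/lo hold the two vector halves moved out via mfvrd (the LITTLE_ENDIAN_ONLY/BIG_ENDIAN_ONLY macros reduce to the offset selection shown):

#include <cstdint>
#include <cstring>

// Store a 128-bit block as two independent 8-byte stores, touching exactly
// 16 bytes of the destination and nothing beyond it.
void store_block(uint8_t* to, uint64_t hi, uint64_t lo, bool little_endian) {
  std::memcpy(to + (little_endian ? 8 : 0), &hi, sizeof(hi));
  std::memcpy(to + (little_endian ? 0 : 8), &lo, sizeof(lo));
}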
@@ -2799,9 +2805,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ function_entry();
- Label L_doLast;
- Label L_do44;
- Label L_do52;
+ Label L_doLast, L_do44, L_do52, L_error;
Register from = R3_ARG1; // source array address
Register to = R4_ARG2; // destination array address
@@ -2832,7 +2836,7 @@ class StubGenerator: public StubCodeGenerator {
__ li (fifteen, 15);
- // load unaligned from[0-15] to vsRet
+ // load unaligned from[0-15] to vRet
__ lvx (vRet, from);
__ lvx (vTmp1, fifteen, from);
__ lvsl (fromPerm, from);
@@ -2861,6 +2865,11 @@ class StubGenerator: public StubCodeGenerator {
__ cmpwi (CCR0, keylen, 52);
__ beq (CCR0, L_do52);
+#ifdef ASSERT
+ __ cmpwi (CCR0, keylen, 60);
+ __ bne (CCR0, L_error);
+#endif
+
// load the 15th round key to vKey1
__ li (keypos, 240);
__ lvx (vKey1, keypos, key);
@@ -2897,6 +2906,7 @@ class StubGenerator: public StubCodeGenerator {
__ b (L_doLast);
+ __ align(32);
__ bind (L_do52);
// load the 13th round key to vKey1
@@ -2923,6 +2933,7 @@ class StubGenerator: public StubCodeGenerator {
__ b (L_doLast);
+ __ align(32);
__ bind (L_do44);
// load the 11th round key to vKey1
@@ -3000,29 +3011,30 @@ class StubGenerator: public StubCodeGenerator {
__ vncipher (vRet, vRet, vKey4);
__ vncipherlast (vRet, vRet, vKey5);
- // store result (unaligned)
-#ifdef VM_LITTLE_ENDIAN
- __ lvsl (toPerm, to);
-#else
- __ lvsr (toPerm, to);
-#endif
- __ vspltisb (vTmp3, -1);
- __ vspltisb (vTmp4, 0);
- __ lvx (vTmp1, to);
- __ lvx (vTmp2, fifteen, to);
#ifdef VM_LITTLE_ENDIAN
- __ vperm (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
- __ vxor (toPerm, toPerm, fSplt); // swap bytes
-#else
- __ vperm (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
+ // toPerm = 0x0F0E0D0C0B0A09080706050403020100
+ __ lvsl (toPerm, keypos); // keypos is a multiple of 16
+ __ vxor (toPerm, toPerm, fSplt);
+
+ // Swap bytes
+ __ vperm (vRet, vRet, vRet, toPerm);
#endif
- __ vperm (vTmp4, vRet, vRet, toPerm); // rotate data
- __ vsel (vTmp2, vTmp4, vTmp2, vTmp3);
- __ vsel (vTmp1, vTmp1, vTmp4, vTmp3);
- __ stvx (vTmp2, fifteen, to); // store this one first (may alias)
- __ stvx (vTmp1, to);
+
+ // store result (unaligned)
+ // Note: We can't use a read-modify-write sequence that touches additional bytes.
+ Register lo = temp, hi = fifteen; // Reuse
+ __ vsldoi (vTmp1, vRet, vRet, 8);
+ __ mfvrd (hi, vRet);
+ __ mfvrd (lo, vTmp1);
+ __ std (hi, 0 LITTLE_ENDIAN_ONLY(+ 8), to);
+ __ std (lo, 0 BIG_ENDIAN_ONLY(+ 8), to);
__ blr();
+
+#ifdef ASSERT
+ __ bind(L_error);
+ __ stop("aescrypt_decryptBlock: invalid key length");
+#endif
return start;
}
diff --git a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
index 796a8b1bf78856fb9d3ec0d860806f7f67cd011a..14d3568d5d73e3288ea5095e6fafceb031088a8c 100644
--- a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,9 +73,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
slop_delta = load_const_maxLen - (__ pc() - start_pc);
slop_bytes += slop_delta;
assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
- __ lwz(R12_scratch2, offs, R11_scratch1);
+ __ ld(R12_scratch2, offs, R11_scratch1);
__ addi(R12_scratch2, R12_scratch2, 1);
- __ stw(R12_scratch2, offs, R11_scratch1);
+ __ std(R12_scratch2, offs, R11_scratch1);
}
#endif
@@ -141,6 +141,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
if (s == NULL) {
return NULL;
}
+
// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs.
@@ -160,9 +161,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
slop_delta = load_const_maxLen - (__ pc() - start_pc);
slop_bytes += slop_delta;
assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
- __ lwz(R12_scratch2, offs, R11_scratch1);
+ __ ld(R12_scratch2, offs, R11_scratch1);
__ addi(R12_scratch2, R12_scratch2, 1);
- __ stw(R12_scratch2, offs, R11_scratch1);
+ __ std(R12_scratch2, offs, R11_scratch1);
}
#endif
diff --git a/src/hotspot/cpu/s390/c1_globals_s390.hpp b/src/hotspot/cpu/s390/c1_globals_s390.hpp
index ec08cda96934c4b9274817fd2accf59c27224c4f..a939b32bd5be42c1c1eca89b59c4bbbf2af203d5 100644
--- a/src/hotspot/cpu/s390/c1_globals_s390.hpp
+++ b/src/hotspot/cpu/s390/c1_globals_s390.hpp
@@ -51,7 +51,6 @@ define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M);
define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
-define_pd_global(size_t, MetaspaceSize, 12*M);
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
diff --git a/src/hotspot/cpu/s390/c2_globals_s390.hpp b/src/hotspot/cpu/s390/c2_globals_s390.hpp
index 0704a4bdab7ee95cf37410972cbc12866751be47..e747f6c8c517905a8aef397dac93ffb575360b6c 100644
--- a/src/hotspot/cpu/s390/c2_globals_s390.hpp
+++ b/src/hotspot/cpu/s390/c2_globals_s390.hpp
@@ -82,9 +82,6 @@ define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on z/Architecture.
-// Heap related flags
-define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M));
-
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index 9c06a20ae70d0af70b0feddbdba6697c095b5b7d..7554a3f00e8a54d079e3faa425d80675e3e09271 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -3603,6 +3603,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
address base = CompressedKlassPointers::base();
int shift = CompressedKlassPointers::shift();
+ bool need_zero_extend = base != 0;
assert(UseCompressedClassPointers, "only for compressed klass ptrs");
BLOCK_COMMENT("cKlass encoder {");
@@ -3619,28 +3620,76 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
bind(ok);
#endif
- if (base != NULL) {
- unsigned int base_h = ((unsigned long)base)>>32;
- unsigned int base_l = (unsigned int)((unsigned long)base);
- if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
- lgr_if_needed(dst, current);
- z_aih(dst, -((int)base_h)); // Base has no set bits in lower half.
- } else if ((base_h == 0) && (base_l != 0)) {
- lgr_if_needed(dst, current);
- z_agfi(dst, -(int)base_l);
- } else {
- load_const(Z_R0, base);
- lgr_if_needed(dst, current);
- z_sgr(dst, Z_R0);
- }
- current = dst;
- }
+ // Scale down the incoming klass pointer first.
+ // We can then be sure the calculated offset fits into 32 bits.
+ // More generally speaking: all subsequent calculations are purely 32-bit.
if (shift != 0) {
assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
z_srlg(dst, current, shift);
current = dst;
}
- lgr_if_needed(dst, current); // Move may be required (if neither base nor shift != 0).
+
+ if (base != NULL) {
+ // Use scaled-down base address parts to match scaled-down klass pointer.
+ unsigned int base_h = ((unsigned long)base)>>(32+shift);
+ unsigned int base_l = (unsigned int)(((unsigned long)base)>>shift);
+
+ // General considerations:
+ // - When calculating (current_h - base_h), all bits must cancel (become 0).
+ // Otherwise, we would end up with a compressed klass pointer which doesn't
+ // fit into 32 bits.
+ // - Only bit#33 of the difference could potentially be non-zero. For that
+ // to happen, (current_l < base_l) must hold. In this case, the subtraction
+ // will create a borrow out of bit#32, nicely killing bit#33.
+ // - With the above, we only need to consider current_l and base_l to
+ // calculate the result.
+ // - Both values are treated as unsigned. The unsigned subtraction is
+ // replaced by adding (unsigned) the 2's complement of the subtrahend.
+
+ if (base_l == 0) {
+ // - In theory, the calculation performed here (current_h - base_h) MUST
+ // cancel all high-word bits. Otherwise, we would end up with an offset
+ // (i.e. a compressed klass pointer) that does not fit into 32 bits.
+ // - current_l remains unchanged.
+ // - Therefore, we can replace the entire calculation with just a
+ // zero-extending 32-to-64-bit load.
+ // - Even that can be replaced with a conditional load if dst != current.
+ // (This is a local view; the shift step may have requested zero-extension.)
+ } else {
+ if ((base_h == 0) && is_uimm(base_l, 31)) {
+ // If we happen to find that (base_h == 0), and that base_l is within the range
+ // which can be represented by a signed int, then we can use a 64-bit signed add with
+ // (-base_l) as a 32-bit signed immediate operand. The add will take care of the
+ // upper 32 bits of the result, sparing us an extra zero extension.
+ // For base_l to be in the required range, it must not have the most significant
+ // bit (aka sign bit) set.
+ lgr_if_needed(dst, current); // no zero/sign extension in this case!
+ z_agfi(dst, -(int)base_l); // base_l must be passed as signed.
+ need_zero_extend = false;
+ current = dst;
+ } else {
+ // To begin with, we may need to copy and/or zero-extend the register operand.
+ // We have to calculate (current_l - base_l). Because there is no unsigned
+ // subtract instruction with immediate operand, we add the 2's complement of base_l.
+ if (need_zero_extend) {
+ z_llgfr(dst, current);
+ need_zero_extend = false;
+ } else {
+ llgfr_if_needed(dst, current);
+ }
+ current = dst;
+ z_alfi(dst, -base_l);
+ }
+ }
+ }
+
+ if (need_zero_extend) {
+ // We must zero-extend the calculated result. It may have leftover bits in
+ // the high word because the optimized calculation left them untouched.
+ z_llgfr(dst, current);
+ } else {
+ llgfr_if_needed(dst, current); // zero-extension while copying comes at no extra cost.
+ }
BLOCK_COMMENT("} cKlass encoder");
}
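
A small worked model of the 32-bit path may help: with the shift folded out, the encoder zero-extends the low word and adds the 2's complement of base_l; the 32-bit wrap-around supplies exactly the borrow discussed in the comments above. A sketch under the assumption that the high words cancel (i.e. the narrow klass pointer fits in 32 bits):

#include <cassert>
#include <cstdint>

// Models z_llgfr (zero-extend low word) followed by z_alfi with -base_l.
uint32_t encode_klass_low(uint64_t klass, uint64_t base) {
  uint32_t current_l = (uint32_t)klass;                           // z_llgfr
  uint32_t narrow = current_l + (uint32_t)(0u - (uint32_t)base);  // z_alfi
  assert((uint64_t)narrow == klass - base);  // holds when high words cancel
  return narrow;
}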
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index 79980aeb670a582de67efbce03f38d0a9f80e118..43c5bba706afa2f8371fb85e2a56edcaddf5f820 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -556,16 +556,6 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
}
}
-size_t SharedRuntime::trampoline_size() {
- return MacroAssembler::load_const_size() + 2;
-}
-
-void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
- // Think about using pc-relative branch.
- __ load_const(Z_R1_scratch, destination);
- __ z_br(Z_R1_scratch);
-}
-
// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler * masm,
BasicType ret_type,
@@ -3468,10 +3458,12 @@ int SpinPause() {
return 0;
}
-BufferBlob* SharedRuntime::make_native_invoker(address call_target,
- int shadow_space_bytes,
- const GrowableArray<VMReg>& input_registers,
- const GrowableArray<VMReg>& output_registers) {
+#ifdef COMPILER2
+RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
+ int shadow_space_bytes,
+ const GrowableArray<VMReg>& input_registers,
+ const GrowableArray<VMReg>& output_registers) {
Unimplemented();
return nullptr;
}
+#endif
diff --git a/src/hotspot/cpu/s390/vtableStubs_s390.cpp b/src/hotspot/cpu/s390/vtableStubs_s390.cpp
index 306cce9395d2f42ac3f37f95693688dd0b4c8e34..56a9e36721ce4baa026abceb225707aaa940e80c 100644
--- a/src/hotspot/cpu/s390/vtableStubs_s390.cpp
+++ b/src/hotspot/cpu/s390/vtableStubs_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Abuse Z_method as scratch register for generic emitter.
// It is loaded further down anyway before it is first used.
// No dynamic code size variance here, increment is 1, always.
- __ add2mem_32(Address(Z_R1_scratch), 1, Z_method);
+ __ add2mem_64(Address(Z_R1_scratch), 1, Z_method);
}
#endif
@@ -158,6 +158,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
if (s == NULL) {
return NULL;
}
+
// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs.
@@ -179,7 +180,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
// Abuse Z_method as scratch register for generic emitter.
// It is loaded further down anyway before it is first used.
// No dynamic code size variance here, increment is 1, always.
- __ add2mem_32(Address(Z_R1_scratch), 1, Z_method);
+ __ add2mem_64(Address(Z_R1_scratch), 1, Z_method);
}
#endif
diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp
index 16446d895f570202eba60194ce459366973d91ca..241deb2867f33dfebe98c19cc8b31c8ad9bd60f4 100644
--- a/src/hotspot/cpu/x86/assembler_x86.cpp
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp
@@ -9173,6 +9173,13 @@ void Assembler::evpblendmq (XMMRegister dst, KRegister mask, XMMRegister nds, XM
emit_int16(0x64, (0xC0 | encode));
}
+void Assembler::bzhiq(Register dst, Register src1, Register src2) {
+ assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
+ InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
+ int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
+ emit_int16((unsigned char)0xF5, (0xC0 | encode));
+}
+
void Assembler::shlxl(Register dst, Register src1, Register src2) {
assert(VM_Version::supports_bmi2(), "");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp
index 0304a882a33046183822402a99b315a61ebb2a53..b73d1d2501f84e6685cd50a6f2ee730c216b6efa 100644
--- a/src/hotspot/cpu/x86/assembler_x86.hpp
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp
@@ -2092,6 +2092,7 @@ private:
void shlxq(Register dst, Register src1, Register src2);
void shrxq(Register dst, Register src1, Register src2);
+ void bzhiq(Register dst, Register src1, Register src2);
//====================VECTOR ARITHMETIC=====================================
void evpmovd2m(KRegister kdst, XMMRegister src, int vector_len);
diff --git a/src/hotspot/cpu/x86/c1_globals_x86.hpp b/src/hotspot/cpu/x86/c1_globals_x86.hpp
index a2f88c28642214d155266e6a0d306c4ecb745312..9212e321d65baa7cd95267bea3f7970a4f6dd5ac 100644
--- a/src/hotspot/cpu/x86/c1_globals_x86.hpp
+++ b/src/hotspot/cpu/x86/c1_globals_x86.hpp
@@ -51,7 +51,6 @@ define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(uintx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
-define_pd_global(size_t, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
index 4b7649cca535e8757d1cffc819ab20b1bc83714c..a4744970aca4be4082229be1d75cf96f9a780ba9 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -1894,17 +1894,9 @@ void C2_MacroAssembler::reduce8L(int opcode, Register dst, Register src1, XMMReg
}
void C2_MacroAssembler::genmask(Register dst, Register len, Register temp) {
- if (ArrayCopyPartialInlineSize <= 32) {
- mov64(dst, 1);
- shlxq(dst, dst, len);
- decq(dst);
- } else {
- mov64(dst, -1);
- movq(temp, len);
- negptr(temp);
- addptr(temp, 64);
- shrxq(dst, dst, temp);
- }
+ assert(ArrayCopyPartialInlineSize <= 64, "");
+ mov64(dst, -1L);
+ bzhiq(dst, dst, len);
}
#endif // _LP64
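
genmask now relies on BZHI, which copies the source and clears all bits at and above the given index, so (-1, len) yields the low-len mask in a single instruction regardless of ArrayCopyPartialInlineSize. A scalar model of the instruction's semantics:

#include <cstdint>

// BZHI: keep the low n bits of src, where n comes from the low 8 bits of
// the index operand; n >= 64 leaves src unchanged (and sets CF, not modeled).
uint64_t bzhi64(uint64_t src, uint64_t index) {
  unsigned n = (unsigned)(index & 0xFF);
  return (n >= 64) ? src : (src & ((UINT64_C(1) << n) - 1));
}
// genmask(dst, len) is then bzhi64(~0ULL, len) == (1 << len) - 1 for len < 64.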
diff --git a/src/hotspot/cpu/x86/c2_globals_x86.hpp b/src/hotspot/cpu/x86/c2_globals_x86.hpp
index 42704bcd57052f76ddb9330806a10c40bb4c866c..776caa30cf9a55b104136aaf2e138d3d00bd09c4 100644
--- a/src/hotspot/cpu/x86/c2_globals_x86.hpp
+++ b/src/hotspot/cpu/x86/c2_globals_x86.hpp
@@ -91,9 +91,6 @@ define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on x86.
-// Heap related flags
-define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M));
-
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
diff --git a/src/hotspot/cpu/x86/frame_x86.cpp b/src/hotspot/cpu/x86/frame_x86.cpp
index 1234bc88dbcc736f05449a42ed22ff9b5c2941b8..6195d479a8c48b6c94b13fccf61f2962aedbde74 100644
--- a/src/hotspot/cpu/x86/frame_x86.cpp
+++ b/src/hotspot/cpu/x86/frame_x86.cpp
@@ -346,10 +346,6 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
vmassert(jfa->last_Java_pc() != NULL, "not walkable");
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
- if (jfa->saved_rbp_address()) {
- update_map_with_saved_link(map, jfa->saved_rbp_address());
- }
-
return fr;
}
diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
index 274cc1a8702ba8e54f6bb555802ef387a2cf3c9e..6e820f98689141a8c625eeca5d8a50b4b5b3882a 100644
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
diff --git a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp
index c00ea223f00dcca556d521f1d937ae9fc833a124..9b2d2c5efedcee2eba8c4a90adfe02b7e054dd7c 100644
--- a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp
@@ -118,9 +118,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
int dirty = CardTable::dirty_card_val();
if (UseCondCardMark) {
Label L_already_dirty;
- if (ct->scanned_concurrently()) {
- __ membar(Assembler::StoreLoad);
- }
__ cmpb(card_addr, dirty);
__ jcc(Assembler::equal, L_already_dirty);
__ movb(card_addr, dirty);
diff --git a/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp b/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp
index 4579b7377a0e9eae467ac2b694b5922a4b363560..bb39c8e513e51ceac3b6d501bec117564348970c 100644
--- a/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp
+++ b/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp
@@ -30,9 +30,6 @@ private:
// FP value associated with _last_Java_sp:
intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to
- // (Optional) location of saved RBP register, which GCs want to inspect
- intptr_t** volatile _saved_rbp_address;
-
public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:
@@ -46,7 +43,6 @@ public:
// fence?
_last_Java_fp = NULL;
_last_Java_pc = NULL;
- _saved_rbp_address = NULL;
}
void copy(JavaFrameAnchor* src) {
@@ -64,8 +60,6 @@ public:
_last_Java_pc = src->_last_Java_pc;
// Must be last so profiler will always see valid frame if has_last_frame() is true
_last_Java_sp = src->_last_Java_sp;
-
- _saved_rbp_address = src->_saved_rbp_address;
}
bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
@@ -76,12 +70,9 @@ public:
address last_Java_pc(void) { return _last_Java_pc; }
- intptr_t** saved_rbp_address(void) const { return _saved_rbp_address; }
-
private:
static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }
- static ByteSize saved_rbp_address_offset() { return byte_offset_of(JavaFrameAnchor, _saved_rbp_address); }
public:
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index d6e7783bee1719930183cd2c43a7402d482fbe03..ee3f1a8533459829640078d1fa0dcc90d275b863 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -2732,7 +2732,6 @@ void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp)
}
// Always clear the pc because it could have been set by make_walkable()
movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
- movptr(Address(java_thread, JavaThread::saved_rbp_address_offset()), NULL_WORD);
vzeroupper();
}
@@ -3005,6 +3004,16 @@ void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src
}
}
+void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
+ assert(UseAVX > 0, "requires some form of AVX");
+ if (reachable(src)) {
+ Assembler::vpaddb(dst, nds, as_Address(src), vector_len);
+ } else {
+ lea(rscratch, src);
+ Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len);
+ }
+}
+
void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
assert(UseAVX > 0, "requires some form of AVX");
if (reachable(src)) {
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
index 5cabc583fc0e6becab0c033608f2543e070b9b91..a92155ec5ad1fa37f7d3ea1a8c8b8320aa75388c 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -1245,6 +1245,7 @@ public:
void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+ void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86_arrayCopy_avx3.cpp b/src/hotspot/cpu/x86/macroAssembler_x86_arrayCopy_avx3.cpp
index b03338a82089d67d77051a22991bfdc4d967d409..ff92b5647ba84538a3786fb0920b9a49c5a4fc4e 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86_arrayCopy_avx3.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86_arrayCopy_avx3.cpp
@@ -196,10 +196,8 @@ void MacroAssembler::copy64_masked_avx(Register dst, Register src, XMMRegister x
} else {
Address::ScaleFactor scale = (Address::ScaleFactor)(shift);
assert(MaxVectorSize == 64, "vector length != 64");
- negptr(length);
- addq(length, 64);
- mov64(temp, -1);
- shrxq(temp, temp, length);
+ mov64(temp, -1L);
+ bzhiq(temp, temp, length);
kmovql(mask, temp);
evmovdqu(type[shift], mask, xmm, Address(src, index, scale, offset), Assembler::AVX_512bit);
evmovdqu(type[shift], mask, Address(dst, index, scale, offset), xmm, Assembler::AVX_512bit);
@@ -213,9 +211,8 @@ void MacroAssembler::copy32_masked_avx(Register dst, Register src, XMMRegister x
assert(MaxVectorSize >= 32, "vector length should be >= 32");
BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG};
Address::ScaleFactor scale = (Address::ScaleFactor)(shift);
- mov64(temp, 1);
- shlxq(temp, temp, length);
- decq(temp);
+ mov64(temp, -1L);
+ bzhiq(temp, temp, length);
kmovql(mask, temp);
evmovdqu(type[shift], mask, xmm, Address(src, index, scale, offset), Assembler::AVX_256bit);
evmovdqu(type[shift], mask, Address(dst, index, scale, offset), xmm, Assembler::AVX_256bit);
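
Both copy helpers now feed the BZHI-produced mask into kmovql and a masked evmovdqu, so only the first length lanes of the tail are read and written. A scalar model of that masked copy, for the byte-element case:

#include <cstddef>
#include <cstdint>

// What the kmovql + masked evmovdqu pair does: copy lane i only if bit i
// of the mask is set.
void masked_tail_copy(uint8_t* dst, const uint8_t* src, size_t length /* < 64 */) {
  uint64_t mask = (UINT64_C(1) << length) - 1;   // bzhiq(-1, length)
  for (unsigned i = 0; i < 64; i++) {
    if (mask & (UINT64_C(1) << i)) dst[i] = src[i];
  }
}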
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
index b807541f71c523b10b7f3e85b1f9075c18ce9f19..11a1a235741cb50f097e4addf39676a216e546f2 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
@@ -371,14 +371,6 @@ bool SharedRuntime::is_wide_vector(int size) {
return size > 16;
}
-size_t SharedRuntime::trampoline_size() {
- return 16;
-}
-
-void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
- __ jump(RuntimeAddress(destination));
-}
-
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
@@ -2980,10 +2972,12 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
-BufferBlob* SharedRuntime::make_native_invoker(address call_target,
+#ifdef COMPILER2
+RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers) {
ShouldNotCallThis();
return nullptr;
}
+#endif
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index 837b59d44fe2ec799c5508e8eacbbbd6fc4f32b8..a2d7c5a82d3f24ce4555639ba87ef224f9b60171 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -423,14 +423,6 @@ bool SharedRuntime::is_wide_vector(int size) {
return size > 16;
}
-size_t SharedRuntime::trampoline_size() {
- return 16;
-}
-
-void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
- __ jump(RuntimeAddress(destination));
-}
-
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
@@ -3161,7 +3153,6 @@ void SharedRuntime::generate_uncommon_trap_blob() {
}
#endif // COMPILER2
-
//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
@@ -3410,6 +3401,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}
+#ifdef COMPILER2
static const int native_invoker_code_size = MethodHandles::adapter_code_size;
class NativeInvokerGenerator : public StubCodeGenerator {
@@ -3418,6 +3410,10 @@ class NativeInvokerGenerator : public StubCodeGenerator {
const GrowableArray<VMReg>& _input_registers;
const GrowableArray<VMReg>& _output_registers;
+
+ int _frame_complete;
+ int _framesize;
+ OopMapSet* _oop_maps;
public:
NativeInvokerGenerator(CodeBuffer* buffer,
address call_target,
@@ -3428,23 +3424,54 @@ public:
_call_target(call_target),
_shadow_space_bytes(shadow_space_bytes),
_input_registers(input_registers),
- _output_registers(output_registers) {}
+ _output_registers(output_registers),
+ _frame_complete(0),
+ _framesize(0),
+ _oop_maps(NULL) {
+ assert(_output_registers.length() <= 1
+ || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
+
+ }
+
void generate();
- void spill_register(VMReg reg) {
+ int spill_size_in_bytes() const {
+ if (_output_registers.length() == 0) {
+ return 0;
+ }
+ VMReg reg = _output_registers.at(0);
+ assert(reg->is_reg(), "must be a register");
+ if (reg->is_Register()) {
+ return 8;
+ } else if (reg->is_XMMRegister()) {
+ if (UseAVX >= 3) {
+ return 64;
+ } else if (UseAVX >= 1) {
+ return 32;
+ } else {
+ return 16;
+ }
+ } else {
+ ShouldNotReachHere();
+ }
+ return 0;
+ }
+
+ void spill_out_registers() {
+ if (_output_registers.length() == 0) {
+ return;
+ }
+ VMReg reg = _output_registers.at(0);
assert(reg->is_reg(), "must be a register");
MacroAssembler* masm = _masm;
if (reg->is_Register()) {
- __ push(reg->as_Register());
+ __ movptr(Address(rsp, 0), reg->as_Register());
} else if (reg->is_XMMRegister()) {
if (UseAVX >= 3) {
- __ subptr(rsp, 64); // bytes
__ evmovdqul(Address(rsp, 0), reg->as_XMMRegister(), Assembler::AVX_512bit);
} else if (UseAVX >= 1) {
- __ subptr(rsp, 32);
__ vmovdqu(Address(rsp, 0), reg->as_XMMRegister());
} else {
- __ subptr(rsp, 16);
__ movdqu(Address(rsp, 0), reg->as_XMMRegister());
}
} else {
@@ -3452,27 +3479,40 @@ public:
}
}
- void fill_register(VMReg reg) {
+ void fill_out_registers() {
+ if (_output_registers.length() == 0) {
+ return;
+ }
+ VMReg reg = _output_registers.at(0);
assert(reg->is_reg(), "must be a register");
MacroAssembler* masm = _masm;
if (reg->is_Register()) {
- __ pop(reg->as_Register());
+ __ movptr(reg->as_Register(), Address(rsp, 0));
} else if (reg->is_XMMRegister()) {
if (UseAVX >= 3) {
__ evmovdqul(reg->as_XMMRegister(), Address(rsp, 0), Assembler::AVX_512bit);
- __ addptr(rsp, 64); // bytes
} else if (UseAVX >= 1) {
__ vmovdqu(reg->as_XMMRegister(), Address(rsp, 0));
- __ addptr(rsp, 32);
} else {
__ movdqu(reg->as_XMMRegister(), Address(rsp, 0));
- __ addptr(rsp, 16);
}
} else {
ShouldNotReachHere();
}
}
+ int frame_complete() const {
+ return _frame_complete;
+ }
+
+ int framesize() const {
+ return (_framesize >> (LogBytesPerWord - LogBytesPerInt));
+ }
+
+ OopMapSet* oop_maps() const {
+ return _oop_maps;
+ }
+
private:
#ifdef ASSERT
bool target_uses_register(VMReg reg) {
@@ -3481,57 +3521,61 @@ bool target_uses_register(VMReg reg) {
#endif
};
-BufferBlob* SharedRuntime::make_native_invoker(address call_target,
- int shadow_space_bytes,
- const GrowableArray<VMReg>& input_registers,
- const GrowableArray<VMReg>& output_registers) {
- BufferBlob* _invoke_native_blob = BufferBlob::create("nep_invoker_blob", native_invoker_code_size);
- if (_invoke_native_blob == NULL)
- return NULL; // allocation failure
-
- CodeBuffer code(_invoke_native_blob);
+RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
+ int shadow_space_bytes,
+ const GrowableArray<VMReg>& input_registers,
+ const GrowableArray<VMReg>& output_registers) {
+ int locs_size = 64;
+ CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers);
g.generate();
code.log_section_sizes("nep_invoker_blob");
- return _invoke_native_blob;
+ RuntimeStub* stub =
+ RuntimeStub::new_runtime_stub("nep_invoker_blob",
+ &code,
+ g.frame_complete(),
+ g.framesize(),
+ g.oop_maps(), false);
+ return stub;
}
void NativeInvokerGenerator::generate() {
assert(!(target_uses_register(r15_thread->as_VMReg()) || target_uses_register(rscratch1->as_VMReg())), "Register conflict");
+ enum layout {
+ rbp_off,
+ rbp_off2,
+ return_off,
+ return_off2,
+ framesize // inclusive of return address
+ };
+
+ _framesize = align_up(framesize + ((_shadow_space_bytes + spill_size_in_bytes()) >> LogBytesPerInt), 4);
+ assert(is_even(_framesize/2), "sp not 16-byte aligned");
+
+ _oop_maps = new OopMapSet();
MacroAssembler* masm = _masm;
- __ enter();
- Address java_pc(r15_thread, JavaThread::last_Java_pc_offset());
- __ movptr(rscratch1, Address(rsp, 8)); // read return address from stack
- __ movptr(java_pc, rscratch1);
+ address start = __ pc();
- __ movptr(rscratch1, rsp);
- __ addptr(rscratch1, 16); // skip return and frame
- __ movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), rscratch1);
+ __ enter();
- __ movptr(Address(r15_thread, JavaThread::saved_rbp_address_offset()), rsp); // rsp points at saved RBP
+ // return address and rbp are already in place
+ __ subptr(rsp, (_framesize-4) << LogBytesPerInt); // prolog
- // State transition
- __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
+ _frame_complete = __ pc() - start;
- if (_shadow_space_bytes != 0) {
- // needed here for correct stack args offset on Windows
- __ subptr(rsp, _shadow_space_bytes);
- }
+ address the_pc = __ pc();
- __ call(RuntimeAddress(_call_target));
+ __ set_last_Java_frame(rsp, rbp, (address)the_pc);
+ OopMap* map = new OopMap(_framesize, 0);
+ _oop_maps->add_gc_map(the_pc - start, map);
- if (_shadow_space_bytes != 0) {
- // needed here for correct stack args offset on Windows
- __ addptr(rsp, _shadow_space_bytes);
- }
+ // State transition
+ __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
- assert(_output_registers.length() <= 1
- || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
- bool need_spills = _output_registers.length() != 0;
- VMReg ret_reg = need_spills ? _output_registers.at(0) : VMRegImpl::Bad();
+ __ call(RuntimeAddress(_call_target));
__ restore_cpu_control_state_after_jni();
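
_framesize is counted in 32-bit slots, which makes the arithmetic easy to check by hand. A compile-time sketch with hypothetical inputs (a general-register return, hence an 8-byte spill area, and 32 bytes of shadow space):

constexpr int align_up_int(int x, int a) { return (x + a - 1) & -a; }

constexpr int kBaseSlots  = 4;               // rbp_off..return_off2 from the enum
constexpr int kExtraSlots = (32 + 8) >> 2;   // (shadow + spill) bytes -> 32-bit slots
constexpr int kFramesize  = align_up_int(kBaseSlots + kExtraSlots, 4);  // 16 slots = 128 bytes

static_assert((kFramesize / 2) % 2 == 0, "rsp stays 16-byte aligned");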
@@ -3572,9 +3616,7 @@ void NativeInvokerGenerator::generate() {
__ bind(L_safepoint_poll_slow_path);
__ vzeroupper();
- if (need_spills) {
- spill_register(ret_reg);
- }
+ spill_out_registers();
__ mov(c_rarg0, r15_thread);
__ mov(r12, rsp); // remember sp
@@ -3584,9 +3626,7 @@ void NativeInvokerGenerator::generate() {
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
- if (need_spills) {
- fill_register(ret_reg);
- }
+ fill_out_registers();
__ jmp(L_after_safepoint_poll);
__ block_comment("} L_safepoint_poll_slow_path");
@@ -3597,9 +3637,7 @@ void NativeInvokerGenerator::generate() {
__ bind(L_reguard);
__ vzeroupper();
- if (need_spills) {
- spill_register(ret_reg);
- }
+ spill_out_registers();
__ mov(r12, rsp); // remember sp
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
@@ -3608,9 +3646,7 @@ void NativeInvokerGenerator::generate() {
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
- if (need_spills) {
- fill_register(ret_reg);
- }
+ fill_out_registers();
__ jmp(L_after_reguard);
@@ -3620,6 +3656,7 @@ void NativeInvokerGenerator::generate() {
__ flush();
}
+#endif // COMPILER2
//------------------------------Montgomery multiplication------------------------
//
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp
index 681db40dd31f9f669bb3f596a9245aeaf6d93f20..6ab857150006d807af41c5234dd2262d3638c52f 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -610,6 +610,21 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
+ address generate_vector_byte_shuffle_mask(const char *stub_name) {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", stub_name);
+ address start = __ pc();
+ __ emit_data(0x70707070, relocInfo::none, 0);
+ __ emit_data(0x70707070, relocInfo::none, 0);
+ __ emit_data(0x70707070, relocInfo::none, 0);
+ __ emit_data(0x70707070, relocInfo::none, 0);
+ __ emit_data(0xF0F0F0F0, relocInfo::none, 0);
+ __ emit_data(0xF0F0F0F0, relocInfo::none, 0);
+ __ emit_data(0xF0F0F0F0, relocInfo::none, 0);
+ __ emit_data(0xF0F0F0F0, relocInfo::none, 0);
+ return start;
+ }
+
address generate_vector_mask_long_double(const char *stub_name, int32_t maskhi, int32_t masklo) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", stub_name);
@@ -3981,6 +3996,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32("vector_64_bit_mask", Assembler::AVX_512bit,
0xFFFFFFFF, 0xFFFFFFFF, 0, 0);
StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask("vector_int_shuffle_mask", 0x03020100);
+ StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask("vector_byte_shuffle_mask");
StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask("vector_short_shuffle_mask", 0x01000100);
StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask_long_double("vector_long_shuffle_mask", 0x00000001, 0x0);
StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask("vector_byte_perm_mask");
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
index be8377cde0d706b0519a467948ec0fe4109b6b2e..e1fa12d6509ef6331ded37f9ca6d578b6abf79f3 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
@@ -808,6 +808,17 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
+ address generate_vector_byte_shuffle_mask(const char *stub_name) {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", stub_name);
+ address start = __ pc();
+ __ emit_data64(0x7070707070707070, relocInfo::none);
+ __ emit_data64(0x7070707070707070, relocInfo::none);
+ __ emit_data64(0xF0F0F0F0F0F0F0F0, relocInfo::none);
+ __ emit_data64(0xF0F0F0F0F0F0F0F0, relocInfo::none);
+ return start;
+ }
+
address generate_fp_mask(const char *stub_name, int64_t mask) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", stub_name);
@@ -1471,6 +1482,7 @@ class StubGenerator: public StubCodeGenerator {
__ subq(temp1, loop_size[shift]);
// Main loop with aligned copy block size of 192 bytes at 32 byte granularity.
+ __ align(32);
__ BIND(L_main_loop);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 0);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 64);
@@ -1537,6 +1549,7 @@ class StubGenerator: public StubCodeGenerator {
// Main loop with aligned copy block size of 192 bytes at
// 64 byte copy granularity.
+ __ align(32);
__ BIND(L_main_loop_64bytes);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 0 , true);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 64, true);
@@ -1676,6 +1689,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_main_pre_loop);
// Main loop with aligned copy block size of 192 bytes at 32 byte granularity.
+ __ align(32);
__ BIND(L_main_loop);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -64);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -128);
@@ -1708,6 +1722,7 @@ class StubGenerator: public StubCodeGenerator {
// Main loop with aligned copy block size of 192 bytes at
// 64 byte copy granularity.
+ __ align(32);
__ BIND(L_main_loop_64bytes);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -64 , true);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -128, true);
@@ -1770,7 +1785,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
#if COMPILER2_OR_JVMCI
- if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) {
+ if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_disjoint_copy_avx3_masked(entry, "jbyte_disjoint_arraycopy_avx3", 0,
aligned, false, false);
}
@@ -1886,7 +1901,7 @@ class StubGenerator: public StubCodeGenerator {
address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
address* entry, const char *name) {
#if COMPILER2_OR_JVMCI
- if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) {
+ if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_conjoint_copy_avx3_masked(entry, "jbyte_conjoint_arraycopy_avx3", 0,
nooverlap_target, aligned, false, false);
}
@@ -1997,7 +2012,7 @@ class StubGenerator: public StubCodeGenerator {
//
address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
#if COMPILER2_OR_JVMCI
- if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) {
+ if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_disjoint_copy_avx3_masked(entry, "jshort_disjoint_arraycopy_avx3", 1,
aligned, false, false);
}
@@ -2128,7 +2143,7 @@ class StubGenerator: public StubCodeGenerator {
address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
address *entry, const char *name) {
#if COMPILER2_OR_JVMCI
- if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) {
+ if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_conjoint_copy_avx3_masked(entry, "jshort_conjoint_arraycopy_avx3", 1,
nooverlap_target, aligned, false, false);
}
@@ -2232,7 +2247,7 @@ class StubGenerator: public StubCodeGenerator {
address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
const char *name, bool dest_uninitialized = false) {
#if COMPILER2_OR_JVMCI
- if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) {
+ if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_disjoint_copy_avx3_masked(entry, "jint_disjoint_arraycopy_avx3", 2,
aligned, is_oop, dest_uninitialized);
}
@@ -2343,7 +2358,7 @@ class StubGenerator: public StubCodeGenerator {
address *entry, const char *name,
bool dest_uninitialized = false) {
#if COMPILER2_OR_JVMCI
- if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) {
+ if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_conjoint_copy_avx3_masked(entry, "jint_conjoint_arraycopy_avx3", 2,
nooverlap_target, aligned, is_oop, dest_uninitialized);
}
@@ -2456,7 +2471,7 @@ class StubGenerator: public StubCodeGenerator {
address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
const char *name, bool dest_uninitialized = false) {
#if COMPILER2_OR_JVMCI
- if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) {
+ if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_disjoint_copy_avx3_masked(entry, "jlong_disjoint_arraycopy_avx3", 3,
aligned, is_oop, dest_uninitialized);
}
@@ -2566,7 +2581,7 @@ class StubGenerator: public StubCodeGenerator {
address nooverlap_target, address *entry,
const char *name, bool dest_uninitialized = false) {
#if COMPILER2_OR_JVMCI
- if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) {
+ if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_conjoint_copy_avx3_masked(entry, "jlong_conjoint_arraycopy_avx3", 3,
nooverlap_target, aligned, is_oop, dest_uninitialized);
}
@@ -6828,6 +6843,7 @@ address generate_avx_ghash_processBlocks() {
StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32("vector_64_bit_mask", Assembler::AVX_512bit,
0xFFFFFFFF, 0xFFFFFFFF, 0, 0);
StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask("vector_int_shuffle_mask", 0x0302010003020100);
+ StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask("vector_byte_shuffle_mask");
StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask("vector_short_shuffle_mask", 0x0100010001000100);
StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask("vector_long_shuffle_mask", 0x0000000100000000);
StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask("vector_long_sign_mask", 0x8000000000000000);
diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.cpp b/src/hotspot/cpu/x86/stubRoutines_x86.cpp
index 45762902db2e8c3a55b881d9ceceeaaf2df19282..6aa4c4eb25631fd56e431c70cbe36ccb6c43a189 100644
--- a/src/hotspot/cpu/x86/stubRoutines_x86.cpp
+++ b/src/hotspot/cpu/x86/stubRoutines_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,7 @@ address StubRoutines::x86::_vector_short_to_byte_mask = NULL;
address StubRoutines::x86::_vector_int_to_byte_mask = NULL;
address StubRoutines::x86::_vector_int_to_short_mask = NULL;
address StubRoutines::x86::_vector_all_bits_set = NULL;
+address StubRoutines::x86::_vector_byte_shuffle_mask = NULL;
address StubRoutines::x86::_vector_short_shuffle_mask = NULL;
address StubRoutines::x86::_vector_int_shuffle_mask = NULL;
address StubRoutines::x86::_vector_long_shuffle_mask = NULL;
diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.hpp b/src/hotspot/cpu/x86/stubRoutines_x86.hpp
index 84ae8a75b5a8f594ff29af656030ba4891754af3..22e40b2c18116d300d8ed9477c49a48aba55f091 100644
--- a/src/hotspot/cpu/x86/stubRoutines_x86.hpp
+++ b/src/hotspot/cpu/x86/stubRoutines_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -149,6 +149,7 @@ class x86 {
static address _vector_32_bit_mask;
static address _vector_64_bit_mask;
static address _vector_int_shuffle_mask;
+ static address _vector_byte_shuffle_mask;
static address _vector_short_shuffle_mask;
static address _vector_long_shuffle_mask;
static address _vector_iota_indices;
@@ -280,6 +281,10 @@ class x86 {
return _vector_int_shuffle_mask;
}
+ static address vector_byte_shuffle_mask() {
+ return _vector_byte_shuffle_mask;
+ }
+
static address vector_short_shuffle_mask() {
return _vector_short_shuffle_mask;
}
diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp
index 5d91280e616151310b5e72a866c702f5c54bceb0..ab5e35b547934140d793bbc8cccfa8b5616a2291 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp
@@ -1063,6 +1063,11 @@ public:
static bool supports_clflushopt() { return ((_features & CPU_FLUSHOPT) != 0); }
static bool supports_clwb() { return ((_features & CPU_CLWB) != 0); }
+#ifdef __APPLE__
+ // Is the CPU running emulated (for example, macOS Rosetta running x86_64 code on M1 ARM (aarch64))?
+ static bool is_cpu_emulated();
+#endif
+
// support functions for virtualization detection
private:
static void check_virtualizations();
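
The declaration above only promises the check; one plausible implementation — an assumption, not part of this diff — uses Apple's documented sysctl.proc_translated key, which reports 1 when the process runs under Rosetta translation:

#include <cstddef>
#include <sys/sysctl.h>

// Hypothetical sketch: returns true when running translated (Rosetta 2).
// The key is absent on older macOS and on native processes without Rosetta.
static bool is_cpu_emulated_sketch() {
  int translated = 0;
  size_t size = sizeof(translated);
  if (sysctlbyname("sysctl.proc_translated", &translated, &size, NULL, 0) != 0) {
    return false;  // key unavailable: assume native
  }
  return translated == 1;
}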
diff --git a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
index e626f95b33ba5b016f5944fc38311cd60fd11863..c6181f2d007ed8880c674cbe1fec535e6aa94972 100644
--- a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
#if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) {
- __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
+ __ incrementq(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
}
#endif
@@ -148,6 +148,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
if (s == NULL) {
return NULL;
}
+
// Count unused bytes in instruction sequences of variable size.
// We add them to the computed buffer size in order to avoid
// overflow in subsequently generated stubs.
@@ -163,7 +164,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#if (!defined(PRODUCT) && defined(COMPILER2))
if (CountCompiledCalls) {
- __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
+ __ incrementq(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
}
#endif // PRODUCT
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index 3ffc7c8f06ab601ae6c5d572c4a6806d9d6c5e6d..7cf669f0e27cc6e77cdde4f3897bbe11c0601d32 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -1356,6 +1356,7 @@ Assembler::Width widthForType(BasicType bt) {
static address vector_long_sign_mask() { return StubRoutines::x86::vector_long_sign_mask(); }
static address vector_all_bits_set() { return StubRoutines::x86::vector_all_bits_set(); }
static address vector_int_to_short_mask() { return StubRoutines::x86::vector_int_to_short_mask(); }
+ static address vector_byte_shufflemask() { return StubRoutines::x86::vector_byte_shuffle_mask(); }
static address vector_short_shufflemask() { return StubRoutines::x86::vector_short_shuffle_mask(); }
static address vector_int_shufflemask() { return StubRoutines::x86::vector_int_shuffle_mask(); }
static address vector_long_shufflemask() { return StubRoutines::x86::vector_long_shuffle_mask(); }
@@ -1526,7 +1527,7 @@ const bool Matcher::match_rule_supported(int opcode) {
case Op_VectorMaskGen:
case Op_LoadVectorMasked:
case Op_StoreVectorMasked:
- if (UseAVX < 3) {
+ if (UseAVX < 3 || !VM_Version::supports_bmi2()) {
return false;
}
break;
@@ -1693,9 +1694,9 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType
return false; // Implementation limitation due to how shuffle is loaded
} else if (size_in_bits == 256 && UseAVX < 2) {
return false; // Implementation limitation
- } else if (bt == T_BYTE && size_in_bits >= 256 && !VM_Version::supports_avx512_vbmi()) {
+ } else if (bt == T_BYTE && size_in_bits > 256 && !VM_Version::supports_avx512_vbmi()) {
return false; // Implementation limitation
- } else if (bt == T_SHORT && size_in_bits >= 256 && !VM_Version::supports_avx512bw()) {
+ } else if (bt == T_SHORT && size_in_bits > 256 && !VM_Version::supports_avx512bw()) {
return false; // Implementation limitation
}
break;
@@ -7500,13 +7501,24 @@ instruct rearrangeB(vec dst, vec shuffle) %{
ins_pipe( pipe_slow );
%}
-instruct rearrangeB_avx(vec dst, vec src, vec shuffle) %{
+instruct rearrangeB_avx(legVec dst, legVec src, vec shuffle, legVec vtmp1, legVec vtmp2, rRegP scratch) %{
predicate(vector_element_basic_type(n) == T_BYTE &&
vector_length(n) == 32 && !VM_Version::supports_avx512_vbmi());
match(Set dst (VectorRearrange src shuffle));
- format %{ "vector_rearrange $dst, $shuffle, $src" %}
+ effect(TEMP dst, TEMP vtmp1, TEMP vtmp2, TEMP scratch);
+ format %{ "vector_rearrange $dst, $shuffle, $src\t! using $vtmp1, $vtmp2, $scratch as TEMP" %}
ins_encode %{
- __ vpshufb($dst$$XMMRegister, $shuffle$$XMMRegister, $src$$XMMRegister, Assembler::AVX_256bit);
+ assert(UseAVX >= 2, "required");
+ // Swap src into vtmp1
+ __ vperm2i128($vtmp1$$XMMRegister, $src$$XMMRegister, $src$$XMMRegister, 1);
+ // Shuffle swapped src to get entries from other 128 bit lane
+ __ vpshufb($vtmp1$$XMMRegister, $vtmp1$$XMMRegister, $shuffle$$XMMRegister, Assembler::AVX_256bit);
+ // Shuffle original src to get entries from self 128 bit lane
+ __ vpshufb($dst$$XMMRegister, $src$$XMMRegister, $shuffle$$XMMRegister, Assembler::AVX_256bit);
+ // Create a blend mask by setting high bits for entries coming from other lane in shuffle
+ __ vpaddb($vtmp2$$XMMRegister, $shuffle$$XMMRegister, ExternalAddress(vector_byte_shufflemask()), Assembler::AVX_256bit, $scratch$$Register);
+ // Perform the blend
+ __ vpblendvb($dst$$XMMRegister, $dst$$XMMRegister, $vtmp1$$XMMRegister, $vtmp2$$XMMRegister, Assembler::AVX_256bit);
%}
ins_pipe( pipe_slow );
%}
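
The 0x70/0xF0 constants from vector_byte_shuffle_mask turn each shuffle index into a vpblendvb selector: after the vpaddb, bit 7 of a lane is set exactly when its index points into the other 128-bit lane, so the blend picks between the self-lane and swapped-lane vpshufb results. A scalar model of the selector:

#include <cstdint>

// For destination byte i (0..31) with shuffle index idx (0..31): bias is
// 0x70 for the low 128-bit lane and 0xF0 for the high lane; vpblendvb keys
// off bit 7 of (idx + bias) mod 256.
bool take_from_other_lane(int i, uint8_t idx) {
  uint8_t bias = (i < 16) ? 0x70 : 0xF0;
  return ((uint8_t)(idx + bias)) & 0x80;
}
// Low lane:  idx 15 -> 0x7F (self), idx 16 -> 0x80 (other).
// High lane: idx 15 -> 0xFF (other), idx 16 -> wraps to 0x00 (self).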
@@ -7527,26 +7539,42 @@ instruct rearrangeB_evex(vec dst, vec src, vec shuffle) %{
instruct loadShuffleS(vec dst, vec src, vec vtmp, rRegP scratch) %{
predicate(vector_element_basic_type(n) == T_SHORT &&
- vector_length(n) <= 8 && !VM_Version::supports_avx512bw()); // NB! aligned with rearrangeS
+ vector_length(n) <= 16 && !VM_Version::supports_avx512bw()); // NB! aligned with rearrangeS
match(Set dst (VectorLoadShuffle src));
effect(TEMP dst, TEMP vtmp, TEMP scratch);
format %{ "vector_load_shuffle $dst, $src\t! using $vtmp and $scratch as TEMP" %}
ins_encode %{
// Create a byte shuffle mask from short shuffle mask
// only byte shuffle instruction available on these platforms
+ int vlen_in_bytes = vector_length_in_bytes(this);
+ if (UseAVX == 0) {
+ assert(vlen_in_bytes <= 16, "required");
+ // Multiply each shuffle by two to get byte index
+ __ pmovzxbw($vtmp$$XMMRegister, $src$$XMMRegister);
+ __ psllw($vtmp$$XMMRegister, 1);
+
+ // Duplicate to create 2 copies of byte index
+ __ movdqu($dst$$XMMRegister, $vtmp$$XMMRegister);
+ __ psllw($dst$$XMMRegister, 8);
+ __ por($dst$$XMMRegister, $vtmp$$XMMRegister);
+
+ // Add one to get alternate byte index
+ __ movdqu($vtmp$$XMMRegister, ExternalAddress(vector_short_shufflemask()), $scratch$$Register);
+ __ paddb($dst$$XMMRegister, $vtmp$$XMMRegister);
+ } else {
+ assert(UseAVX > 1 || vlen_in_bytes <= 16, "required");
+ int vlen_enc = vector_length_encoding(this);
+ // Multiply each shuffle by two to get byte index
+ __ vpmovzxbw($vtmp$$XMMRegister, $src$$XMMRegister, vlen_enc);
+ __ vpsllw($vtmp$$XMMRegister, $vtmp$$XMMRegister, 1, vlen_enc);
- // Multiply each shuffle by two to get byte index
- __ pmovzxbw($vtmp$$XMMRegister, $src$$XMMRegister);
- __ psllw($vtmp$$XMMRegister, 1);
-
- // Duplicate to create 2 copies of byte index
- __ movdqu($dst$$XMMRegister, $vtmp$$XMMRegister);
- __ psllw($dst$$XMMRegister, 8);
- __ por($dst$$XMMRegister, $vtmp$$XMMRegister);
+ // Duplicate to create 2 copies of byte index
+ __ vpsllw($dst$$XMMRegister, $vtmp$$XMMRegister, 8, vlen_enc);
+ __ vpor($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, vlen_enc);
- // Add one to get alternate byte index
- __ movdqu($vtmp$$XMMRegister, ExternalAddress(vector_short_shufflemask()), $scratch$$Register);
- __ paddb($dst$$XMMRegister, $vtmp$$XMMRegister);
+ // Add one to get alternate byte index
+ __ vpaddb($dst$$XMMRegister, $dst$$XMMRegister, ExternalAddress(vector_short_shufflemask()), vlen_enc, $scratch$$Register);
+ }
%}
ins_pipe( pipe_slow );
%}
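
Both branches above compute the same mask; as a scalar C++ model of that expansion (a sketch, assuming every index fits in a byte), each 16-bit element index i becomes the byte-index pair (2i, 2i+1) via zero-extend, shift-left-by-one, duplication, and the +1 taken from vector_short_shufflemask():

#include <cstdint>

// Expand a short-element shuffle into the equivalent byte-element shuffle.
static void expand_short_shuffle(const uint8_t* short_idx,
                                 uint8_t* byte_idx, int n) {
  for (int i = 0; i < n; i++) {
    uint8_t b = (uint8_t)(short_idx[i] * 2);  // pmovzxbw + psllw(1)
    byte_idx[2 * i]     = b;                  // low byte of element i
    byte_idx[2 * i + 1] = (uint8_t)(b + 1);   // high byte: the +1 step
  }
}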
@@ -7563,6 +7591,28 @@ instruct rearrangeS(vec dst, vec shuffle) %{
ins_pipe( pipe_slow );
%}
+instruct rearrangeS_avx(legVec dst, legVec src, vec shuffle, legVec vtmp1, legVec vtmp2, rRegP scratch) %{
+ predicate(vector_element_basic_type(n) == T_SHORT &&
+ vector_length(n) == 16 && !VM_Version::supports_avx512bw());
+ match(Set dst (VectorRearrange src shuffle));
+ effect(TEMP dst, TEMP vtmp1, TEMP vtmp2, TEMP scratch);
+ format %{ "vector_rearrange $dst, $shuffle, $src\t! using $vtmp1, $vtmp2, $scratch as TEMP" %}
+ ins_encode %{
+ assert(UseAVX >= 2, "required");
+ // Swap src into vtmp1
+ __ vperm2i128($vtmp1$$XMMRegister, $src$$XMMRegister, $src$$XMMRegister, 1);
+ // Shuffle swapped src to get entries from the other 128-bit lane
+ __ vpshufb($vtmp1$$XMMRegister, $vtmp1$$XMMRegister, $shuffle$$XMMRegister, Assembler::AVX_256bit);
+ // Shuffle original src to get entries from its own 128-bit lane
+ __ vpshufb($dst$$XMMRegister, $src$$XMMRegister, $shuffle$$XMMRegister, Assembler::AVX_256bit);
+ // Create a blend mask by setting high bits for shuffle entries that come from the other lane
+ __ vpaddb($vtmp2$$XMMRegister, $shuffle$$XMMRegister, ExternalAddress(vector_byte_shufflemask()), Assembler::AVX_256bit, $scratch$$Register);
+ // Perform the blend
+ __ vpblendvb($dst$$XMMRegister, $dst$$XMMRegister, $vtmp1$$XMMRegister, $vtmp2$$XMMRegister, Assembler::AVX_256bit);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
instruct loadShuffleS_evex(vec dst, vec src) %{
predicate(vector_element_basic_type(n) == T_SHORT &&
VM_Version::supports_avx512bw());
diff --git a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
index f4aaeb6ea1e491ea34bdef1088a2f706001cf1db..efe82a0cce396ff2af91667885d1e65f85e7ac0c 100644
--- a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
+++ b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -119,16 +119,6 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return generate_empty_runtime_stub("resolve_blob");
}
-size_t SharedRuntime::trampoline_size() {
- ShouldNotCallThis();
- return 0;
-}
-
-void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
- ShouldNotCallThis();
- return;
-}
-
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index a976e4f6b940025931e04727595de87efd82d3f8..dc2c25e5568175571fe86767e822d3d9fd6544d1 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -29,7 +29,6 @@
// no precompiled headers
#include "jvm.h"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
@@ -63,9 +62,9 @@
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
+#include "runtime/safefetch.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
-#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
@@ -109,7 +108,6 @@
#include
#include
#include
-#include
// Missing prototypes for various system APIs.
extern "C"
@@ -3151,64 +3149,6 @@ size_t os::current_stack_size() {
return s;
}
-extern char** environ;
-
-// Run the specified command in a separate process. Return its exit value,
-// or -1 on failure (e.g. can't fork a new process).
-// Unlike system(), this function can be called from signal handler. It
-// doesn't block SIGINT et al.
-int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
- char* argv[4] = { (char*)"sh", (char*)"-c", cmd, NULL};
-
- pid_t pid = fork();
-
- if (pid < 0) {
- // fork failed
- return -1;
-
- } else if (pid == 0) {
- // child process
-
- // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
- execve("/usr/bin/sh", argv, environ);
-
- // execve failed
- _exit(-1);
-
- } else {
- // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
- // care about the actual exit code, for now.
-
- int status;
-
- // Wait for the child process to exit. This returns immediately if
- // the child has already exited. */
- while (waitpid(pid, &status, 0) < 0) {
- switch (errno) {
- case ECHILD: return 0;
- case EINTR: break;
- default: return -1;
- }
- }
-
- if (WIFEXITED(status)) {
- // The child exited normally; get its exit code.
- return WEXITSTATUS(status);
- } else if (WIFSIGNALED(status)) {
- // The child exited because of a signal.
- // The best value to return is 0x80 + signal number,
- // because that is what all Unix shells do, and because
- // it allows callers to distinguish between process exit and
- // process death by signal.
- return 0x80 + WTERMSIG(status);
- } else {
- // Unknown exit code; pass it through.
- return status;
- }
- }
- return -1;
-}
-
// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
@@ -3225,12 +3165,6 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return strlen(buffer);
}
-#ifndef PRODUCT
-void TestReserveMemorySpecial_test() {
- // No tests available for this platform
-}
-#endif
-
bool os::start_debugging(char *buf, int buflen) {
int len = (int)strlen(buf);
char *p = &buf[len];
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 627b995dfedaa43594931ba351ca1db6fa3a4edf..4488e1c31d8538461f12b27451f9f042c7e1c8e1 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -24,7 +24,6 @@
// no precompiled headers
#include "jvm.h"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
@@ -94,7 +93,6 @@
# include
# include
# include
-# include
# include
# include
@@ -1412,7 +1410,7 @@ void os::print_os_info_brief(outputStream* st) {
}
void os::print_os_info(outputStream* st) {
- st->print("OS:");
+ st->print_cr("OS:");
os::Posix::print_uname_info(st);
@@ -2611,80 +2609,6 @@ void os::pause() {
}
}
-// Darwin has no "environ" in a dynamic library.
-#ifdef __APPLE__
- #include <crt_externs.h>
- #define environ (*_NSGetEnviron())
-#else
-extern char** environ;
-#endif
-
-// Run the specified command in a separate process. Return its exit value,
-// or -1 on failure (e.g. can't fork a new process).
-// Unlike system(), this function can be called from signal handler. It
-// doesn't block SIGINT et al.
-int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
- const char * argv[4] = {"sh", "-c", cmd, NULL};
-
- // fork() in BsdThreads/NPTL is not async-safe. It needs to run
- // pthread_atfork handlers and reset pthread library. All we need is a
- // separate process to execve. Make a direct syscall to fork process.
- // On IA64 there's no fork syscall, we have to use fork() and hope for
- // the best...
- pid_t pid = fork();
-
- if (pid < 0) {
- // fork failed
- return -1;
-
- } else if (pid == 0) {
- // child process
-
- // execve() in BsdThreads will call pthread_kill_other_threads_np()
- // first to kill every thread on the thread list. Because this list is
- // not reset by fork() (see notes above), execve() will instead kill
- // every thread in the parent process. We know this is the only thread
- // in the new process, so make a system call directly.
- // IA64 should use normal execve() from glibc to match the glibc fork()
- // above.
- execve("/bin/sh", (char* const*)argv, environ);
-
- // execve failed
- _exit(-1);
-
- } else {
- // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
- // care about the actual exit code, for now.
-
- int status;
-
- // Wait for the child process to exit. This returns immediately if
- // the child has already exited. */
- while (waitpid(pid, &status, 0) < 0) {
- switch (errno) {
- case ECHILD: return 0;
- case EINTR: break;
- default: return -1;
- }
- }
-
- if (WIFEXITED(status)) {
- // The child exited normally; get its exit code.
- return WEXITSTATUS(status);
- } else if (WIFSIGNALED(status)) {
- // The child exited because of a signal
- // The best value to return is 0x80 + signal number,
- // because that is what all Unix shells do, and because
- // it allows callers to distinguish between process exit and
- // process death by signal.
- return 0x80 + WTERMSIG(status);
- } else {
- // Unknown exit code; pass it through
- return status;
- }
- }
-}
-
// Get the kern.corefile setting, or otherwise the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
@@ -2719,12 +2643,6 @@ bool os::supports_map_sync() {
return false;
}
-#ifndef PRODUCT
-void TestReserveMemorySpecial_test() {
- // No tests available for this platform
-}
-#endif
-
bool os::start_debugging(char *buf, int buflen) {
int len = (int)strlen(buf);
char *p = &buf[len];
diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
index 2cd3b95a72b0f34fe110f7c63ac650aac1bc035a..7a113055423aa3837a114580c6ee66005874dae8 100644
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
#include "logging/log.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
-#include "runtime/stubRoutines.hpp"
+#include "runtime/safefetch.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 3b9556d688d79a65f130451589301646ec3462df..b0bfa1301315ec32695c5e29b0dd4b23bb576600 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -97,7 +97,6 @@
# include
# include
# include
-# include
# include
# include
# include
@@ -3529,11 +3528,19 @@ bool os::Linux::transparent_huge_pages_sanity_check(bool warn,
return result;
}
+int os::Linux::hugetlbfs_page_size_flag(size_t page_size) {
+ if (page_size != default_large_page_size()) {
+ return (exact_log2(page_size) << MAP_HUGE_SHIFT);
+ }
+ return 0;
+}
+
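
The helper above relies on the kernel's encoding for explicit hugetlb page sizes: log2 of the page size packed into the upper mmap flag bits. A stand-alone sketch (hypothetical helper, not HotSpot code) that requests 2 MiB pages explicitly:

#include <stddef.h>
#include <sys/mman.h>
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26  // from <linux/mman.h> on older toolchains
#endif

// Map `bytes` backed by explicit 2 MiB huge pages, or return NULL.
static void* map_2m_huge(size_t bytes) {
  const int log2_2m = 21;  // 2 MiB == 1 << 21
  int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
              (log2_2m << MAP_HUGE_SHIFT);  // equivalent to MAP_HUGE_2MB
  void* p = ::mmap(NULL, bytes, PROT_READ | PROT_WRITE, flags, -1, 0);
  return (p == MAP_FAILED) ? NULL : p;
}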
bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
bool result = false;
- void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
- MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
- -1, 0);
+
+ // Include the page size flag to ensure we sanity-check the correct page size.
+ int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
+ void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE, flags, -1, 0);
if (p != MAP_FAILED) {
// We don't know if this really is a huge page or not.
@@ -3564,6 +3571,30 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
return result;
}
+bool os::Linux::shm_hugetlbfs_sanity_check(bool warn, size_t page_size) {
+ // Try to create a large shared memory segment.
+ int shmid = shmget(IPC_PRIVATE, page_size, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
+ if (shmid == -1) {
+ // Possible reasons for shmget failure:
+ // 1. shmmax is too small for the request.
+ // > check shmmax value: cat /proc/sys/kernel/shmmax
+ // > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax
+ // 2. not enough large page memory.
+ // > check available large pages: cat /proc/meminfo
+ // > increase amount of large pages:
+ // sysctl -w vm.nr_hugepages=new_value
+ // > For more information regarding large pages please refer to:
+ // https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
+ if (warn) {
+ warning("Large pages using UseSHM are not configured on this system.");
+ }
+ return false;
+ }
+ // Managed to create a segment, now delete it.
+ shmctl(shmid, IPC_RMID, NULL);
+ return true;
+}
+
// From the coredump_filter documentation:
//
// - (bit 0) anonymous private memory
@@ -3748,7 +3779,18 @@ bool os::Linux::setup_large_page_type(size_t page_size) {
UseHugeTLBFS = false;
}
- return UseSHM;
+ if (UseSHM) {
+ bool warn_on_failure = !FLAG_IS_DEFAULT(UseSHM);
+ if (shm_hugetlbfs_sanity_check(warn_on_failure, page_size)) {
+ return true;
+ }
+ UseSHM = false;
+ }
+
+ if (!FLAG_IS_DEFAULT(UseLargePages)) {
+ log_warning(pagesize)("UseLargePages disabled, no large pages configured and available on the system.");
+ }
+ return false;
}
void os::large_page_init() {
@@ -3888,13 +3930,15 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
if (shmid == -1) {
// Possible reasons for shmget failure:
- // 1. shmmax is too small for Java heap.
+ // 1. shmmax is too small for the request.
// > check shmmax value: cat /proc/sys/kernel/shmmax
- // > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
+ // > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax
// 2. not enough large page memory.
// > check available large pages: cat /proc/meminfo
// > increase amount of large pages:
- // echo new_value > /proc/sys/vm/nr_hugepages
+ // sysctl -w vm.nr_hugepages=new_value
+ // > For more information regarding large pages please refer to:
+ // https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
// Note 1: different Linux may use different name for this property,
// e.g. on Redhat AS-3 it is "hugetlb_pool".
// Note 2: it's possible there's enough physical memory available but
@@ -3943,10 +3987,9 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB;
+ // Ensure the correct page size flag is used when needed.
+ flags |= hugetlbfs_page_size_flag(os::large_page_size());
- if (os::large_page_size() != default_large_page_size()) {
- flags |= (exact_log2(os::large_page_size()) << MAP_HUGE_SHIFT);
- }
char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);
if (addr == MAP_FAILED) {
@@ -4016,11 +4059,7 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes,
}
// Commit large-paged area.
- flags |= MAP_HUGETLB;
-
- if (os::large_page_size() != default_large_page_size()) {
- flags |= (exact_log2(os::large_page_size()) << MAP_HUGE_SHIFT);
- }
+ flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(os::large_page_size());
result = ::mmap(lp_start, lp_bytes, prot, flags, -1, 0);
if (result == MAP_FAILED) {
@@ -5179,68 +5218,6 @@ void os::pause() {
}
}
-extern char** environ;
-
-// Run the specified command in a separate process. Return its exit value,
-// or -1 on failure (e.g. can't fork a new process).
-// Unlike system(), this function can be called from signal handler. It
-// doesn't block SIGINT et al.
-int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
- const char * argv[4] = {"sh", "-c", cmd, NULL};
-
- pid_t pid ;
-
- if (use_vfork_if_available) {
- pid = vfork();
- } else {
- pid = fork();
- }
-
- if (pid < 0) {
- // fork failed
- return -1;
-
- } else if (pid == 0) {
- // child process
-
- execve("/bin/sh", (char* const*)argv, environ);
-
- // execve failed
- _exit(-1);
-
- } else {
- // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
- // care about the actual exit code, for now.
-
- int status;
-
- // Wait for the child process to exit. This returns immediately if
- // the child has already exited. */
- while (waitpid(pid, &status, 0) < 0) {
- switch (errno) {
- case ECHILD: return 0;
- case EINTR: break;
- default: return -1;
- }
- }
-
- if (WIFEXITED(status)) {
- // The child exited normally; get its exit code.
- return WEXITSTATUS(status);
- } else if (WIFSIGNALED(status)) {
- // The child exited because of a signal
- // The best value to return is 0x80 + signal number,
- // because that is what all Unix shells do, and because
- // it allows callers to distinguish between process exit and
- // process death by signal.
- return 0x80 + WTERMSIG(status);
- } else {
- // Unknown exit code; pass it through
- return status;
- }
- }
-}
-
// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
@@ -5495,172 +5472,3 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
st->cr();
}
}
-
-/////////////// Unit tests ///////////////
-
-#ifndef PRODUCT
-
-class TestReserveMemorySpecial : AllStatic {
- public:
- static void small_page_write(void* addr, size_t size) {
- size_t page_size = os::vm_page_size();
-
- char* end = (char*)addr + size;
- for (char* p = (char*)addr; p < end; p += page_size) {
- *p = 1;
- }
- }
-
- static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
- if (!UseHugeTLBFS) {
- return;
- }
-
- char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
-
- if (addr != NULL) {
- small_page_write(addr, size);
-
- os::Linux::release_memory_special_huge_tlbfs(addr, size);
- }
- }
-
- static void test_reserve_memory_special_huge_tlbfs_only() {
- if (!UseHugeTLBFS) {
- return;
- }
-
- size_t lp = os::large_page_size();
-
- for (size_t size = lp; size <= lp * 10; size += lp) {
- test_reserve_memory_special_huge_tlbfs_only(size);
- }
- }
-
- static void test_reserve_memory_special_huge_tlbfs_mixed() {
- size_t lp = os::large_page_size();
- size_t ag = os::vm_allocation_granularity();
-
- // sizes to test
- const size_t sizes[] = {
- lp, lp + ag, lp + lp / 2, lp * 2,
- lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
- lp * 10, lp * 10 + lp / 2
- };
- const int num_sizes = sizeof(sizes) / sizeof(size_t);
-
- // For each size/alignment combination, we test three scenarios:
- // 1) with req_addr == NULL
- // 2) with a non-null req_addr at which we expect to successfully allocate
- // 3) with a non-null req_addr which contains a pre-existing mapping, at which we
- // expect the allocation to either fail or to ignore req_addr
-
- // Pre-allocate two areas; they shall be as large as the largest allocation
- // and aligned to the largest alignment we will be testing.
- const size_t mapping_size = sizes[num_sizes - 1] * 2;
- char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
- PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
- -1, 0);
- assert(mapping1 != MAP_FAILED, "should work");
-
- char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
- PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
- -1, 0);
- assert(mapping2 != MAP_FAILED, "should work");
-
- // Unmap the first mapping, but leave the second mapping intact: the first
- // mapping will serve as a value for a "good" req_addr (case 2). The second
- // mapping, still intact, as "bad" req_addr (case 3).
- ::munmap(mapping1, mapping_size);
-
- // Case 1
- for (int i = 0; i < num_sizes; i++) {
- const size_t size = sizes[i];
- for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
- char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
- if (p != NULL) {
- assert(is_aligned(p, alignment), "must be");
- small_page_write(p, size);
- os::Linux::release_memory_special_huge_tlbfs(p, size);
- }
- }
- }
-
- // Case 2
- for (int i = 0; i < num_sizes; i++) {
- const size_t size = sizes[i];
- for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
- char* const req_addr = align_up(mapping1, alignment);
- char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
- if (p != NULL) {
- assert(p == req_addr, "must be");
- small_page_write(p, size);
- os::Linux::release_memory_special_huge_tlbfs(p, size);
- }
- }
- }
-
- // Case 3
- for (int i = 0; i < num_sizes; i++) {
- const size_t size = sizes[i];
- for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
- char* const req_addr = align_up(mapping2, alignment);
- char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
- // as the area around req_addr contains already existing mappings, the API should always
- // return NULL (as per contract, it cannot return another address)
- assert(p == NULL, "must be");
- }
- }
-
- ::munmap(mapping2, mapping_size);
-
- }
-
- static void test_reserve_memory_special_huge_tlbfs() {
- if (!UseHugeTLBFS) {
- return;
- }
-
- test_reserve_memory_special_huge_tlbfs_only();
- test_reserve_memory_special_huge_tlbfs_mixed();
- }
-
- static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
- if (!UseSHM) {
- return;
- }
-
- char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
-
- if (addr != NULL) {
- assert(is_aligned(addr, alignment), "Check");
- assert(is_aligned(addr, os::large_page_size()), "Check");
-
- small_page_write(addr, size);
-
- os::Linux::release_memory_special_shm(addr, size);
- }
- }
-
- static void test_reserve_memory_special_shm() {
- size_t lp = os::large_page_size();
- size_t ag = os::vm_allocation_granularity();
-
- for (size_t size = ag; size < lp * 3; size += ag) {
- for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
- test_reserve_memory_special_shm(size, alignment);
- }
- }
- }
-
- static void test() {
- test_reserve_memory_special_huge_tlbfs();
- test_reserve_memory_special_shm();
- }
-};
-
-void TestReserveMemorySpecial_test() {
- TestReserveMemorySpecial::test();
-}
-
-#endif
diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp
index 513b12aaf506d77ba4c84147090be6a36a4aff1b..0e9a3add1f2f3d39bf4cee748251b4580c7ff6f1 100644
--- a/src/hotspot/os/linux/os_linux.hpp
+++ b/src/hotspot/os/linux/os_linux.hpp
@@ -85,6 +85,9 @@ class Linux {
static bool setup_large_page_type(size_t page_size);
static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size);
static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
+ static bool shm_hugetlbfs_sanity_check(bool warn, size_t page_size);
+
+ static int hugetlbfs_page_size_flag(size_t page_size);
static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index 7344efa415b79e1af099d8c641c74423a23f57ca..f52f003bd5e1daf5c9f4698ac09d98ccf4fe35c8 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -51,11 +51,17 @@
#include
#include
#include
+#include
#include
+#include
#include
#include
#include
+#ifdef __APPLE__
+ #include <crt_externs.h>
+#endif
+
#define ROOT_UID 0
#ifndef MAP_ANONYMOUS
@@ -1765,3 +1771,75 @@ int os::PlatformMonitor::wait(jlong millis) {
return OS_OK;
}
}
+
+// Darwin has no "environ" in a dynamic library.
+#ifdef __APPLE__
+ #define environ (*_NSGetEnviron())
+#else
+ extern char** environ;
+#endif
+
+char** os::get_environ() { return environ; }
+
+// Run the specified command in a separate process. Return its exit value,
+// or -1 on failure (e.g. can't fork a new process).
+// Notes: - Unlike system(), this function can be called from a signal handler;
+//          it does not block SIGINT et al.
+//        - This function is unsafe to use in non-error situations, mainly
+//          because the child process will inherit all parent descriptors.
+int os::fork_and_exec(const char* cmd, bool prefer_vfork) {
+ const char * argv[4] = {"sh", "-c", cmd, NULL};
+
+ pid_t pid;
+
+ char** env = os::get_environ();
+
+ // Always use vfork on AIX, since it's safe and helps with analyzing OOM situations.
+ // Otherwise leave it up to the caller.
+ AIX_ONLY(prefer_vfork = true;)
+ pid = prefer_vfork ? ::vfork() : ::fork();
+
+ if (pid < 0) {
+ // fork failed
+ return -1;
+
+ } else if (pid == 0) {
+ // child process
+
+ ::execve("/bin/sh", (char* const*)argv, env);
+
+ // execve failed
+ ::_exit(-1);
+
+ } else {
+ // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
+ // care about the actual exit code, for now.
+
+ int status;
+
+ // Wait for the child process to exit. This returns immediately if
+ // the child has already exited.
+ while (::waitpid(pid, &status, 0) < 0) {
+ switch (errno) {
+ case ECHILD: return 0;
+ case EINTR: break;
+ default: return -1;
+ }
+ }
+
+ if (WIFEXITED(status)) {
+ // The child exited normally; get its exit code.
+ return WEXITSTATUS(status);
+ } else if (WIFSIGNALED(status)) {
+ // The child exited because of a signal
+ // The best value to return is 0x80 + signal number,
+ // because that is what all Unix shells do, and because
+ // it allows callers to distinguish between process exit and
+ // process death by signal.
+ return 0x80 + WTERMSIG(status);
+ } else {
+ // Unknown exit code; pass it through
+ return status;
+ }
+ }
+}
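
A hypothetical caller-side sketch of the return-value convention described above (exit code for normal termination, 0x80 plus the signal number otherwise); run_and_classify is illustrative and assumes the surrounding HotSpot headers:

static void run_and_classify(const char* cmd) {
  int rc = os::fork_and_exec(cmd, /* prefer_vfork */ false);
  if (rc < 0) {
    // fork()/vfork() or execve() failed
  } else if (rc >= 0x80) {
    int sig = rc - 0x80;  // child was killed by signal `sig`
    (void)sig;
  } else {
    // rc is the child's ordinary exit code
  }
}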
diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp
index 5fbcff7f9b08f5b1bb3990a6cba1589eb89f8272..8c1967ec0cc14ec496321296bae06c1cb40e5b8e 100644
--- a/src/hotspot/os/posix/signals_posix.cpp
+++ b/src/hotspot/os/posix/signals_posix.cpp
@@ -47,6 +47,17 @@ extern sigjmp_buf* get_jmp_buf_for_continuation();
#include
+
+static const char* get_signal_name(int sig, char* out, size_t outlen);
+
+// Returns address of a handler associated with the given sigaction
+static address get_signal_handler(const struct sigaction* action);
+
+#define HANDLER_IS(handler, address) ((handler) == CAST_FROM_FN_PTR(void*, (address)))
+#define HANDLER_IS_IGN(handler) (HANDLER_IS(handler, SIG_IGN))
+#define HANDLER_IS_DFL(handler) (HANDLER_IS(handler, SIG_DFL))
+#define HANDLER_IS_IGN_OR_DFL(handler) (HANDLER_IS_IGN(handler) || HANDLER_IS_DFL(handler))
+
// Various signal related mechanism are laid out in the following order:
//
// sun.misc.Signal
@@ -54,13 +65,25 @@ extern sigjmp_buf* get_jmp_buf_for_continuation();
// signal handling (except suspend/resume)
// suspend/resume
-// Glibc on Linux uses the SA_RESTORER flag to indicate
-// the use of a "signal trampoline". We have no interest
-// in this flag and need to ignore it when checking our
-// own flag settings.
-// Note: SA_RESTORER is not exposed through signal.h so we
-// have to hardwire its 0x04000000 value in the mask.
-LINUX_ONLY(const int SA_RESTORER_FLAG_MASK = ~0x04000000;)
+// Helper function to strip from a sigaction's sa_flags any flags
+// which are not needed for semantic comparison (see remarks below
+// about SA_RESTORER on Linux).
+// Also to work around the fact that not all platforms define sa_flags
+// as signed int (looking at you, zlinux).
+static int get_sanitized_sa_flags(const struct sigaction* sa) {
+ int f = (int) sa->sa_flags;
+#ifdef LINUX
+ // Glibc on Linux uses the SA_RESTORER flag to indicate
+ // the use of a "signal trampoline". We have no interest
+ // in this flag and need to ignore it when checking our
+ // own flag settings.
+ // Note: SA_RESTORER is not exposed through signal.h so we
+ // have to hardcode its 0x04000000 value here.
+ const int sa_restorer_flag = 0x04000000;
+ f &= ~sa_restorer_flag;
+#endif // LINUX
+ return f;
+}
// Todo: provide a os::get_max_process_id() or similar. Number of processes
// may have been configured, can be read more accurately from proc fs etc.
@@ -76,22 +99,68 @@ extern "C" {
typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
-// For diagnostics to print a message once. see run_periodic_checks
-static sigset_t check_signal_done;
-static bool check_signals = true;
+// At various places we store handler information for each installed handler.
+// SavedSignalHandlers is a helper class for those cases, keeping an array of sigaction
+// structures.
+class SavedSignalHandlers {
+ // Note: NSIG can be largish, depending on platform, and this array is expected
+ // to be sparsely populated. To save space the contained structures are
+ // C-heap allocated. Since they only get added outside of signal handling
+ // this is no problem.
+ struct sigaction* _sa[NSIG];
+
+ bool check_signal_number(int sig) const {
+ assert(sig > 0 && sig < NSIG, "invalid signal number %d", sig);
+ return sig > 0 && sig < NSIG;
+ }
+
+public:
+
+ SavedSignalHandlers() {
+ ::memset(_sa, 0, sizeof(_sa));
+ }
+
+ ~SavedSignalHandlers() {
+ for (int i = 0; i < NSIG; i++) {
+ FREE_C_HEAP_OBJ(_sa[i]);
+ }
+ }
+
+ void set(int sig, const struct sigaction* act) {
+ if (check_signal_number(sig)) {
+ assert(_sa[sig] == NULL, "Overwriting signal handler?");
+ _sa[sig] = NEW_C_HEAP_OBJ(struct sigaction, mtInternal);
+ *_sa[sig] = *act;
+ }
+ }
+
+ const struct sigaction* get(int sig) const {
+ if (check_signal_number(sig)) {
+ return _sa[sig];
+ }
+ return NULL;
+ }
+};
+
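
Illustrative use of the class above, mirroring what set_signal_handler() does further down; a sketch that assumes the surrounding HotSpot context (get_signal_handler(), the HotSpot assert macro):

static SavedSignalHandlers example_store;

static void remember_current_handler(int sig) {
  struct sigaction old;
  if (::sigaction(sig, NULL, &old) == 0) {
    example_store.set(sig, &old);  // deep copy into a C-heap backed slot
    const struct sigaction* back = example_store.get(sig);
    assert(back != NULL && get_signal_handler(back) == get_signal_handler(&old),
           "round trip should preserve the handler");
  }
}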
debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, preinstalled_sigs;
-struct sigaction sigact[NSIG];
-// For signal-chaining
+// Our own signal handlers should never ever get replaced by a third party one.
+// To check that, and to aid with diagnostics, store a copy of the handler setup
+// and compare it periodically against reality (see os::run_periodic_checks()).
+static bool check_signals = true;
+static SavedSignalHandlers vm_handlers;
+static bool do_check_signal_periodically[NSIG] = { 0 };
+
+// For signal-chaining:
+// if chaining is active, chained_handlers contains all handlers which we
+// replaced with our own and to which we must delegate.
+static SavedSignalHandlers chained_handlers;
static bool libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
static get_signal_t get_signal_action = NULL;
-// For diagnostic
-int sigflags[NSIG];
-
// suspend/resume support
#if defined(__APPLE__)
static OSXSemaphore sr_semaphore;
@@ -270,8 +339,6 @@ static const struct {
{ -1, NULL }
};
-static const char* get_signal_name(int sig, char* out, size_t outlen);
-
////////////////////////////////////////////////////////////////////////////////
// sun.misc.Signal support
@@ -335,19 +402,6 @@ int os::signal_wait() {
////////////////////////////////////////////////////////////////////////////////
// signal chaining support
-static struct sigaction* get_preinstalled_handler(int sig) {
- if (sigismember(&preinstalled_sigs, sig)) {
- return &sigact[sig];
- }
- return NULL;
-}
-
-static void save_preinstalled_handler(int sig, struct sigaction& oldAct) {
- assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
- sigact[sig] = oldAct;
- sigaddset(&preinstalled_sigs, sig);
-}
-
struct sigaction* get_chained_signal_action(int sig) {
struct sigaction *actp = NULL;
@@ -357,7 +411,7 @@ struct sigaction* get_chained_signal_action(int sig) {
}
if (actp == NULL) {
// Retrieve the preinstalled signal handler from jvm
- actp = get_preinstalled_handler(sig);
+ actp = const_cast<struct sigaction*>(chained_handlers.get(sig));
}
return actp;
@@ -736,18 +790,6 @@ static void print_sa_flags(outputStream* st, int flags) {
st->print("%s", buffer);
}
-static int get_our_sigflags(int sig) {
- assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
- return sigflags[sig];
-}
-
-static void set_our_sigflags(int sig, int flags) {
- assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
- if (sig > 0 && sig < NSIG) {
- sigflags[sig] = flags;
- }
-}
-
// Implementation may use the same storage for both the sa_sigaction field and the sa_handler field,
// so check for "sigAct.sa_flags == SA_SIGINFO"
static address get_signal_handler(const struct sigaction* action) {
@@ -763,10 +805,32 @@ typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *)
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context);
+// Semantically compare two sigaction structures. Return true if they are referring to
+// the same handler, using the same flags.
+static bool are_handlers_equal(const struct sigaction* sa,
+ const struct sigaction* expected_sa) {
+ address this_handler = get_signal_handler(sa);
+ address expected_handler = get_signal_handler(expected_sa);
+ const int this_flags = get_sanitized_sa_flags(sa);
+ const int expected_flags = get_sanitized_sa_flags(expected_sa);
+ return (this_handler == expected_handler) &&
+ (this_flags == expected_flags);
+}
+
+// If we installed one of our signal handlers for sig, check that the current
+// setup matches what we originally installed.
static void check_signal_handler(int sig) {
char buf[O_BUFLEN];
- address jvmHandler = NULL;
+ bool mismatch = false;
+
+ if (!do_check_signal_periodically[sig]) {
+ return;
+ }
+
+ const struct sigaction* expected_act = vm_handlers.get(sig);
+ assert(expected_act != NULL, "Sanity");
+ // Retrieve current signal setup.
struct sigaction act;
static os_sigaction_t os_sigaction = NULL;
if (os_sigaction == NULL) {
@@ -777,65 +841,22 @@ static void check_signal_handler(int sig) {
os_sigaction(sig, (struct sigaction*)NULL, &act);
- // See comment for SA_RESTORER_FLAG_MASK
- LINUX_ONLY(act.sa_flags &= SA_RESTORER_FLAG_MASK;)
-
- address thisHandler = get_signal_handler(&act);
-
- switch (sig) {
- case SIGSEGV:
- case SIGBUS:
- case SIGFPE:
- case SIGPIPE:
- case SIGILL:
- case SIGXFSZ:
- jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
- break;
-
- case SHUTDOWN1_SIGNAL:
- case SHUTDOWN2_SIGNAL:
- case SHUTDOWN3_SIGNAL:
- case BREAK_SIGNAL:
- jvmHandler = (address)os::user_handler();
- break;
-
- default:
- if (sig == PosixSignals::SR_signum) {
- jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
- } else {
- return;
- }
- break;
- }
-
- if (thisHandler != jvmHandler) {
- tty->print("Warning: %s handler ", os::exception_name(sig, buf, O_BUFLEN));
- tty->print_raw("expected:");
- print_signal_handler_name(tty, jvmHandler, buf, O_BUFLEN);
- tty->print_raw(" found:");
- print_signal_handler_name(tty, thisHandler, buf, O_BUFLEN);
- // No need to check this sig any longer
- sigaddset(&check_signal_done, sig);
+ // Compare both sigaction structures (intelligently; only the members we care about).
+ if (!are_handlers_equal(&act, expected_act)) {
+ tty->print_cr("Warning: %s handler modified!", os::exception_name(sig, buf, sizeof(buf)));
+ // If we had a mismatch:
+ // - print all signal handlers. As part of that printout, details will be printed
+ // about any modified handlers.
+ // - Disable any further checks for this signal - we do not want to flood stdout. Though
+ // depending on which signal had been overwritten, we may die very soon anyway.
+ os::print_signal_handlers(tty, buf, O_BUFLEN);
+ do_check_signal_periodically[sig] = false;
+ tty->print_cr("Consider using jsig library.");
// Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
- tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
+ tty->print_cr("Note: Running in non-interactive shell, %s handler is replaced by shell",
os::exception_name(sig, buf, O_BUFLEN));
}
- } else if (get_our_sigflags(sig) != 0 && (int)act.sa_flags != get_our_sigflags(sig)) {
- tty->print("Warning: %s handler flags ", os::exception_name(sig, buf, O_BUFLEN));
- tty->print("expected:");
- print_sa_flags(tty, get_our_sigflags(sig));
- tty->cr();
- tty->print(" found:");
- print_sa_flags(tty, act.sa_flags);
- tty->cr();
- // No need to check this sig any longer
- sigaddset(&check_signal_done, sig);
- }
-
- // Dump all the signal
- if (sigismember(&check_signal_done, sig)) {
- os::print_signal_handlers(tty, buf, O_BUFLEN);
}
}
@@ -857,7 +878,7 @@ void* os::signal(int signal_number, void* handler) {
return (void *)-1;
}
- return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
+ return get_signal_handler(&oldSigAct);
}
void os::signal_raise(int signal_number) {
@@ -869,15 +890,8 @@ int os::sigexitnum_pd() {
return NSIG;
}
-static void do_signal_check(int signal) {
- if (!sigismember(&check_signal_done, signal)) {
- check_signal_handler(signal);
- }
-}
-
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here
-
void os::run_periodic_checks() {
if (check_signals == false) return;
@@ -886,24 +900,24 @@ void os::run_periodic_checks() {
// generation of hs*.log in the event of a crash, debugging
// such a case can be very challenging, so we absolutely
// check the following for a good measure:
- do_signal_check(SIGSEGV);
- do_signal_check(SIGILL);
- do_signal_check(SIGFPE);
- do_signal_check(SIGBUS);
- do_signal_check(SIGPIPE);
- do_signal_check(SIGXFSZ);
- PPC64_ONLY(do_signal_check(SIGTRAP);)
+ check_signal_handler(SIGSEGV);
+ check_signal_handler(SIGILL);
+ check_signal_handler(SIGFPE);
+ check_signal_handler(SIGBUS);
+ check_signal_handler(SIGPIPE);
+ check_signal_handler(SIGXFSZ);
+ PPC64_ONLY(check_signal_handler(SIGTRAP);)
// ReduceSignalUsage allows the user to override these handlers
// see comments at the very top and jvm_md.h
if (!ReduceSignalUsage) {
- do_signal_check(SHUTDOWN1_SIGNAL);
- do_signal_check(SHUTDOWN2_SIGNAL);
- do_signal_check(SHUTDOWN3_SIGNAL);
- do_signal_check(BREAK_SIGNAL);
+ check_signal_handler(SHUTDOWN1_SIGNAL);
+ check_signal_handler(SHUTDOWN2_SIGNAL);
+ check_signal_handler(SHUTDOWN3_SIGNAL);
+ check_signal_handler(BREAK_SIGNAL);
}
- do_signal_check(PosixSignals::SR_signum);
+ check_signal_handler(PosixSignals::SR_signum);
}
// Helper function for PosixSignals::print_siginfo_...():
@@ -1203,16 +1217,17 @@ void set_signal_handler(int sig) {
struct sigaction oldAct;
sigaction(sig, (struct sigaction*)NULL, &oldAct);
+ // Query the current signal handler. Needs to be a separate operation
+ // from installing a new handler since we need to honor AllowUserSignalHandlers.
void* oldhand = get_signal_handler(&oldAct);
- if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
- oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
- oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
+ if (!HANDLER_IS_IGN_OR_DFL(oldhand) &&
+ !HANDLER_IS(oldhand, javaSignalHandler)) {
if (AllowUserSignalHandlers) {
// Do not overwrite; user takes responsibility to forward to us.
return;
} else if (UseSignalChaining) {
// save the old handler in jvm
- save_preinstalled_handler(sig, oldAct);
+ chained_handlers.set(sig, &oldAct);
// libjsig also interposes the sigaction() call below and saves the
// old sigaction on it own.
} else {
@@ -1239,9 +1254,9 @@ void set_signal_handler(int sig) {
}
#endif
- // Save flags, which are set by ours
- assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
- sigflags[sig] = sigAct.sa_flags;
+ // Save handler setup for later checking
+ vm_handlers.set(sig, &sigAct);
+ do_check_signal_periodically[sig] = true;
int ret = sigaction(sig, &sigAct, &oldAct);
assert(ret == 0, "check");
@@ -1346,53 +1361,67 @@ static void print_signal_set_short(outputStream* st, const sigset_t* set) {
st->print("%s", buf);
}
-void PosixSignals::print_signal_handler(outputStream* st, int sig,
- char* buf, size_t buflen) {
- struct sigaction sa;
- sigaction(sig, NULL, &sa);
-
- // See comment for SA_RESTORER_FLAG_MASK
- LINUX_ONLY(sa.sa_flags &= SA_RESTORER_FLAG_MASK;)
-
- st->print("%10s: ", os::exception_name(sig, buf, buflen));
-
- address handler = get_signal_handler(&sa);
+static void print_single_signal_handler(outputStream* st,
+ const struct sigaction* act,
+ char* buf, size_t buflen) {
- if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
+ address handler = get_signal_handler(act);
+ if (HANDLER_IS_DFL(handler)) {
st->print("SIG_DFL");
- } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
+ } else if (HANDLER_IS_IGN(handler)) {
st->print("SIG_IGN");
} else {
- print_signal_handler_name(st, handler, buf, O_BUFLEN);
+ print_signal_handler_name(st, handler, buf, buflen);
}
- st->print(", sa_mask[0]=");
- print_signal_set_short(st, &sa.sa_mask);
+ st->print(", mask=");
+ print_signal_set_short(st, &(act->sa_mask));
- address rh = VMError::get_resetted_sighandler(sig);
- // May be, handler was resetted by VMError?
- if (rh != NULL) {
- handler = rh;
- // See comment for SA_RESTORER_FLAG_MASK
- sa.sa_flags = VMError::get_resetted_sigflags(sig) LINUX_ONLY(& SA_RESTORER_FLAG_MASK);
- }
+ st->print(", flags=");
+ int flags = get_sanitized_sa_flags(act);
+ print_sa_flags(st, flags);
- // Print textual representation of sa_flags.
- st->print(", sa_flags=");
- print_sa_flags(st, sa.sa_flags);
+}
- // Check: is it our handler?
- if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
- handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
- // It is our signal handler
- // check for flags, reset system-used one!
- if ((int)sa.sa_flags != get_our_sigflags(sig)) {
- st->print(
- ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
- get_our_sigflags(sig));
+// Print established signal handler for this signal.
+// - if this signal handler was installed by us and is chained to a pre-established user handler
+// it replaced, print that one too.
+// - otherwise, if this signal handler was installed by us and replaced another handler to which we
+// are not chained (e.g. if chaining is off), print that one too.
+void PosixSignals::print_signal_handler(outputStream* st, int sig,
+ char* buf, size_t buflen) {
+
+ st->print("%10s: ", os::exception_name(sig, buf, buflen));
+
+ struct sigaction current_act;
+ sigaction(sig, NULL, &current_act);
+
+ print_single_signal_handler(st, &current_act, buf, buflen);
+ st->cr();
+
+ // If we expected to see our own hotspot signal handler but found a different one,
+ // print a warning (unless the handler replacing it is our own crash handler, which can
+ // happen if this function is called during error reporting).
+ const struct sigaction* expected_act = vm_handlers.get(sig);
+ if (expected_act != NULL) {
+ const address current_handler = get_signal_handler(&current_act);
+ if (!(HANDLER_IS(current_handler, VMError::crash_handler_address))) {
+ if (!are_handlers_equal(&current_act, expected_act)) {
+ st->print_cr(" *** Handler was modified!");
+ st->print (" *** Expected: ");
+ print_single_signal_handler(st, expected_act, buf, buflen);
+ st->cr();
+ }
}
}
- st->cr();
+
+ // If there is a chained handler waiting behind the current one, print it too.
+ const struct sigaction* chained_act = get_chained_signal_action(sig);
+ if (chained_act != NULL) {
+ st->print(" chained to: ");
+ print_single_signal_handler(st, chained_act, buf, buflen);
+ st->cr();
+ }
}
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
@@ -1421,8 +1450,7 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
bool PosixSignals::is_sig_ignored(int sig) {
struct sigaction oact;
sigaction(sig, (struct sigaction*)NULL, &oact);
- void* ohlr = get_signal_handler(&oact);
- if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
+ if (HANDLER_IS_IGN(get_signal_handler(&oact))) {
return true;
} else {
return false;
@@ -1672,8 +1700,10 @@ int SR_initialize() {
return -1;
}
- // Save signal flag
- set_our_sigflags(PosixSignals::SR_signum, act.sa_flags);
+ // Save signal setup information for later checking.
+ vm_handlers.set(PosixSignals::SR_signum, &act);
+ do_check_signal_periodically[PosixSignals::SR_signum] = true;
+
return 0;
}
diff --git a/src/hotspot/os/posix/vmError_posix.cpp b/src/hotspot/os/posix/vmError_posix.cpp
index 798097ba8243b08ba8f5a2f65c36456b68dfd2ac..709259141a207a8a858de0662f59ce43d2ae1660 100644
--- a/src/hotspot/os/posix/vmError_posix.cpp
+++ b/src/hotspot/os/posix/vmError_posix.cpp
@@ -49,16 +49,6 @@
#endif
-// handle all synchronous program error signals which may happen during error
-// reporting. They must be unblocked, caught, handled.
-
-static const int SIGNALS[] = { SIGSEGV, SIGBUS, SIGILL, SIGFPE, SIGTRAP }; // add more if needed
-static const int NUM_SIGNALS = sizeof(SIGNALS) / sizeof(int);
-
-// Space for our "saved" signal flags and handlers
-static int resettedSigflags[NUM_SIGNALS];
-static address resettedSighandler[NUM_SIGNALS];
-
// Needed for cancelable steps.
static volatile pthread_t reporter_thread_id;
@@ -74,34 +64,6 @@ void VMError::interrupt_reporting_thread() {
::pthread_kill(reporter_thread_id, SIGILL);
}
-static void save_signal(int idx, int sig)
-{
- struct sigaction sa;
- sigaction(sig, NULL, &sa);
- resettedSigflags[idx] = sa.sa_flags;
- resettedSighandler[idx] = (sa.sa_flags & SA_SIGINFO)
- ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
- : CAST_FROM_FN_PTR(address, sa.sa_handler);
-}
-
-int VMError::get_resetted_sigflags(int sig) {
- for (int i = 0; i < NUM_SIGNALS; i++) {
- if (SIGNALS[i] == sig) {
- return resettedSigflags[i];
- }
- }
- return -1;
-}
-
-address VMError::get_resetted_sighandler(int sig) {
- for (int i = 0; i < NUM_SIGNALS; i++) {
- if (SIGNALS[i] == sig) {
- return resettedSighandler[i];
- }
- }
- return NULL;
-}
-
static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
PosixSignals::unblock_error_signals();
@@ -133,10 +95,15 @@ static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
VMError::report_and_die(NULL, sig, pc, info, ucVoid);
}
+const void* VMError::crash_handler_address = CAST_FROM_FN_PTR(void *, crash_handler);
+
void VMError::install_secondary_signal_handler() {
- for (int i = 0; i < NUM_SIGNALS; i++) {
- save_signal(i, SIGNALS[i]);
- os::signal(SIGNALS[i], CAST_FROM_FN_PTR(void *, crash_handler));
+ static const int signals_to_handle[] = {
+ SIGSEGV, SIGBUS, SIGILL, SIGFPE, SIGTRAP,
+ 0 // end
+ };
+ for (int i = 0; signals_to_handle[i] != 0; i++) {
+ os::signal(signals_to_handle[i], CAST_FROM_FN_PTR(void *, crash_handler));
}
}
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 53dd2d99f157c7405933b39d43a43e402e4096c1..22590c2e7ca6899ce406cf6fe4a647c60622a03e 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -27,7 +27,6 @@
// no precompiled headers
#include "jvm.h"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
@@ -58,10 +57,10 @@
#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
+#include "runtime/safefetch.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
-#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
@@ -267,6 +266,8 @@ bool os::unsetenv(const char* name) {
return (SetEnvironmentVariable(name, NULL) == TRUE);
}
+char** os::get_environ() { return _environ; }
+
// No setuid programs under Windows.
bool os::have_special_privileges() {
return false;
@@ -1701,9 +1702,9 @@ void os::print_os_info(outputStream* st) {
char buffer[1024];
st->print("HostName: ");
if (get_host_name(buffer, sizeof(buffer))) {
- st->print("%s ", buffer);
+ st->print_cr("%s", buffer);
} else {
- st->print("N/A ");
+ st->print_cr("N/A");
}
#endif
st->print_cr("OS:");
@@ -4482,8 +4483,18 @@ bool os::same_files(const char* file1, const char* file2) {
return true;
}
- HANDLE handle1 = create_read_only_file_handle(file1);
- HANDLE handle2 = create_read_only_file_handle(file2);
+ char* native_file1 = os::strdup_check_oom(file1);
+ native_file1 = os::native_path(native_file1);
+ char* native_file2 = os::strdup_check_oom(file2);
+ native_file2 = os::native_path(native_file2);
+ if (strcmp(native_file1, native_file2) == 0) {
+ os::free(native_file1);
+ os::free(native_file2);
+ return true;
+ }
+
+ HANDLE handle1 = create_read_only_file_handle(native_file1);
+ HANDLE handle2 = create_read_only_file_handle(native_file2);
bool result = false;
// if we could open both paths...
@@ -4510,6 +4521,9 @@ bool os::same_files(const char* file1, const char* file2) {
::CloseHandle(handle2);
}
+ os::free(native_file1);
+ os::free(native_file2);
+
return result;
}
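
With the fast path added above, two spellings of one path compare equal before any handle is opened; an illustrative Windows-only sketch (os::native_path() canonicalizes the separators):

static void same_files_fast_path_example() {
  // Both spellings normalize to the same native path, so the strcmp()
  // fast path returns true without a single CreateFile call.
  bool same = os::same_files("C:/logs/app.txt", "C:\\logs\\app.txt");
  assert(same, "normalized paths should compare equal");
}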
@@ -5500,7 +5514,7 @@ int os::PlatformMonitor::wait(jlong millis) {
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
-int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
+int os::fork_and_exec(const char* cmd, bool dummy /* ignored */) {
STARTUPINFO si;
PROCESS_INFORMATION pi;
DWORD exit_code;
@@ -5778,58 +5792,6 @@ char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
return agent_entry_name;
}
-#ifndef PRODUCT
-
-// test the code path in reserve_memory_special() that tries to allocate memory in a single
-// contiguous memory block at a particular address.
-// The test first tries to find a good approximate address to allocate at by using the same
-// method to allocate some memory at any address. The test then tries to allocate memory in
-// the vicinity (not directly after it to avoid possible by-chance use of that location)
-// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
-// the previously allocated memory is available for allocation. The only actual failure
-// that is reported is when the test tries to allocate at a particular location but gets a
-// different valid one. A NULL return value at this point is not considered an error but may
-// be legitimate.
-void TestReserveMemorySpecial_test() {
- if (!UseLargePages) {
- return;
- }
- // save current value of globals
- bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
- bool old_use_numa_interleaving = UseNUMAInterleaving;
-
- // set globals to make sure we hit the correct code path
- UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
-
- // do an allocation at an address selected by the OS to get a good one.
- const size_t large_allocation_size = os::large_page_size() * 4;
- char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
- if (result == NULL) {
- } else {
- os::release_memory_special(result, large_allocation_size);
-
- // allocate another page within the recently allocated memory area which seems to be a good location. At least
- // we managed to get it once.
- const size_t expected_allocation_size = os::large_page_size();
- char* expected_location = result + os::large_page_size();
- char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
- if (actual_location == NULL) {
- } else {
- // release memory
- os::release_memory_special(actual_location, expected_allocation_size);
- // only now check, after releasing any memory to avoid any leaks.
- assert(actual_location == expected_location,
- "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
- expected_location, expected_allocation_size, actual_location);
- }
- }
-
- // restore globals
- UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
- UseNUMAInterleaving = old_use_numa_interleaving;
-}
-#endif // PRODUCT
-
/*
All the defined signal names for Windows.
diff --git a/src/hotspot/os/windows/vmError_windows.cpp b/src/hotspot/os/windows/vmError_windows.cpp
index 3c899e54245e5859bc6654068d7952d6c7108035..1a0a947a749df9f2bc28a8da4d97c5f7aa7e9d07 100644
--- a/src/hotspot/os/windows/vmError_windows.cpp
+++ b/src/hotspot/os/windows/vmError_windows.cpp
@@ -29,14 +29,6 @@
#include "runtime/thread.hpp"
#include "utilities/vmError.hpp"
-int VMError::get_resetted_sigflags(int sig) {
- return -1;
-}
-
-address VMError::get_resetted_sighandler(int sig) {
- return NULL;
-}
-
LONG WINAPI crash_handler(struct _EXCEPTION_POINTERS* exceptionInfo) {
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
VMError::report_and_die(NULL, exception_code, NULL, exceptionInfo->ExceptionRecord,
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index 07f1f8be755fd10eb2e0fac588120d2387275364..d3482e32f2144482cd536e514e6be112798f49de 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -27,7 +27,6 @@
#include "jvm.h"
#include "assembler_ppc.hpp"
#include "asm/assembler.inline.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
index d1462df5fce3e52947051dcbc3156356c738c7bc..91ca9564a3e07777454d2f54b7b48e6fcec82939 100644
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
@@ -25,7 +25,6 @@
// no precompiled headers
#include "jvm.h"
#include "asm/macroAssembler.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
@@ -64,7 +63,6 @@
# include
# include
# include
-# include
# include
# include
# include
@@ -391,16 +389,6 @@ enum {
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
ucontext_t* uc, JavaThread* thread) {
-
-/*
- NOTE: does not seem to work on bsd.
- if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
- // can't decode this kind of signal
- info = NULL;
- } else {
- assert(sig == info->si_signo, "bad siginfo");
- }
-*/
// decide if this trap can be handled by a stub
address stub = NULL;
@@ -466,7 +454,10 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
#ifdef AMD64
if (sig == SIGFPE &&
- (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
+ (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV
+ // Workaround for macOS ARM incorrectly reporting FPE_FLTINV for "div by 0"
+ // instead of the expected FPE_FLTDIV when running x86_64 binary under Rosetta emulation
+ MACOS_ONLY(|| (VM_Version::is_cpu_emulated() && info->si_code == FPE_FLTINV)))) {
stub =
SharedRuntime::
continuation_for_implicit_exception(thread,
diff --git a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp
index 3d7dc7e4b5a8bee5d3e50671976692615a47bf73..4564ddc5b9248f57e6328c94b744f588fa7c9198 100644
--- a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp
+++ b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,12 +67,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
- if (MetaspaceShared::is_in_trampoline_frame(addr)) {
- // In the middle of a trampoline call. Bail out for safety.
- // This happens rarely so shouldn't affect profiling.
- return false;
- }
-
frame ret_frame(ret_sp, ret_fp, addr);
if (!ret_frame.safe_for_sender(this)) {
#if COMPILER2_OR_JVMCI
diff --git a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.hpp b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.hpp
index 76a18fb6cdaebc2b90a151e00db083be33df351f..6f4e42e14ff861b08aecdb957857e708d4d1e6a7 100644
--- a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.hpp
+++ b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.hpp
@@ -37,10 +37,6 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
- static ByteSize saved_rbp_address_offset() {
- return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset();
- }
-
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);
diff --git a/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp
index 3dade34f9c6f51a83a7fa1a75a94fa19422e1be9..05cb7ef99c3376af3b479032ac535a4885560cc6 100644
--- a/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp
+++ b/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp
@@ -25,3 +25,24 @@
#include "precompiled.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
+
+#ifdef __APPLE__
+
+#include <sys/sysctl.h>
+#include <errno.h>
+
+bool VM_Version::is_cpu_emulated() {
+ int ret = 0;
+ size_t size = sizeof(ret);
+ // Is this process being run in Rosetta (i.e. emulation) mode on macOS?
+ if (sysctlbyname("sysctl.proc_translated", &ret, &size, NULL, 0) == -1) {
+ // errno == ENOENT is a valid response, but anything else is a real error
+ if (errno != ENOENT) {
+ warning("unable to lookup sysctl.proc_translated");
+ }
+ }
+ return (ret == 1);
+}
+
+#endif
+
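For orientation, the Rosetta probe added above can be exercised outside the VM. A minimal standalone sketch, assuming only the documented macOS `sysctl.proc_translated` key (the scaffolding around it is illustrative, not part of the patch):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/sysctl.h>

    int main(void) {
      int translated = 0;
      size_t size = sizeof(translated);
      // sysctlbyname() fails with errno == ENOENT on macOS versions that
      // predate Rosetta 2; any other failure is a real error.
      if (sysctlbyname("sysctl.proc_translated", &translated, &size, NULL, 0) == -1) {
        translated = (errno == ENOENT) ? 0 : -1;
      }
      printf("proc_translated = %d\n", translated);
      return 0;
    }

Under Rosetta 2 this should print 1; running natively it should print 0.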
diff --git a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
index 8bca69035b7c9a7a20eda18a9c57204bb75eacc1..18125f1c16ce56611cadb30b0c6c106d899b36e7 100644
--- a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
+++ b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
@@ -31,7 +31,6 @@
// no precompiled headers
#include "jvm.h"
#include "assembler_zero.inline.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
diff --git a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S
new file mode 100644
index 0000000000000000000000000000000000000000..f5d2c2b69c2226123fc868dfe8cd4c26c7b32d1e
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S
@@ -0,0 +1,150 @@
+// Copyright (c) 2021, Red Hat Inc. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+
+
+
+ .text
+
+ .globl aarch64_atomic_fetch_add_8_default_impl
+ .align 5
+aarch64_atomic_fetch_add_8_default_impl:
+ prfm pstl1strm, [x0]
+0: ldaxr x2, [x0]
+ add x8, x2, x1
+ stlxr w9, x8, [x0]
+ cbnz w9, 0b
+ dmb ish
+ mov x0, x2
+ ret
+
+ .globl aarch64_atomic_fetch_add_4_default_impl
+ .align 5
+aarch64_atomic_fetch_add_4_default_impl:
+ prfm pstl1strm, [x0]
+0: ldaxr w2, [x0]
+ add w8, w2, w1
+ stlxr w9, w8, [x0]
+ cbnz w9, 0b
+ dmb ish
+ mov w0, w2
+ ret
+
+ .globl aarch64_atomic_xchg_4_default_impl
+ .align 5
+aarch64_atomic_xchg_4_default_impl:
+ prfm pstl1strm, [x0]
+0: ldaxr w2, [x0]
+ stlxr w8, w1, [x0]
+ cbnz w8, 0b
+ dmb ish
+ mov w0, w2
+ ret
+
+ .globl aarch64_atomic_xchg_8_default_impl
+ .align 5
+aarch64_atomic_xchg_8_default_impl:
+ prfm pstl1strm, [x0]
+0: ldaxr x2, [x0]
+ stlxr w8, x1, [x0]
+ cbnz w8, 0b
+ dmb ish
+ mov x0, x2
+ ret
+
+ .globl aarch64_atomic_cmpxchg_1_default_impl
+ .align 5
+aarch64_atomic_cmpxchg_1_default_impl:
+ dmb ish
+ prfm pstl1strm, [x0]
+0: ldxrb w3, [x0]
+ eor w8, w3, w1
+ tst x8, #0xff
+ b.ne 1f
+ stxrb w8, w2, [x0]
+ cbnz w8, 0b
+1: mov w0, w3
+ dmb ish
+ ret
+
+ .globl aarch64_atomic_cmpxchg_4_default_impl
+ .align 5
+aarch64_atomic_cmpxchg_4_default_impl:
+ dmb ish
+ prfm pstl1strm, [x0]
+0: ldxr w3, [x0]
+ cmp w3, w1
+ b.ne 1f
+ stxr w8, w2, [x0]
+ cbnz w8, 0b
+1: mov w0, w3
+ dmb ish
+ ret
+
+ .globl aarch64_atomic_cmpxchg_8_default_impl
+ .align 5
+aarch64_atomic_cmpxchg_8_default_impl:
+ dmb ish
+ prfm pstl1strm, [x0]
+0: ldxr x3, [x0]
+ cmp x3, x1
+ b.ne 1f
+ stxr w8, x2, [x0]
+ cbnz w8, 0b
+1: mov x0, x3
+ dmb ish
+ ret
+
+ .globl aarch64_atomic_cmpxchg_1_relaxed_default_impl
+ .align 5
+aarch64_atomic_cmpxchg_1_relaxed_default_impl:
+ prfm pstl1strm, [x0]
+0: ldxrb w3, [x0]
+ eor w8, w3, w1
+ tst x8, #0xff
+ b.ne 1f
+ stxrb w8, w2, [x0]
+ cbnz w8, 0b
+1: mov w0, w3
+ ret
+
+ .globl aarch64_atomic_cmpxchg_4_relaxed_default_impl
+ .align 5
+aarch64_atomic_cmpxchg_4_relaxed_default_impl:
+ prfm pstl1strm, [x0]
+0: ldxr w3, [x0]
+ cmp w3, w1
+ b.ne 1f
+ stxr w8, w2, [x0]
+ cbnz w8, 0b
+1: mov w0, w3
+ ret
+
+ .globl aarch64_atomic_cmpxchg_8_relaxed_default_impl
+ .align 5
+aarch64_atomic_cmpxchg_8_relaxed_default_impl:
+ prfm pstl1strm, [x0]
+0: ldxr x3, [x0]
+ cmp x3, x1
+ b.ne 1f
+ stxr w8, x2, [x0]
+ cbnz w8, 0b
+1: mov x0, x3
+ ret
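Each stub in this file is a load-exclusive/store-exclusive loop (`ldaxr`/`stlxr`) followed by `dmb ish`, i.e. an acquire/release read-modify-write plus a trailing full fence, which is what HotSpot's `memory_order_conservative` requires. A rough C++ rendering with GCC builtins, as a sketch of the semantics rather than the actual implementation:

    #include <stdint.h>

    inline uint32_t fetch_add_4_conservative(volatile uint32_t* dest, uint32_t add) {
      // __ATOMIC_ACQ_REL approximates the ldaxr/stlxr pairing; the explicit
      // fence stands in for the trailing "dmb ish".
      uint32_t old = __atomic_fetch_add(dest, add, __ATOMIC_ACQ_REL);
      __atomic_thread_fence(__ATOMIC_SEQ_CST);
      return old;
    }

The `_relaxed` cmpxchg variants simply drop the `dmb ish` fences on both sides of the loop.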
diff --git a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
index 8e275a4173e4455f8f707ec4141e04982458cee3..77e860ed5ec85202c4460e4faa0220a781bb426b 100644
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,59 +26,154 @@
#ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
+#include "atomic_aarch64.hpp"
#include "runtime/vm_version.hpp"
// Implementation of class atomic
+
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/
+// Call one of the stubs from C++. This uses the C calling convention,
+// but this asm definition is used in order only to clobber the
+// registers we use. If we called the stubs via an ABI call we'd have
+// to save X0 - X18 and most of the vectors.
+//
+// This really ought to be a template definition, but see GCC Bug
+// 33661, template methods forget explicit local register asm
+// vars. The problem is that register specifiers attached to local
+// variables are ignored in any template function.
+inline uint64_t bare_atomic_fastcall(address stub, volatile void *ptr, uint64_t arg1, uint64_t arg2 = 0) {
+ register uint64_t reg0 __asm__("x0") = (uint64_t)ptr;
+ register uint64_t reg1 __asm__("x1") = arg1;
+ register uint64_t reg2 __asm__("x2") = arg2;
+ register uint64_t reg3 __asm__("x3") = (uint64_t)stub;
+ register uint64_t result __asm__("x0");
+ asm volatile(// "stp x29, x30, [sp, #-16]!;"
+ " blr %1;"
+ // " ldp x29, x30, [sp], #16 // regs %0, %1, %2, %3, %4"
+ : "=r"(result), "+r"(reg3), "+r"(reg2)
+ : "r"(reg1), "0"(reg0) : "x8", "x9", "x30", "cc", "memory");
+ return result;
+}
+
+template <typename F, typename D, typename T1>
+inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1) {
+ return (D)bare_atomic_fastcall(CAST_FROM_FN_PTR(address, stub),
+ dest, (uint64_t)arg1);
+}
+
+template <typename F, typename D, typename T1, typename T2>
+inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1, T2 arg2) {
+ return (D)bare_atomic_fastcall(CAST_FROM_FN_PTR(address, stub),
+ dest, (uint64_t)arg1, (uint64_t)arg2);
+}
+
template<size_t byte_size>
struct Atomic::PlatformAdd {
template<typename D, typename I>
- D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
- D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
- FULL_MEM_BARRIER;
- return res;
- }
+ D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
template<typename D, typename I>
- D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
- return add_and_fetch(dest, add_value, order) - add_value;
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+ D value = fetch_and_add(dest, add_value, order) + add_value;
+ return value;
}
};
-template<size_t byte_size>
+template<>
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(4 == sizeof(I));
+ STATIC_ASSERT(4 == sizeof(D));
+ D old_value
+ = atomic_fastcall(aarch64_atomic_fetch_add_4_impl, dest, add_value);
+ return old_value;
+}
+
+template<>
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(8 == sizeof(I));
+ STATIC_ASSERT(8 == sizeof(D));
+ D old_value
+ = atomic_fastcall(aarch64_atomic_fetch_add_8_impl, dest, add_value);
+ return old_value;
+}
+
+template<>
template<typename T>
-inline T Atomic::PlatformXchg::operator()(T volatile* dest,
- T exchange_value,
- atomic_memory_order order) const {
- STATIC_ASSERT(byte_size == sizeof(T));
- T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
- FULL_MEM_BARRIER;
- return res;
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(4 == sizeof(T));
+ T old_value = atomic_fastcall(aarch64_atomic_xchg_4_impl, dest, exchange_value);
+ return old_value;
}
-// __attribute__((unused)) on dest is to get rid of spurious GCC warnings.
-template<size_t byte_size>
+template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest __attribute__((unused)),
- T compare_value,
- T exchange_value,
- atomic_memory_order order) const {
- STATIC_ASSERT(byte_size == sizeof(T));
- if (order == memory_order_relaxed) {
- T value = compare_value;
- __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- return value;
- } else {
- T value = compare_value;
- FULL_MEM_BARRIER;
- __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- FULL_MEM_BARRIER;
- return value;
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(8 == sizeof(T));
+ T old_value = atomic_fastcall(aarch64_atomic_xchg_8_impl, dest, exchange_value);
+ return old_value;
+}
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
+ T compare_value,
+ T exchange_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(1 == sizeof(T));
+ aarch64_atomic_stub_t stub;
+ switch (order) {
+ case memory_order_relaxed:
+ stub = aarch64_atomic_cmpxchg_1_relaxed_impl; break;
+ default:
+ stub = aarch64_atomic_cmpxchg_1_impl; break;
+ }
+
+ return atomic_fastcall(stub, dest, compare_value, exchange_value);
+}
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
+ T compare_value,
+ T exchange_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(4 == sizeof(T));
+ aarch64_atomic_stub_t stub;
+ switch (order) {
+ case memory_order_relaxed:
+ stub = aarch64_atomic_cmpxchg_4_relaxed_impl; break;
+ default:
+ stub = aarch64_atomic_cmpxchg_4_impl; break;
+ }
+
+ return atomic_fastcall(stub, dest, compare_value, exchange_value);
+}
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
+ T compare_value,
+ T exchange_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(8 == sizeof(T));
+ aarch64_atomic_stub_t stub;
+ switch (order) {
+ case memory_order_relaxed:
+ stub = aarch64_atomic_cmpxchg_8_relaxed_impl; break;
+ default:
+ stub = aarch64_atomic_cmpxchg_8_impl; break;
}
+
+ return atomic_fastcall(stub, dest, compare_value, exchange_value);
}
template<size_t byte_size>
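`bare_atomic_fastcall` above leans on GCC's explicit-register-variable extension to pin values into specific registers before the `blr`, which is precisely what GCC Bug 33661 breaks inside templates. A stripped-down sketch of the technique (the stub contract mirrors the patch: arguments in x0/x1, result in x0, only x0/x8/x9 and the link register clobbered; the names here are illustrative):

    #include <stdint.h>

    typedef uint64_t (*stub_t)(volatile void*, uint64_t);

    inline uint64_t call_stub(stub_t stub, volatile void* ptr, uint64_t arg) {
      register uint64_t x0 __asm__("x0") = (uint64_t)ptr;  // first stub argument
      register uint64_t x1 __asm__("x1") = arg;            // second stub argument
      register uint64_t fn __asm__("x9") = (uint64_t)stub; // call target
      asm volatile("blr %1"
                   : "+r"(x0), "+r"(fn)
                   : "r"(x1)
                   : "x8", "x30", "cc", "memory");         // x30 is the link register
      return x0;  // the stub leaves its result in x0
    }

Because the stubs clobber so little, this is far cheaper than a full ABI call, which would have to treat x0-x18 and most of the vector registers as dead.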
diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
index a785369a8ddcae90e1931bf5bcd51b6321bdecf4..13702ba1c2364c68a712dc831477a5dad3a2bf1d 100644
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
@@ -26,7 +26,6 @@
// no precompiled headers
#include "jvm.h"
#include "asm/macroAssembler.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
diff --git a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp
index 799d2cf87d18a1e43cef1784a1c47eeb12ea130c..702d6f6dcd5faeee8d0d8f0bc2060b4a3fa3f167 100644
--- a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -68,12 +68,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
- if (MetaspaceShared::is_in_trampoline_frame(addr)) {
- // In the middle of a trampoline call. Bail out for safety.
- // This happens rarely so shouldn't affect profiling.
- return false;
- }
-
frame ret_frame(ret_sp, ret_fp, addr);
if (!ret_frame.safe_for_sender(this)) {
#ifdef COMPILER2
diff --git a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.hpp
index 8c52a6a99454cb215ea9ef523440510e7697b27c..5a1f273c548660815abf1ce1d513472212d7e01d 100644
--- a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.hpp
+++ b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.hpp
@@ -39,10 +39,6 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
- static ByteSize saved_fp_address_offset() {
- return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_fp_address_offset();
- }
-
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);
diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
index 12eb0f543cefee84db20b9bb3e2edef8b8fca4ce..064c1fba16b6eecf7e4067ab923631e621eac566 100644
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
@@ -25,7 +25,6 @@
// no precompiled headers
#include "jvm.h"
#include "assembler_arm.inline.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
diff --git a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp
index 6fd8a2fbde56e4cea9892995bf097f416b350c71..66cb5e7f8451328dc2f88fa615cac7e4e199d7d9 100644
--- a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp
+++ b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -101,12 +101,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
- if (MetaspaceShared::is_in_trampoline_frame(addr)) {
- // In the middle of a trampoline call. Bail out for safety.
- // This happens rarely so shouldn't affect profiling.
- return false;
- }
-
frame ret_frame(ret_sp, ret_fp, addr);
if (!ret_frame.safe_for_sender(this)) {
#ifdef COMPILER2
diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
index 0e390ab3f2809ddfb512f72af710176258450e9b..5edb303b68b0bcd2f8dc350cfdff39d5eca898d5 100644
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
@@ -27,7 +27,6 @@
#include "jvm.h"
#include "assembler_ppc.hpp"
#include "asm/assembler.inline.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
diff --git a/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp
index f853f7a439a3ca8e8c0b06be9f44564a422a0900..9f779456640211c636aa03ae1ab09f46278b5217 100644
--- a/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "memory/metaspace.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.hpp"
diff --git a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
index cd370d2f7a3186f995a368ea74a9d63969372be0..53b017d221725486a1b41944092e99d30565aa49 100644
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
@@ -28,7 +28,6 @@
// no precompiled headers
#include "jvm.h"
#include "asm/assembler.inline.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
diff --git a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp
index aebadb25518b6000a16359617860fe00a130667f..eeaf2f47fc607faa828afe9238a725de2cf88f6f 100644
--- a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp
+++ b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "memory/metaspace.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.hpp"
diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
index ed603aa641e28bf493390d3031408ed7edeab0ab..dcd2f566a16d62279e2daa2a59e7097f6fffadee 100644
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
@@ -25,7 +25,6 @@
// no precompiled headers
#include "jvm.h"
#include "asm/macroAssembler.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
diff --git a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp
index b72127907b20854668832be9dc819b0d66098d5a..b030abe5b2be61990cfb8ce5b0ec6ef10ec7997a 100644
--- a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
- if (MetaspaceShared::is_in_trampoline_frame(addr)) {
- // In the middle of a trampoline call. Bail out for safety.
- // This happens rarely so shouldn't affect profiling.
- return false;
- }
-
frame ret_frame(ret_sp, ret_fp, addr);
if (!ret_frame.safe_for_sender(this)) {
#if COMPILER2_OR_JVMCI
diff --git a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.hpp
index 1b02aadc52570815ceb8bb870bea27a868c01020..574edc79831be1ad0f5cff45f83639cd42cb04fb 100644
--- a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.hpp
+++ b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.hpp
@@ -37,10 +37,6 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
- static ByteSize saved_rbp_address_offset() {
- return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset();
- }
-
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);
diff --git a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
index e38b7d351aec3f895f9e2bfec13b82d88d094f75..ac57da78a25913645175cd54156c9d88aecb6dd4 100644
--- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
+++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
@@ -26,7 +26,6 @@
// no precompiled headers
#include "jvm.h"
#include "assembler_zero.inline.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
diff --git a/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp b/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp
index 6422eb406e05a81aed0f77d5a09fc9e81cc9ff99..94a96fc05afeca12df311dda3e31a7d6cd1434c6 100644
--- a/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp
+++ b/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
diff --git a/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp b/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp
index d235b4850e8afaa7e357cf2376cd6d1ec1277bf9..677e810b78cbec0069be713a743f6093ba82d85a 100644
--- a/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp
+++ b/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp
@@ -69,12 +69,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
- if (MetaspaceShared::is_in_trampoline_frame(ret_frame.pc())) {
- // In the middle of a trampoline call. Bail out for safety.
- // This happens rarely so shouldn't affect profiling.
- return false;
- }
-
if (!ret_frame.safe_for_sender(jt)) {
#if COMPILER2_OR_JVMCI
// C2 and JVMCI use ebp as a general register see if NULL fp helps
diff --git a/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.hpp b/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.hpp
index 64d7a65e62a64451ca2c222537bc7b7f0df07637..bcf43c8b088296c06d8c4d1ac70739dca1b1d8d5 100644
--- a/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.hpp
+++ b/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.hpp
@@ -38,10 +38,6 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
- static ByteSize saved_fp_address_offset() {
- return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_fp_address_offset();
- }
-
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);
diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
index 2e18762cf634d66acd52c4be0daf429100107b92..764780db4e1c3c629f3752bf1006219ec7976a60 100644
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
@@ -25,7 +25,6 @@
// no precompiled headers
#include "jvm.h"
#include "asm/macroAssembler.hpp"
-#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
diff --git a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp
index 048574596f4e2bf0204e16273e44b19fbc54c5f8..8cf064e0613267a08c10fc03b89c8740a430b96e 100644
--- a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp
+++ b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,12 +64,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
return false;
}
- if (MetaspaceShared::is_in_trampoline_frame(ret_frame.pc())) {
- // In the middle of a trampoline call. Bail out for safety.
- // This happens rarely so shouldn't affect profiling.
- return false;
- }
-
if (!ret_frame.safe_for_sender(this)) {
#if COMPILER2_OR_JVMCI
// C2 and JVMCI use ebp as a general register see if NULL fp helps
diff --git a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.hpp b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.hpp
index 9cb8bc89682fe798b554f77435f23ecdcd4e779f..21577346246628da9c78dd87122bf9aa5631334f 100644
--- a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.hpp
+++ b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.hpp
@@ -44,10 +44,6 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
- static ByteSize saved_rbp_address_offset() {
- return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset();
- }
-
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);
diff --git a/src/hotspot/share/aot/aotCodeHeap.cpp b/src/hotspot/share/aot/aotCodeHeap.cpp
index a84138d64c8a8e9c2afd34e9973652d10e97232c..462665002ec3445acc89844b04185a8fcb197d8b 100644
--- a/src/hotspot/share/aot/aotCodeHeap.cpp
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp
@@ -112,8 +112,7 @@ Klass* AOTCodeHeap::lookup_klass(const char* name, int len, const Method* method
log_debug(aot, class, resolve)("Probe failed for AOT class %s", name);
return NULL;
}
- Klass* k = SystemDictionary::find_instance_or_array_klass(sym, loader, protection_domain, thread);
- assert(!thread->has_pending_exception(), "should not throw");
+ Klass* k = SystemDictionary::find_instance_or_array_klass(sym, loader, protection_domain);
if (k != NULL) {
log_info(aot, class, resolve)("%s %s (lookup)", caller->method_holder()->external_name(), k->external_name());
diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp
index 7558c9c85726201e738b7b8b612a4200b1330255..7d76c8ae751dee352be16a6bc7847cdd3ab23829 100644
--- a/src/hotspot/share/asm/codeBuffer.cpp
+++ b/src/hotspot/share/asm/codeBuffer.cpp
@@ -27,6 +27,7 @@
#include "code/oopRecorder.inline.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp
index dcd2d50b327af35b456e49a4f73da4794c2fdb5e..08e4ac2264af4790b132fa77400f8c7cdad78b50 100644
--- a/src/hotspot/share/c1/c1_LIR.hpp
+++ b/src/hotspot/share/c1/c1_LIR.hpp
@@ -231,8 +231,8 @@ class LIR_OprDesc: public CompilationResourceObj {
, is_xmm_bits = 1
, last_use_bits = 1
, is_fpu_stack_offset_bits = 1 // used in assertion checking on x86 for FPU stack slot allocation
- , non_data_bits = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits +
- is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits
+ , non_data_bits = pointer_bits + kind_bits + type_bits + size_bits + destroys_bits + virtual_bits
+ + is_xmm_bits + last_use_bits + is_fpu_stack_offset_bits
, data_bits = BitsPerInt - non_data_bits
, reg_bits = data_bits / 2 // for two registers in one value encoding
};
@@ -649,6 +649,11 @@ class LIR_OprFact: public AllStatic {
#endif // X86
static LIR_Opr virtual_register(int index, BasicType type) {
+ if (index > LIR_OprDesc::vreg_max) {
+ // Running out of virtual registers. Caller should bail out.
+ return illegalOpr;
+ }
+
LIR_Opr res;
switch (type) {
case T_OBJECT: // fall through
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp
index a78dc845e5962c4346592c90efca107867d1334d..d1fc710252871a1078f7742f48715cabb4d9ef6c 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -1049,20 +1049,21 @@ void LIRGenerator::move_to_phi(ValueStack* cur_state) {
LIR_Opr LIRGenerator::new_register(BasicType type) {
- int vreg = _virtual_register_number;
- // add a little fudge factor for the bailout, since the bailout is
- // only checked periodically. This gives a few extra registers to
- // hand out before we really run out, which helps us keep from
- // tripping over assertions.
- if (vreg + 20 >= LIR_OprDesc::vreg_max) {
- bailout("out of virtual registers");
- if (vreg + 2 >= LIR_OprDesc::vreg_max) {
- // wrap it around
+ int vreg_num = _virtual_register_number;
+ // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
+ // a few extra registers before we really run out, which helps avoid tripping over assertions.
+ if (vreg_num + 20 >= LIR_OprDesc::vreg_max) {
+ bailout("out of virtual registers in LIR generator");
+ if (vreg_num + 2 >= LIR_OprDesc::vreg_max) {
+ // Wrap it around and continue until bailout really happens to avoid hitting assertions.
_virtual_register_number = LIR_OprDesc::vreg_base;
+ vreg_num = LIR_OprDesc::vreg_base;
}
}
_virtual_register_number += 1;
- return LIR_OprFact::virtual_register(vreg, type);
+ LIR_Opr vreg = LIR_OprFact::virtual_register(vreg_num, type);
+ assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
+ return vreg;
}
diff --git a/src/hotspot/share/c1/c1_LinearScan.cpp b/src/hotspot/share/c1/c1_LinearScan.cpp
index 1d49366cc1aa6fa0c709afd2836468fff92c69ce..ccc02b7e85e81d55abb5af1719b7dca3dba969a5 100644
--- a/src/hotspot/share/c1/c1_LinearScan.cpp
+++ b/src/hotspot/share/c1/c1_LinearScan.cpp
@@ -3928,8 +3928,8 @@ void MoveResolver::insert_move(Interval* from_interval, Interval* to_interval) {
assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");
- LIR_Opr from_opr = LIR_OprFact::virtual_register(from_interval->reg_num(), from_interval->type());
- LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());
+ LIR_Opr from_opr = get_virtual_register(from_interval);
+ LIR_Opr to_opr = get_virtual_register(to_interval);
if (!_multiple_reads_allowed) {
// the last_use flag is an optimization for FPU stack allocation. When the same
@@ -3947,12 +3947,27 @@ void MoveResolver::insert_move(LIR_Opr from_opr, Interval* to_interval) {
assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");
- LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());
+ LIR_Opr to_opr = get_virtual_register(to_interval);
_insertion_buffer.move(_insert_idx, from_opr, to_opr);
TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: inserted move from constant "); from_opr->print(); tty->print_cr(" to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
}
+LIR_Opr MoveResolver::get_virtual_register(Interval* interval) {
+ // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
+ // a few extra registers before we really run out, which helps avoid tripping over assertions.
+ int reg_num = interval->reg_num();
+ if (reg_num + 20 >= LIR_OprDesc::vreg_max) {
+ _allocator->bailout("out of virtual registers in linear scan");
+ if (reg_num + 2 >= LIR_OprDesc::vreg_max) {
+ // Wrap it around and continue until bailout really happens to avoid hitting assertions.
+ reg_num = LIR_OprDesc::vreg_base;
+ }
+ }
+ LIR_Opr vreg = LIR_OprFact::virtual_register(reg_num, interval->type());
+ assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
+ return vreg;
+}
void MoveResolver::resolve_mappings() {
TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: resolving mappings for Block B%d, index %d", _insert_list->block() != NULL ? _insert_list->block()->block_id() : -1, _insert_idx));
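`LIRGenerator::new_register()` and the new `MoveResolver::get_virtual_register()` now share one pattern: request the bailout while roughly 20 registers short of the hard limit (bailouts are only observed periodically), and wrap the number near the very end so assertions stay quiet until the bailout takes effect. The logic in isolation, with illustrative stand-ins for `LIR_OprDesc::vreg_max` and `vreg_base`:

    static const int vreg_max  = 1 << 20;  // stand-in bound
    static const int vreg_base = 40;       // stand-in base

    int checked_vreg(int candidate, bool* bailed_out) {
      if (candidate + 20 >= vreg_max) {
        *bailed_out = true;          // noticed by callers only periodically
        if (candidate + 2 >= vreg_max) {
          candidate = vreg_base;     // wrap so assertions don't fire first
        }
      }
      return candidate;
    }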
diff --git a/src/hotspot/share/c1/c1_LinearScan.hpp b/src/hotspot/share/c1/c1_LinearScan.hpp
index 0249453d9c10237660b930730c1d534fcb4c93ff..761de2c3dc73ecc3691157892dddd969ea3e6c36 100644
--- a/src/hotspot/share/c1/c1_LinearScan.hpp
+++ b/src/hotspot/share/c1/c1_LinearScan.hpp
@@ -436,6 +436,7 @@ class MoveResolver: public StackObj {
void append_insertion_buffer();
void insert_move(Interval* from_interval, Interval* to_interval);
void insert_move(LIR_Opr from_opr, Interval* to_interval);
+ LIR_Opr get_virtual_register(Interval* interval);
DEBUG_ONLY(void verify_before_resolve();)
void resolve_mappings();
diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp
index 3df467216ba934542619b6e6abe81549d21ac0ad..9ffba8d9fd6d0f09ee35e2694d31ebddb2b9493a 100644
--- a/src/hotspot/share/ci/ciEnv.cpp
+++ b/src/hotspot/share/ci/ciEnv.cpp
@@ -304,10 +304,10 @@ ciInstance* ciEnv::get_or_create_exception(jobject& handle, Symbol* name) {
VM_ENTRY_MARK;
if (handle == NULL) {
// Cf. universe.cpp, creation of Universe::_null_ptr_exception_instance.
- Klass* k = SystemDictionary::find(name, Handle(), Handle(), THREAD);
+ InstanceKlass* ik = SystemDictionary::find_instance_klass(name, Handle(), Handle());
jobject objh = NULL;
- if (!HAS_PENDING_EXCEPTION && k != NULL) {
- oop obj = InstanceKlass::cast(k)->allocate_instance(THREAD);
+ if (ik != NULL) {
+ oop obj = ik->allocate_instance(THREAD);
if (!HAS_PENDING_EXCEPTION)
objh = JNIHandles::make_global(Handle(THREAD, obj));
}
@@ -445,11 +445,9 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
MutexLocker ml(Compile_lock);
Klass* kls;
if (!require_local) {
- kls = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader,
- CHECK_AND_CLEAR_(fail_type));
+ kls = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, THREAD);
} else {
- kls = SystemDictionary::find_instance_or_array_klass(sym, loader, domain,
- CHECK_AND_CLEAR_(fail_type));
+ kls = SystemDictionary::find_instance_or_array_klass(sym, loader, domain);
}
found_klass = kls;
}
@@ -957,7 +955,7 @@ void ciEnv::register_method(ciMethod* target,
bool has_unsafe_access,
bool has_wide_vectors,
RTMState rtm_state,
- const GrowableArrayView& native_invokers) {
+ const GrowableArrayView& native_invokers) {
VM_ENTRY_MARK;
nmethod* nm = NULL;
{
diff --git a/src/hotspot/share/ci/ciEnv.hpp b/src/hotspot/share/ci/ciEnv.hpp
index 5baf280764e12a90c678c4e410761865af25405f..ab1c359659d9bfd8e0dc2c824e47166a4ca1dad5 100644
--- a/src/hotspot/share/ci/ciEnv.hpp
+++ b/src/hotspot/share/ci/ciEnv.hpp
@@ -380,7 +380,7 @@ public:
bool has_unsafe_access,
bool has_wide_vectors,
RTMState rtm_state = NoRTM,
- const GrowableArrayView& native_invokers = GrowableArrayView::EMPTY);
+ const GrowableArrayView& native_invokers = GrowableArrayView::EMPTY);
// Access to certain well known ciObjects.
diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp
index 8baa34d19e631835bfe9919d84228695d303a8b4..e9a42e451cca1c6ca8e05cedd7f3c50f3c425285 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp
@@ -727,7 +727,7 @@ void ciInstanceKlass::dump_replay_data(outputStream* out) {
// Try to record related loaded classes
Klass* sub = ik->subklass();
while (sub != NULL) {
- if (sub->is_instance_klass()) {
+ if (sub->is_instance_klass() && !sub->is_hidden() && !InstanceKlass::cast(sub)->is_unsafe_anonymous()) {
out->print_cr("instanceKlass %s", sub->name()->as_quoted_ascii());
}
sub = sub->next_sibling();
diff --git a/src/hotspot/share/ci/ciMethodData.cpp b/src/hotspot/share/ci/ciMethodData.cpp
index 8e177980a7a4edd66c2a4241fb864e751335904f..8f61ce744dcecc51e32c19c5e9066d338a9409a9 100644
--- a/src/hotspot/share/ci/ciMethodData.cpp
+++ b/src/hotspot/share/ci/ciMethodData.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
#include "compiler/compiler_globals.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/klass.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "utilities/copy.hpp"
diff --git a/src/hotspot/share/classfile/classFileStream.cpp b/src/hotspot/share/classfile/classFileStream.cpp
index 6a625e5350c15d315851475caf9b5aaf84a0b323..b80f873c4908afc8752339795aeb10518c7151d3 100644
--- a/src/hotspot/share/classfile/classFileStream.cpp
+++ b/src/hotspot/share/classfile/classFileStream.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,9 @@ ClassFileStream::ClassFileStream(const u1* buffer,
_current(buffer),
_source(source),
_need_verify(verify_stream),
- _from_boot_loader_modules_image(from_boot_loader_modules_image) {}
+ _from_boot_loader_modules_image(from_boot_loader_modules_image) {
+ assert(buffer != NULL, "caller should throw NPE");
+}
const u1* ClassFileStream::clone_buffer() const {
u1* const new_buffer_start = NEW_RESOURCE_ARRAY(u1, length());
diff --git a/src/hotspot/share/classfile/classListParser.cpp b/src/hotspot/share/classfile/classListParser.cpp
index e5e8f2cd243cf9f6ea8a07c742fd8748ab56ab09..b32801504eed5446f93ef0d52e29dece8b7ef061 100644
--- a/src/hotspot/share/classfile/classListParser.cpp
+++ b/src/hotspot/share/classfile/classListParser.cpp
@@ -43,6 +43,7 @@
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/constantPool.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
@@ -50,11 +51,10 @@
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"
+volatile Thread* ClassListParser::_parsing_thread = NULL;
ClassListParser* ClassListParser::_instance = NULL;
ClassListParser::ClassListParser(const char* file) {
- assert(_instance == NULL, "must be singleton");
- _instance = this;
_classlist_file = file;
_file = NULL;
// Use os::open() because neither fopen() nor os::fopen()
@@ -73,12 +73,22 @@ ClassListParser::ClassListParser(const char* file) {
_line_no = 0;
_interfaces = new (ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
_indy_items = new (ResourceObj::C_HEAP, mtClass) GrowableArray<const char*>(9, mtClass);
+
+ // _instance should only be accessed by the thread that created _instance.
+ assert(_instance == NULL, "must be singleton");
+ _instance = this;
+ Atomic::store(&_parsing_thread, Thread::current());
+}
+
+bool ClassListParser::is_parsing_thread() {
+ return Atomic::load(&_parsing_thread) == Thread::current();
}
ClassListParser::~ClassListParser() {
if (_file) {
fclose(_file);
}
+ Atomic::store(&_parsing_thread, (Thread*)NULL);
_instance = NULL;
}
diff --git a/src/hotspot/share/classfile/classListParser.hpp b/src/hotspot/share/classfile/classListParser.hpp
index f8598a500219bbb52283f0031588948a3fc9b869..ed7116ce90e4dcbc71a5504855ed66a659c553f5 100644
--- a/src/hotspot/share/classfile/classListParser.hpp
+++ b/src/hotspot/share/classfile/classListParser.hpp
@@ -33,6 +33,8 @@
#define LAMBDA_PROXY_TAG "@lambda-proxy"
#define LAMBDA_FORM_TAG "@lambda-form-invoker"
+class Thread;
+
class ID2KlassTable : public KVHashtable<int, InstanceKlass*, mtInternal> {
public:
ID2KlassTable() : KVHashtable<int, InstanceKlass*, mtInternal>(1987) {}
@@ -81,6 +83,7 @@ class ClassListParser : public StackObj {
_line_buf_size = _max_allowed_line_len + _line_buf_extra
};
+ static volatile Thread* _parsing_thread; // the thread that created _instance
static ClassListParser* _instance; // the singleton.
const char* _classlist_file;
FILE* _file;
@@ -119,9 +122,13 @@ public:
ClassListParser(const char* file);
~ClassListParser();
+ static bool is_parsing_thread();
static ClassListParser* instance() {
+ assert(is_parsing_thread(), "call this only in the thread that created ClassListParser::_instance");
+ assert(_instance != NULL, "must be");
return _instance;
}
+
bool parse_one_line();
void split_tokens_by_whitespace(int offset);
int split_at_tag_from_line();
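The `_parsing_thread` field above is a single-owner guard: the creating thread is recorded with an atomic store, and `is_parsing_thread()` lets asserts verify the caller from any thread without taking a lock. The same idea restated in standard C++, as a sketch rather than HotSpot code:

    #include <atomic>
    #include <thread>

    class Parser {
      static std::atomic<std::thread::id> _parsing_thread;
      static Parser* _instance;
    public:
      Parser() {
        _instance = this;
        _parsing_thread.store(std::this_thread::get_id());
      }
      ~Parser() {
        _parsing_thread.store(std::thread::id());  // "no thread"
        _instance = nullptr;
      }
      static bool is_parsing_thread() {
        return _parsing_thread.load() == std::this_thread::get_id();
      }
      static Parser* instance() {
        // assert(is_parsing_thread(), "only the creating thread may call this");
        return _instance;
      }
    };

    std::atomic<std::thread::id> Parser::_parsing_thread;
    Parser* Parser::_instance = nullptr;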
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index d80fe19bd357459797014dd0d956eec0ebba3c60..47eaecead777044967c6ac7bf5cd32344c5bfe9b 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -287,7 +287,7 @@ ClassPathZipEntry::~ClassPathZipEntry() {
u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
// enable call to C land
- JavaThread* thread = JavaThread::current();
+ JavaThread* thread = THREAD->as_Java_thread();
ThreadToNativeFromVM ttn(thread);
// check whether zip archive contains name
jint name_len;
@@ -501,7 +501,7 @@ void ClassLoader::trace_class_path(const char* msg, const char* name) {
}
}
-void ClassLoader::setup_bootstrap_search_path() {
+void ClassLoader::setup_bootstrap_search_path(TRAPS) {
const char* sys_class_path = Arguments::get_sysclasspath();
assert(sys_class_path != NULL, "System boot class path must not be NULL");
if (PrintSharedArchiveAndExit) {
@@ -510,11 +510,11 @@ void ClassLoader::setup_bootstrap_search_path() {
} else {
trace_class_path("bootstrap loader class path=", sys_class_path);
}
- setup_boot_search_path(sys_class_path);
+ setup_bootstrap_search_path_impl(sys_class_path, CHECK);
}
#if INCLUDE_CDS
-void ClassLoader::setup_app_search_path(const char *class_path) {
+void ClassLoader::setup_app_search_path(const char *class_path, TRAPS) {
Arguments::assert_is_dumping_archive();
ResourceMark rm;
@@ -522,7 +522,7 @@ void ClassLoader::setup_app_search_path(const char *class_path) {
while (cp_stream.has_next()) {
const char* path = cp_stream.get_next();
- update_class_path_entry_list(path, false, false, false);
+ update_class_path_entry_list(path, false, false, false, CHECK);
}
}
@@ -542,7 +542,7 @@ void ClassLoader::add_to_module_path_entries(const char* path,
}
// Add a module path to the _module_path_entries list.
-void ClassLoader::update_module_path_entry_list(const char *path, TRAPS) {
+void ClassLoader::setup_module_search_path(const char* path, TRAPS) {
Arguments::assert_is_dumping_archive();
struct stat st;
if (os::stat(path, &st) != 0) {
@@ -562,10 +562,6 @@ void ClassLoader::update_module_path_entry_list(const char *path, TRAPS) {
return;
}
-void ClassLoader::setup_module_search_path(const char* path, TRAPS) {
- update_module_path_entry_list(path, THREAD);
-}
-
#endif // INCLUDE_CDS
void ClassLoader::close_jrt_image() {
@@ -632,8 +628,7 @@ bool ClassLoader::is_in_patch_mod_entries(Symbol* module_name) {
}
// Set up the _jrt_entry if present and boot append path
-void ClassLoader::setup_boot_search_path(const char *class_path) {
- EXCEPTION_MARK;
+void ClassLoader::setup_bootstrap_search_path_impl(const char *class_path, TRAPS) {
ResourceMark rm(THREAD);
ClasspathStream cp_stream(class_path);
bool set_base_piece = true;
@@ -675,7 +670,7 @@ void ClassLoader::setup_boot_search_path(const char *class_path) {
} else {
// Every entry on the system boot class path after the initial base piece,
// which is set by os::set_boot_path(), is considered an appended entry.
- update_class_path_entry_list(path, false, true, false);
+ update_class_path_entry_list(path, false, true, false, CHECK);
}
}
}
@@ -717,19 +712,27 @@ void ClassLoader::add_to_exploded_build_list(Symbol* module_sym, TRAPS) {
}
}
+jzfile* ClassLoader::open_zip_file(const char* canonical_path, char** error_msg, JavaThread* thread) {
+ // enable call to C land
+ ThreadToNativeFromVM ttn(thread);
+ HandleMark hm(thread);
+ load_zip_library_if_needed();
+ return (*ZipOpen)(canonical_path, error_msg);
+}
+
ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const struct stat* st,
bool throw_exception,
bool is_boot_append,
bool from_class_path_attr,
TRAPS) {
- JavaThread* thread = JavaThread::current();
+ JavaThread* thread = THREAD->as_Java_thread();
ClassPathEntry* new_entry = NULL;
if ((st->st_mode & S_IFMT) == S_IFREG) {
ResourceMark rm(thread);
// Regular file, should be a zip or jimage file
// Canonicalized filename
- char* canonical_path = NEW_RESOURCE_ARRAY_IN_THREAD(thread, char, JVM_MAXPATHLEN);
- if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
+ const char* canonical_path = get_canonical_path(path, thread);
+ if (canonical_path == NULL) {
// This matches the classic VM
if (throw_exception) {
THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
@@ -743,14 +746,7 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
new_entry = new ClassPathImageEntry(jimage, canonical_path);
} else {
char* error_msg = NULL;
- jzfile* zip;
- {
- // enable call to C land
- ThreadToNativeFromVM ttn(thread);
- HandleMark hm(thread);
- load_zip_library_if_needed();
- zip = (*ZipOpen)(canonical_path, &error_msg);
- }
+ jzfile* zip = open_zip_file(canonical_path, &error_msg, thread);
if (zip != NULL && error_msg == NULL) {
new_entry = new ClassPathZipEntry(zip, path, is_boot_append, from_class_path_attr);
} else {
@@ -789,18 +785,12 @@ ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path, bo
struct stat st;
if (os::stat(path, &st) == 0) {
if ((st.st_mode & S_IFMT) == S_IFREG) {
- char canonical_path[JVM_MAXPATHLEN];
- if (get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
+ JavaThread* thread = JavaThread::current();
+ ResourceMark rm(thread);
+ const char* canonical_path = get_canonical_path(path, thread);
+ if (canonical_path != NULL) {
char* error_msg = NULL;
- jzfile* zip;
- {
- // enable call to C land
- JavaThread* thread = JavaThread::current();
- ThreadToNativeFromVM ttn(thread);
- HandleMark hm(thread);
- load_zip_library_if_needed();
- zip = (*ZipOpen)(canonical_path, &error_msg);
- }
+ jzfile* zip = open_zip_file(canonical_path, &error_msg, thread);
if (zip != NULL && error_msg == NULL) {
// create using canonical path
return new ClassPathZipEntry(zip, canonical_path, is_boot_append, false);
@@ -847,7 +837,8 @@ void ClassLoader::add_to_boot_append_entries(ClassPathEntry *new_entry) {
// jdk/internal/loader/ClassLoaders$AppClassLoader instance.
void ClassLoader::add_to_app_classpath_entries(const char* path,
ClassPathEntry* entry,
- bool check_for_duplicates) {
+ bool check_for_duplicates,
+ TRAPS) {
#if INCLUDE_CDS
assert(entry != NULL, "ClassPathEntry should not be NULL");
ClassPathEntry* e = _app_classpath_entries;
@@ -871,7 +862,7 @@ void ClassLoader::add_to_app_classpath_entries(const char* path,
}
if (entry->is_jar_file()) {
- ClassLoaderExt::process_jar_manifest(entry, check_for_duplicates);
+ ClassLoaderExt::process_jar_manifest(entry, check_for_duplicates, CHECK);
}
#endif
}
@@ -881,13 +872,12 @@ bool ClassLoader::update_class_path_entry_list(const char *path,
bool check_for_duplicates,
bool is_boot_append,
bool from_class_path_attr,
- bool throw_exception) {
+ TRAPS) {
struct stat st;
if (os::stat(path, &st) == 0) {
// File or directory found
ClassPathEntry* new_entry = NULL;
- Thread* THREAD = Thread::current();
- new_entry = create_class_path_entry(path, &st, throw_exception, is_boot_append, from_class_path_attr, CHECK_(false));
+ new_entry = create_class_path_entry(path, &st, /*throw_exception=*/true, is_boot_append, from_class_path_attr, CHECK_false);
if (new_entry == NULL) {
return false;
}
@@ -897,7 +887,7 @@ bool ClassLoader::update_class_path_entry_list(const char *path,
if (is_boot_append) {
add_to_boot_append_entries(new_entry);
} else {
- add_to_app_classpath_entries(path, new_entry, check_for_duplicates);
+ add_to_app_classpath_entries(path, new_entry, check_for_duplicates, CHECK_false);
}
return true;
} else {
@@ -1286,7 +1276,7 @@ InstanceKlass* ClassLoader::load_class(Symbol* name, bool search_append_only, TR
return NULL;
}
- result->set_classpath_index(classpath_index, THREAD);
+ result->set_classpath_index(classpath_index);
return result;
}
@@ -1339,25 +1329,22 @@ void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream
PackageEntry* pkg_entry = ik->package();
if (FileMapInfo::get_number_of_shared_paths() > 0) {
- char* canonical_path_table_entry = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN);
-
- // save the path from the file: protocol or the module name from the jrt: protocol
- // if no protocol prefix is found, path is the same as stream->source()
+ // Save the path from the file: protocol or the module name from the jrt: protocol
+ // if no protocol prefix is found, path is the same as stream->source(). This path
+ // must be valid since the class has been successfully parsed.
char* path = skip_uri_protocol(src);
- char* canonical_class_src_path = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN);
- bool success = get_canonical_path(path, canonical_class_src_path, JVM_MAXPATHLEN);
- // The path is from the ClassFileStream. Since a ClassFileStream has been created successfully in functions
- // such as ClassLoader::load_class(), its source path must be valid.
- assert(success, "must be valid path");
+ assert(path != NULL, "sanity");
for (int i = 0; i < FileMapInfo::get_number_of_shared_paths(); i++) {
SharedClassPathEntry* ent = FileMapInfo::shared_path(i);
- success = get_canonical_path(ent->name(), canonical_path_table_entry, JVM_MAXPATHLEN);
// A shared path has been validated during its creation in ClassLoader::create_class_path_entry(),
// it must be valid here.
- assert(success, "must be valid path");
+ assert(ent->name() != NULL, "sanity");
// If the path (from the class stream source) is the same as the shared
// class or module path, then we have a match.
- if (strcmp(canonical_path_table_entry, canonical_class_src_path) == 0) {
+ // src may come from the App/Platform class loaders, which would canonicalize
+ // the file name. We cannot use strcmp to check for equality against ent->name().
+ // We must use os::same_files (which is faster than canonicalizing ent->name()).
+ if (os::same_files(ent->name(), path)) {
// NULL pkg_entry and pkg_entry in an unnamed module implies the class
// is from the -cp or boot loader append path which consists of -Xbootclasspath/a
// and jvmti appended entries.
@@ -1421,7 +1408,7 @@ void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream
ik->name()->utf8_length());
assert(file_name != NULL, "invariant");
- ClassLoaderExt::record_result(classpath_index, ik, THREAD);
+ ClassLoaderExt::record_result(classpath_index, ik, CHECK);
}
#endif // INCLUDE_CDS
@@ -1430,9 +1417,7 @@ void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream
// this list has been created, it must not change order (see class PackageInfo)
// it can be appended to and is by jvmti.
-void ClassLoader::initialize() {
- EXCEPTION_MARK;
-
+void ClassLoader::initialize(TRAPS) {
if (UsePerfData) {
// jvmstat performance counters
NEWPERFTICKCOUNTER(_perf_accumulated_time, SUN_CLS, "time");
@@ -1464,7 +1449,7 @@ void ClassLoader::initialize() {
// lookup java library entry points
load_java_library();
// jimage library entry points are loaded below, in lookup_vm_options
- setup_bootstrap_search_path();
+ setup_bootstrap_search_path(CHECK);
}
char* lookup_vm_resource(JImageFile *jimage, const char *jimage_version, const char *path) {
@@ -1501,16 +1486,16 @@ char* ClassLoader::lookup_vm_options() {
}
#if INCLUDE_CDS
-void ClassLoader::initialize_shared_path() {
+void ClassLoader::initialize_shared_path(TRAPS) {
if (Arguments::is_dumping_archive()) {
- ClassLoaderExt::setup_search_paths();
+ ClassLoaderExt::setup_search_paths(CHECK);
}
}
void ClassLoader::initialize_module_path(TRAPS) {
if (Arguments::is_dumping_archive()) {
- ClassLoaderExt::setup_module_paths(THREAD);
- FileMapInfo::allocate_shared_path_table();
+ ClassLoaderExt::setup_module_paths(CHECK);
+ FileMapInfo::allocate_shared_path_table(CHECK);
}
}
@@ -1566,7 +1551,11 @@ int ClassLoader::compute_Object_vtable() {
void classLoader_init1() {
- ClassLoader::initialize();
+ EXCEPTION_MARK;
+ ClassLoader::initialize(THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ vm_exit_during_initialization("ClassLoader::initialize() failed unexpectedly");
+ }
}
// Complete the ClassPathEntry setup for the boot loader
@@ -1599,18 +1588,18 @@ void ClassLoader::classLoader_init2(TRAPS) {
}
}
-bool ClassLoader::get_canonical_path(const char* orig, char* out, int len) {
- assert(orig != NULL && out != NULL && len > 0, "bad arguments");
- JavaThread* THREAD = JavaThread::current();
- ResourceMark rm(THREAD);
-
+char* ClassLoader::get_canonical_path(const char* orig, Thread* thread) {
+ assert(orig != NULL, "bad arguments");
+ // caller needs to allocate ResourceMark for the following output buffer
+ char* canonical_path = NEW_RESOURCE_ARRAY_IN_THREAD(thread, char, JVM_MAXPATHLEN);
+ ResourceMark rm(thread);
// os::native_path writes into orig_copy
- char* orig_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, strlen(orig)+1);
+ char* orig_copy = NEW_RESOURCE_ARRAY_IN_THREAD(thread, char, strlen(orig)+1);
strcpy(orig_copy, orig);
- if ((CanonicalizeEntry)(os::native_path(orig_copy), out, len) < 0) {
- return false;
+ if ((CanonicalizeEntry)(os::native_path(orig_copy), canonical_path, JVM_MAXPATHLEN) < 0) {
+ return NULL;
}
- return true;
+ return canonical_path;
}
void ClassLoader::create_javabase() {
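The switch from canonicalize-then-`strcmp` to `os::same_files()` in `record_result()` avoids canonicalizing every shared path table entry on each lookup. On POSIX, a file-identity comparison of that shape could look like the following sketch (illustrative only, not the HotSpot implementation, which also needs platform-specific handling):

    #include <string.h>
    #include <sys/stat.h>

    bool same_files(const char* a, const char* b) {
      if (strcmp(a, b) == 0) return true;  // fast path: textually identical
      struct stat sa, sb;
      if (stat(a, &sa) != 0 || stat(b, &sb) != 0) return false;
      // Same device and inode means both names resolve to the same file.
      return sa.st_dev == sb.st_dev && sa.st_ino == sb.st_ino;
    }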
diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp
index 8df5a50b7354aa2413f20bd0bc0b4480477f5363..d46fbe14bfe734deec3356d692fee240faafbf9c 100644
--- a/src/hotspot/share/classfile/classLoader.hpp
+++ b/src/hotspot/share/classfile/classLoader.hpp
@@ -222,11 +222,12 @@ class ClassLoader: AllStatic {
CDS_ONLY(static ClassPathEntry* _last_app_classpath_entry;)
CDS_ONLY(static ClassPathEntry* _module_path_entries;)
CDS_ONLY(static ClassPathEntry* _last_module_path_entry;)
- CDS_ONLY(static void setup_app_search_path(const char* class_path);)
+ CDS_ONLY(static void setup_app_search_path(const char* class_path, TRAPS);)
CDS_ONLY(static void setup_module_search_path(const char* path, TRAPS);)
static void add_to_app_classpath_entries(const char* path,
ClassPathEntry* entry,
- bool check_for_duplicates);
+ bool check_for_duplicates,
+ TRAPS);
CDS_ONLY(static void add_to_module_path_entries(const char* path,
ClassPathEntry* entry);)
public:
@@ -240,8 +241,8 @@ class ClassLoader: AllStatic {
// - setup the boot loader's system class path
// - setup the boot loader's patch mod entries, if present
// - create the ModuleEntry for java.base
- static void setup_bootstrap_search_path();
- static void setup_boot_search_path(const char *class_path);
+ static void setup_bootstrap_search_path(TRAPS);
+ static void setup_bootstrap_search_path_impl(const char *class_path, TRAPS);
static void setup_patch_mod_entries();
static void create_javabase();
@@ -254,6 +255,7 @@ class ClassLoader: AllStatic {
static int _libzip_loaded; // used to sync loading zip.
static void release_load_zip_library();
static inline void load_zip_library_if_needed();
+ static jzfile* open_zip_file(const char* canonical_path, char** error_msg, JavaThread* thread);
public:
static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
@@ -263,7 +265,7 @@ class ClassLoader: AllStatic {
// Canonicalizes path names, so strcmp will work properly. This is mainly
// to avoid confusing the zip library
- static bool get_canonical_path(const char* orig, char* out, int len);
+ static char* get_canonical_path(const char* orig, Thread* thread);
static const char* file_name_for_class_name(const char* class_name,
int class_name_len);
static PackageEntry* get_package_entry(Symbol* pkg_name, ClassLoaderData* loader_data);
@@ -272,8 +274,7 @@ class ClassLoader: AllStatic {
bool check_for_duplicates,
bool is_boot_append,
bool from_class_path_attr,
- bool throw_exception=true);
- CDS_ONLY(static void update_module_path_entry_list(const char *path, TRAPS);)
+ TRAPS);
static void print_bootclasspath();
// Timing
@@ -335,9 +336,9 @@ class ClassLoader: AllStatic {
static objArrayOop get_system_packages(TRAPS);
// Initialization
- static void initialize();
+ static void initialize(TRAPS);
static void classLoader_init2(TRAPS);
- CDS_ONLY(static void initialize_shared_path();)
+ CDS_ONLY(static void initialize_shared_path(TRAPS);)
CDS_ONLY(static void initialize_module_path(TRAPS);)
static int compute_Object_vtable();
diff --git a/src/hotspot/share/classfile/classLoaderData.hpp b/src/hotspot/share/classfile/classLoaderData.hpp
index bfad9a5cab91d7c50669d7cb81447b41eb97e745..d2c994ec1be3a405fca0824cf09d337e6392f810 100644
--- a/src/hotspot/share/classfile/classLoaderData.hpp
+++ b/src/hotspot/share/classfile/classLoaderData.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,6 @@
#define SHARE_CLASSFILE_CLASSLOADERDATA_HPP
#include "memory/allocation.hpp"
-#include "memory/memRegion.hpp"
-#include "memory/metaspace.hpp"
#include "oops/oopHandle.hpp"
#include "oops/weakHandle.hpp"
#include "runtime/atomic.hpp"
diff --git a/src/hotspot/share/classfile/classLoaderExt.cpp b/src/hotspot/share/classfile/classLoaderExt.cpp
index bd39fc071e5df65432ecbda49d17bdb02456c323..f9003f3436841db80661129dac3524b1a38802de 100644
--- a/src/hotspot/share/classfile/classLoaderExt.cpp
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp
@@ -65,7 +65,7 @@ void ClassLoaderExt::append_boot_classpath(ClassPathEntry* new_entry) {
ClassLoader::add_to_boot_append_entries(new_entry);
}
-void ClassLoaderExt::setup_app_search_path() {
+void ClassLoaderExt::setup_app_search_path(TRAPS) {
Arguments::assert_is_dumping_archive();
_app_class_paths_start_index = ClassLoader::num_boot_classpath_entries();
char* app_class_path = os::strdup(Arguments::get_appclasspath());
@@ -77,7 +77,7 @@ void ClassLoaderExt::setup_app_search_path() {
trace_class_path("app loader class path (skipped)=", app_class_path);
} else {
trace_class_path("app loader class path=", app_class_path);
- ClassLoader::setup_app_search_path(app_class_path);
+ ClassLoader::setup_app_search_path(app_class_path, CHECK);
}
}
@@ -88,7 +88,7 @@ void ClassLoaderExt::process_module_table(ModuleEntryTable* met, TRAPS) {
char* path = m->location()->as_C_string();
if (strncmp(path, "file:", 5) == 0) {
path = ClassLoader::skip_uri_protocol(path);
- ClassLoader::setup_module_search_path(path, THREAD);
+ ClassLoader::setup_module_search_path(path, CHECK);
}
m = m->next();
}
@@ -100,7 +100,7 @@ void ClassLoaderExt::setup_module_paths(TRAPS) {
ClassLoader::num_app_classpath_entries();
Handle system_class_loader (THREAD, SystemDictionary::java_system_loader());
ModuleEntryTable* met = Modules::get_module_entry_table(system_class_loader);
- process_module_table(met, THREAD);
+ process_module_table(met, CHECK);
}
char* ClassLoaderExt::read_manifest(ClassPathEntry* entry, jint *manifest_size, bool clean_text, TRAPS) {
@@ -164,8 +164,7 @@ char* ClassLoaderExt::get_class_path_attr(const char* jar_path, char* manifest,
}
void ClassLoaderExt::process_jar_manifest(ClassPathEntry* entry,
- bool check_for_duplicates) {
- Thread* THREAD = Thread::current();
+ bool check_for_duplicates, TRAPS) {
ResourceMark rm(THREAD);
jint manifest_size;
char* manifest = read_manifest(entry, &manifest_size, CHECK);
@@ -213,7 +212,8 @@ void ClassLoaderExt::process_jar_manifest(ClassPathEntry* entry,
char* libname = NEW_RESOURCE_ARRAY(char, libname_len + 1);
int n = os::snprintf(libname, libname_len + 1, "%.*s%s", dir_len, dir_name, file_start);
assert((size_t)n == libname_len, "Unexpected number of characters in string");
- if (ClassLoader::update_class_path_entry_list(libname, true, false, true /* from_class_path_attr */)) {
+ bool status = ClassLoader::update_class_path_entry_list(libname, true, false, true /* from_class_path_attr */, CHECK);
+ if (status) {
trace_class_path("library = ", libname);
} else {
trace_class_path("library (non-existent) = ", libname);
@@ -226,8 +226,8 @@ void ClassLoaderExt::process_jar_manifest(ClassPathEntry* entry,
}
}
-void ClassLoaderExt::setup_search_paths() {
- ClassLoaderExt::setup_app_search_path();
+void ClassLoaderExt::setup_search_paths(TRAPS) {
+ ClassLoaderExt::setup_app_search_path(CHECK);
}
void ClassLoaderExt::record_result(const s2 classpath_index,
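
One detail worth noting in the process_jar_manifest() hunk above: the update_class_path_entry_list() call is split into an assignment plus a separate if because CHECK expands into extra statements after the call, so it cannot sit inside an if condition. Schematically:

    // CHECK expands roughly to:
    //   THREAD); if (HAS_PENDING_EXCEPTION) { return; } (void)(0
    // which only parses when the call is a statement of its own.
    bool status = ClassLoader::update_class_path_entry_list(
        libname, true, false, true /* from_class_path_attr */, CHECK);
    if (status) {
      trace_class_path("library = ", libname);
    }
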
diff --git a/src/hotspot/share/classfile/classLoaderExt.hpp b/src/hotspot/share/classfile/classLoaderExt.hpp
index cca5bed24f5d94285efac25da0d16a83e565a069..983741bc87b204032a834d36baf100f6cbdffdf2 100644
--- a/src/hotspot/share/classfile/classLoaderExt.hpp
+++ b/src/hotspot/share/classfile/classLoaderExt.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@ private:
};
static char* get_class_path_attr(const char* jar_path, char* manifest, jint manifest_size);
- static void setup_app_search_path(); // Only when -Xshare:dump
+ static void setup_app_search_path(TRAPS); // Only when -Xshare:dump
static void process_module_table(ModuleEntryTable* met, TRAPS);
// index of first app JAR in shared classpath entry table
static jshort _app_class_paths_start_index;
@@ -61,12 +61,12 @@ private:
static ClassPathEntry* find_classpath_entry_from_cache(const char* path, TRAPS);
public:
- static void process_jar_manifest(ClassPathEntry* entry, bool check_for_duplicates);
+ static void process_jar_manifest(ClassPathEntry* entry, bool check_for_duplicates, TRAPS);
// Called by JVMTI code to add boot classpath
static void append_boot_classpath(ClassPathEntry* new_entry);
- static void setup_search_paths();
+ static void setup_search_paths(TRAPS);
static void setup_module_paths(TRAPS);
static char* read_manifest(ClassPathEntry* entry, jint *manifest_size, TRAPS) {
diff --git a/src/hotspot/share/classfile/classLoaderStats.hpp b/src/hotspot/share/classfile/classLoaderStats.hpp
index c9f602721d43b498979019c48458c0c76ec81579..ce3f2331730b6f0436dcded49fe41bae20b7317b 100644
--- a/src/hotspot/share/classfile/classLoaderStats.hpp
+++ b/src/hotspot/share/classfile/classLoaderStats.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
#include "oops/klass.hpp"
#include "oops/oop.hpp"
#include "oops/oopsHierarchy.hpp"
-#include "runtime/vmOperations.hpp"
+#include "runtime/vmOperation.hpp"
#include "services/diagnosticCommand.hpp"
#include "utilities/resourceHash.hpp"
diff --git a/src/hotspot/share/classfile/compactHashtable.cpp b/src/hotspot/share/classfile/compactHashtable.cpp
index 9780f0bfbf30d9c2e1794439e6651239116c9f77..808de7c7fb266b6513cf0652691adb76d2281867 100644
--- a/src/hotspot/share/classfile/compactHashtable.cpp
+++ b/src/hotspot/share/classfile/compactHashtable.cpp
@@ -27,10 +27,9 @@
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "logging/logMessage.hpp"
-#include "memory/dynamicArchive.hpp"
+#include "memory/archiveBuilder.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metadataFactory.hpp"
-#include "memory/metaspaceShared.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/vmThread.hpp"
@@ -74,11 +73,11 @@ CompactHashtableWriter::~CompactHashtableWriter() {
size_t CompactHashtableWriter::estimate_size(int num_entries) {
int num_buckets = calculate_num_buckets(num_entries);
- size_t bucket_bytes = MetaspaceShared::ro_array_bytesize<u4>(num_buckets + 1);
+ size_t bucket_bytes = ArchiveBuilder::ro_array_bytesize<u4>(num_buckets + 1);
// In worst case, we have no VALUE_ONLY_BUCKET_TYPE, so each entry takes 2 slots
int entries_space = 2 * num_entries;
- size_t entry_bytes = MetaspaceShared::ro_array_bytesize<u4>(entries_space);
+ size_t entry_bytes = ArchiveBuilder::ro_array_bytesize<u4>(entries_space);
return bucket_bytes
+ entry_bytes
@@ -109,8 +108,8 @@ void CompactHashtableWriter::allocate_table() {
"Too many entries.");
}
- _compact_buckets = MetaspaceShared::new_ro_array<u4>(_num_buckets + 1);
- _compact_entries = MetaspaceShared::new_ro_array<u4>(entries_space);
+ _compact_buckets = ArchiveBuilder::new_ro_array<u4>(_num_buckets + 1);
+ _compact_entries = ArchiveBuilder::new_ro_array<u4>(entries_space);
_stats->bucket_count = _num_buckets;
_stats->bucket_bytes = align_up(_compact_buckets->size() * BytesPerWord,
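
For context on the allocation hunks above: the bucket and entry buffers are Array<u4> arrays placed in the archive's read-only region, and only the allocator moves from MetaspaceShared to ArchiveBuilder; the writer's dump-time usage is otherwise unchanged. A hedged sketch of that usage ('num_entries' and 'stats' are placeholders):

    CompactHashtableWriter writer(num_entries, stats);
    writer.add(hash, value_offset);         // once per (hash, value) pair
    writer.dump(&table, "example table");   // serializes buckets and entries
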
diff --git a/src/hotspot/share/classfile/compactHashtable.hpp b/src/hotspot/share/classfile/compactHashtable.hpp
index 4b27058b59d3a6fca9c24707287d414d784fc9a8..7eedb48b08b21d929803fb3d29acc535733b2411 100644
--- a/src/hotspot/share/classfile/compactHashtable.hpp
+++ b/src/hotspot/share/classfile/compactHashtable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "oops/array.hpp"
#include "oops/symbol.hpp"
+#include "runtime/globals.hpp"
#include "utilities/growableArray.hpp"
@@ -48,6 +49,10 @@ public:
int hashentry_bytes;
int bucket_count;
int bucket_bytes;
+
+ CompactHashtableStats() :
+ hashentry_count(0), hashentry_bytes(0),
+ bucket_count(0), bucket_bytes(0) {}
};
#if INCLUDE_CDS
diff --git a/src/hotspot/share/classfile/javaAssertions.hpp b/src/hotspot/share/classfile/javaAssertions.hpp
index b2fa038b24a2111d7b19aa481d659e2781551988..58d03eacd481d861720337caaef2556ce439a506 100644
--- a/src/hotspot/share/classfile/javaAssertions.hpp
+++ b/src/hotspot/share/classfile/javaAssertions.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "oops/objArrayOop.hpp"
#include "oops/typeArrayOop.hpp"
+#include "runtime/handles.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/ostream.hpp"
diff --git a/src/hotspot/share/classfile/klassFactory.cpp b/src/hotspot/share/classfile/klassFactory.cpp
index a02d01020b50293579d9af734f56c0cadba0b8c4..629f5f8c0c103d593d61fb65ea0ee052914e2d1f 100644
--- a/src/hotspot/share/classfile/klassFactory.cpp
+++ b/src/hotspot/share/classfile/klassFactory.cpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@
#include "classfile/classLoadInfo.hpp"
#include "classfile/klassFactory.hpp"
#include "memory/filemap.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiEnvBase.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
@@ -98,7 +97,7 @@ InstanceKlass* KlassFactory::check_shared_class_file_load_hook(
}
if (class_loader.is_null()) {
- new_ik->set_classpath_index(path_index, THREAD);
+ new_ik->set_classpath_index(path_index);
}
return new_ik;
@@ -206,10 +205,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
const ClassInstanceInfo* cl_inst_info = cl_info.class_hidden_info_ptr();
InstanceKlass* result = parser.create_instance_klass(old_stream != stream, *cl_inst_info, CHECK_NULL);
-
- if (result == NULL) {
- return NULL;
- }
+ assert(result != NULL, "result cannot be null with no pending exception");
if (cached_class_file != NULL) {
// JVMTI: we have an InstanceKlass now, tell it about the cached bytes
diff --git a/src/hotspot/share/classfile/loaderConstraints.cpp b/src/hotspot/share/classfile/loaderConstraints.cpp
index d0bf7052821513f27c4ec533304b7ae9c325b36a..3caaddb3cb2956489f0fdb19eee407f4fd9e04b6 100644
--- a/src/hotspot/share/classfile/loaderConstraints.cpp
+++ b/src/hotspot/share/classfile/loaderConstraints.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "classfile/loaderConstraints.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
diff --git a/src/hotspot/share/classfile/moduleEntry.cpp b/src/hotspot/share/classfile/moduleEntry.cpp
index 08c97c9e7a7f96ba3c50c5ddc0e30db2b55e2718..7d9c9001b40f9b9613d532eec1315839a42e2849 100644
--- a/src/hotspot/share/classfile/moduleEntry.cpp
+++ b/src/hotspot/share/classfile/moduleEntry.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,6 @@
#include "memory/archiveUtils.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oopHandle.inline.hpp"
@@ -368,6 +367,11 @@ ModuleEntryTable::~ModuleEntryTable() {
assert(new_entry_free_list() == NULL, "entry present on ModuleEntryTable's free list");
}
+void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
+ assert(!cld->has_class_mirror_holder(), "Unexpected has_class_mirror_holder cld");
+ _loader_data = cld;
+}
+
#if INCLUDE_CDS_JAVA_HEAP
typedef ResourceHashtable<
const ModuleEntry*,
@@ -380,7 +384,7 @@ static ArchivedModuleEntries* _archive_modules_entries = NULL;
ModuleEntry* ModuleEntry::allocate_archived_entry() const {
assert(is_named(), "unnamed packages/modules are not archived");
- ModuleEntry* archived_entry = (ModuleEntry*)MetaspaceShared::read_write_space_alloc(sizeof(ModuleEntry));
+ ModuleEntry* archived_entry = (ModuleEntry*)ArchiveBuilder::rw_region_alloc(sizeof(ModuleEntry));
memcpy((void*)archived_entry, (void*)this, sizeof(ModuleEntry));
if (_archive_modules_entries == NULL) {
@@ -405,7 +409,7 @@ Array<ModuleEntry*>* ModuleEntry::write_growable_array(GrowableArray<ModuleEntry*>* array) {
Array<ModuleEntry*>* archived_array = NULL;
int length = (array == NULL) ? 0 : array->length();
if (length > 0) {
- archived_array = MetaspaceShared::new_ro_array<ModuleEntry*>(length);
+ archived_array = ArchiveBuilder::new_ro_array<ModuleEntry*>(length);
for (int i = 0; i < length; i++) {
ModuleEntry* archived_entry = get_archived_entry(array->at(i));
archived_array->at_put(i, archived_entry);
@@ -513,7 +517,7 @@ void ModuleEntryTable::iterate_symbols(MetaspaceClosure* closure) {
}
Array<ModuleEntry*>* ModuleEntryTable::allocate_archived_entries() {
- Array<ModuleEntry*>* archived_modules = MetaspaceShared::new_rw_array<ModuleEntry*>(number_of_entries());
+ Array<ModuleEntry*>* archived_modules = ArchiveBuilder::new_rw_array<ModuleEntry*>(number_of_entries());
int n = 0;
for (int i = 0; i < table_size(); ++i) {
for (ModuleEntry* m = bucket(i); m != NULL; m = m->next()) {
diff --git a/src/hotspot/share/classfile/moduleEntry.hpp b/src/hotspot/share/classfile/moduleEntry.hpp
index c86904bc823a4e8c7bc15dd26babfbc6a24202e3..959fcac74e3e855ac2a827d88ac7796d8b7a1b8f 100644
--- a/src/hotspot/share/classfile/moduleEntry.hpp
+++ b/src/hotspot/share/classfile/moduleEntry.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#define SHARE_CLASSFILE_MODULEENTRY_HPP
#include "jni.h"
-#include "classfile/classLoaderData.hpp"
#include "oops/oopHandle.hpp"
#include "oops/symbol.hpp"
#include "runtime/mutexLocker.hpp"
@@ -46,6 +45,7 @@
#define JAVA_BASE_NAME_LEN 9
template <class T> class Array;
+class ClassLoaderData;
class MetaspaceClosure;
class ModuleClosure;
@@ -112,11 +112,7 @@ public:
void set_shared_protection_domain(ClassLoaderData *loader_data, Handle pd);
ClassLoaderData* loader_data() const { return _loader_data; }
-
- void set_loader_data(ClassLoaderData* cld) {
- assert(!cld->has_class_mirror_holder(), "Unexpected has_class_mirror_holder cld");
- _loader_data = cld;
- }
+ void set_loader_data(ClassLoaderData* cld);
Symbol* version() const { return _version; }
void set_version(Symbol* version);
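
The moduleEntry.hpp hunks above drop the classLoaderData.hpp include in favor of a forward declaration, which is why set_loader_data() moves out of line: its body calls into ClassLoaderData and therefore needs the full type. The general shape of the decoupling pattern, sketched:

    // header: a forward declaration suffices for pointer members and parameters
    class ClassLoaderData;
    class ModuleEntry {
      ClassLoaderData* _loader_data;
    public:
      void set_loader_data(ClassLoaderData* cld);  // defined in the .cpp
    };
    // moduleEntry.cpp includes classLoaderData.hpp and provides the body.
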
diff --git a/src/hotspot/share/classfile/modules.cpp b/src/hotspot/share/classfile/modules.cpp
index e8b107e34ded76b86036cb907f9f123c60f320a9..f5b5fe8ada59aa338f5c73229a9df43940f534a7 100644
--- a/src/hotspot/share/classfile/modules.cpp
+++ b/src/hotspot/share/classfile/modules.cpp
@@ -100,13 +100,12 @@ static PackageEntryTable* get_package_entry_table(Handle h_loader) {
return loader_cld->packages();
}
-static ModuleEntry* get_module_entry(jobject module, TRAPS) {
- oop m = JNIHandles::resolve_non_null(module);
- if (!java_lang_Module::is_instance(m)) {
+static ModuleEntry* get_module_entry(Handle module, TRAPS) {
+ if (!java_lang_Module::is_instance(module())) {
THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(),
"module is not an instance of type java.lang.Module");
}
- return java_lang_Module::module_entry(m);
+ return java_lang_Module::module_entry(module());
}
@@ -272,23 +271,22 @@ void throw_dup_pkg_exception(const char* module_name, PackageEntry* package, TRA
}
}
-void Modules::define_module(jobject module, jboolean is_open, jstring version,
+void Modules::define_module(Handle module, jboolean is_open, jstring version,
jstring location, jobjectArray packages, TRAPS) {
check_cds_restrictions(CHECK);
ResourceMark rm(THREAD);
- if (module == NULL) {
+ if (module.is_null()) {
THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Null module object");
}
- Handle module_handle(THREAD, JNIHandles::resolve_non_null(module));
- if (!java_lang_Module::is_instance(module_handle())) {
+ if (!java_lang_Module::is_instance(module())) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"module is not an instance of type java.lang.Module");
}
int module_name_len;
- char* module_name = get_module_name(module_handle(), module_name_len, CHECK);
+ char* module_name = get_module_name(module(), module_name_len, CHECK);
if (module_name == NULL) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Module name cannot be null");
@@ -301,11 +299,11 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version,
// Special handling of java.base definition
if (strcmp(module_name, JAVA_BASE_NAME) == 0) {
assert(is_open == JNI_FALSE, "java.base module cannot be open");
- define_javabase_module(module_handle, version, location, packages_h, num_packages, CHECK);
+ define_javabase_module(module, version, location, packages_h, num_packages, CHECK);
return;
}
- oop loader = java_lang_Module::loader(module_handle());
+ oop loader = java_lang_Module::loader(module());
// Make sure loader is not the jdk.internal.reflect.DelegatingClassLoader.
if (loader != java_lang_ClassLoader::non_reflection_class_loader(loader)) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
@@ -402,7 +400,7 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version,
if (!dupl_modules && existing_pkg == NULL) {
if (module_table->lookup_only(module_symbol) == NULL) {
// Create the entry for this module in the class loader's module entry table.
- ModuleEntry* module_entry = module_table->locked_create_entry(module_handle,
+ ModuleEntry* module_entry = module_table->locked_create_entry(module,
(is_open == JNI_TRUE), module_symbol,
version_symbol, location_symbol, loader_data);
assert(module_entry != NULL, "module_entry creation failed");
@@ -419,7 +417,7 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version,
}
// Store pointer to ModuleEntry record in java.lang.Module object.
- java_lang_Module::set_module_entry(module_handle(), module_entry);
+ java_lang_Module::set_module_entry(module(), module_entry);
} else {
dupl_modules = true;
}
@@ -476,7 +474,7 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version,
}
#if INCLUDE_CDS_JAVA_HEAP
-void Modules::define_archived_modules(jobject platform_loader, jobject system_loader, TRAPS) {
+void Modules::define_archived_modules(Handle h_platform_loader, Handle h_system_loader, TRAPS) {
assert(UseSharedSpaces && MetaspaceShared::use_full_module_graph(), "must be");
// We don't want the classes used by the archived full module graph to be redefined by JVMTI.
@@ -490,19 +488,17 @@ void Modules::define_archived_modules(jobject platform_loader, jobject system_lo
// Patch any previously loaded class's module field with java.base's java.lang.Module.
ModuleEntryTable::patch_javabase_entries(java_base_module);
- if (platform_loader == NULL) {
+ if (h_platform_loader.is_null()) {
THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Null platform loader object");
}
- if (system_loader == NULL) {
+ if (h_system_loader.is_null()) {
THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Null system loader object");
}
- Handle h_platform_loader(THREAD, JNIHandles::resolve_non_null(platform_loader));
ClassLoaderData* platform_loader_data = SystemDictionary::register_loader(h_platform_loader);
ClassLoaderDataShared::restore_java_platform_loader_from_archive(platform_loader_data);
- Handle h_system_loader(THREAD, JNIHandles::resolve_non_null(system_loader));
ClassLoaderData* system_loader_data = SystemDictionary::register_loader(h_system_loader);
ClassLoaderDataShared::restore_java_system_loader_from_archive(system_loader_data);
}
@@ -515,27 +511,26 @@ void Modules::check_cds_restrictions(TRAPS) {
}
#endif // INCLUDE_CDS_JAVA_HEAP
-void Modules::set_bootloader_unnamed_module(jobject module, TRAPS) {
+void Modules::set_bootloader_unnamed_module(Handle module, TRAPS) {
ResourceMark rm(THREAD);
- if (module == NULL) {
+ if (module.is_null()) {
THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Null module object");
}
- Handle module_handle(THREAD, JNIHandles::resolve(module));
- if (!java_lang_Module::is_instance(module_handle())) {
+ if (!java_lang_Module::is_instance(module())) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"module is not an instance of type java.lang.Module");
}
// Ensure that this is an unnamed module
- oop name = java_lang_Module::name(module_handle());
+ oop name = java_lang_Module::name(module());
if (name != NULL) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"boot loader's unnamed module's java.lang.Module has a name");
}
// Validate java_base's loader is the boot loader.
- oop loader = java_lang_Module::loader(module_handle());
+ oop loader = java_lang_Module::loader(module());
if (loader != NULL) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Class loader must be the boot class loader");
@@ -547,19 +542,19 @@ void Modules::set_bootloader_unnamed_module(jobject module, TRAPS) {
ClassLoaderData* boot_loader_data = ClassLoaderData::the_null_class_loader_data();
ModuleEntry* unnamed_module = boot_loader_data->unnamed_module();
assert(unnamed_module != NULL, "boot loader's unnamed ModuleEntry not defined");
- unnamed_module->set_module(boot_loader_data->add_handle(module_handle));
+ unnamed_module->set_module(boot_loader_data->add_handle(module));
// Store pointer to the ModuleEntry in the unnamed module's java.lang.Module object.
- java_lang_Module::set_module_entry(module_handle(), unnamed_module);
+ java_lang_Module::set_module_entry(module(), unnamed_module);
}
-void Modules::add_module_exports(jobject from_module, jstring package_name, jobject to_module, TRAPS) {
+void Modules::add_module_exports(Handle from_module, jstring package_name, Handle to_module, TRAPS) {
check_cds_restrictions(CHECK);
if (package_name == NULL) {
THROW_MSG(vmSymbols::java_lang_NullPointerException(),
"package is null");
}
- if (from_module == NULL) {
+ if (from_module.is_null()) {
THROW_MSG(vmSymbols::java_lang_NullPointerException(),
"from_module is null");
}
@@ -573,7 +568,7 @@ void Modules::add_module_exports(jobject from_module, jstring package_name, jobj
if (!from_module_entry->is_named() || from_module_entry->is_open()) return;
ModuleEntry* to_module_entry;
- if (to_module == NULL) {
+ if (to_module.is_null()) {
to_module_entry = NULL; // It's an unqualified export.
} else {
to_module_entry = get_module_entry(to_module, CHECK);
@@ -619,19 +614,19 @@ void Modules::add_module_exports(jobject from_module, jstring package_name, jobj
}
-void Modules::add_module_exports_qualified(jobject from_module, jstring package,
- jobject to_module, TRAPS) {
+void Modules::add_module_exports_qualified(Handle from_module, jstring package,
+ Handle to_module, TRAPS) {
check_cds_restrictions(CHECK);
- if (to_module == NULL) {
+ if (to_module.is_null()) {
THROW_MSG(vmSymbols::java_lang_NullPointerException(),
"to_module is null");
}
add_module_exports(from_module, package, to_module, CHECK);
}
-void Modules::add_reads_module(jobject from_module, jobject to_module, TRAPS) {
+void Modules::add_reads_module(Handle from_module, Handle to_module, TRAPS) {
check_cds_restrictions(CHECK);
- if (from_module == NULL) {
+ if (from_module.is_null()) {
THROW_MSG(vmSymbols::java_lang_NullPointerException(),
"from_module is null");
}
@@ -643,7 +638,7 @@ void Modules::add_reads_module(jobject from_module, jobject to_module, TRAPS) {
}
ModuleEntry* to_module_entry;
- if (to_module != NULL) {
+ if (!to_module.is_null()) {
to_module_entry = get_module_entry(to_module, CHECK);
if (to_module_entry == NULL) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
@@ -735,9 +730,9 @@ jobject Modules::get_named_module(Handle h_loader, const char* package_name, TRA
}
// Export package in module to all unnamed modules.
-void Modules::add_module_exports_to_all_unnamed(jobject module, jstring package_name, TRAPS) {
+void Modules::add_module_exports_to_all_unnamed(Handle module, jstring package_name, TRAPS) {
check_cds_restrictions(CHECK);
- if (module == NULL) {
+ if (module.is_null()) {
THROW_MSG(vmSymbols::java_lang_NullPointerException(),
"module is null");
}
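
The Modules changes above (jobject to Handle throughout) push JNI-handle resolution up to the VM entry points, so each object is resolved exactly once and then travels as a Handle. A schematic caller, assuming the usual JVM_ENTRY shape (illustrative, not copied from this patch):

    JVM_ENTRY(void, JVM_DefineModule(JNIEnv *env, jobject module, jboolean is_open,
                                     jstring version, jstring location,
                                     jobjectArray packages))
      Handle h_module(THREAD, JNIHandles::resolve(module));
      Modules::define_module(h_module, is_open, version, location, packages, CHECK);
    JVM_END
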
diff --git a/src/hotspot/share/classfile/modules.hpp b/src/hotspot/share/classfile/modules.hpp
index b76aa8b4aed47896100198da6c3a3c271ace7c42..461d7b514eeb35e15f93711651772c299c13dcb8 100644
--- a/src/hotspot/share/classfile/modules.hpp
+++ b/src/hotspot/share/classfile/modules.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,10 +50,10 @@ public:
// * A package already exists in another module for this class loader
// * Module is an unnamed module
// NullPointerExceptions are thrown if module is null.
- static void define_module(jobject module, jboolean is_open, jstring version,
+ static void define_module(Handle module, jboolean is_open, jstring version,
jstring location, jobjectArray packages, TRAPS);
- static void define_archived_modules(jobject platform_loader, jobject system_loader,
+ static void define_archived_modules(Handle h_platform_loader, Handle h_system_loader,
TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
// Provides the java.lang.Module for the unnamed module defined
@@ -64,7 +64,7 @@ public:
// * Module is not a subclass of java.lang.Module
// * Module's class loader is not the boot loader
// NullPointerExceptions are thrown if module is null.
- static void set_bootloader_unnamed_module(jobject module, TRAPS);
+ static void set_bootloader_unnamed_module(Handle module, TRAPS);
// This either does a qualified export of package in module from_module to module
// to_module or, if to_module is null, does an unqualified export of package.
@@ -76,7 +76,7 @@ public:
// * Package is not syntactically correct
// * Package is not defined for from_module's class loader
// * Package is not in module from_module.
- static void add_module_exports(jobject from_module, jstring package, jobject to_module, TRAPS);
+ static void add_module_exports(Handle from_module, jstring package, Handle to_module, TRAPS);
// This does a qualified export of package in module from_module to module
// to_module. Any "." in the package name will be converted to "/"
@@ -87,7 +87,7 @@ public:
// * Package is not syntactically correct
// * Package is not defined for from_module's class loader
// * Package is not in module from_module.
- static void add_module_exports_qualified(jobject from_module, jstring package, jobject to_module, TRAPS);
+ static void add_module_exports_qualified(Handle from_module, jstring package, Handle to_module, TRAPS);
// add_reads_module adds module to_module to the list of modules that from_module
// can read. If from_module is the same as to_module then this is a no-op.
@@ -95,7 +95,7 @@ public:
// from_module can read all current and future unnamed modules).
// An IllegalArgumentException is thrown if from_module is null or either (non-null)
// module does not exist.
- static void add_reads_module(jobject from_module, jobject to_module, TRAPS);
+ static void add_reads_module(Handle from_module, Handle to_module, TRAPS);
// Return the java.lang.Module object for this class object.
static jobject get_module(jclass clazz, TRAPS);
@@ -112,7 +112,7 @@ public:
// If either module or package is null then NullPointerException is thrown.
// If module or package is bad, or module is unnamed, or package is not in
// module then IllegalArgumentException is thrown.
- static void add_module_exports_to_all_unnamed(jobject module, jstring package, TRAPS);
+ static void add_module_exports_to_all_unnamed(Handle module, jstring package, TRAPS);
// Return TRUE iff package is defined by loader
static bool is_package_defined(Symbol* package_name, Handle h_loader, TRAPS);
diff --git a/src/hotspot/share/classfile/packageEntry.cpp b/src/hotspot/share/classfile/packageEntry.cpp
index 8e5c826dbad3033612c05f77b71a2664e2b7d846..b32a6a511021b575df8907b46d0a923e64f3bb91 100644
--- a/src/hotspot/share/classfile/packageEntry.cpp
+++ b/src/hotspot/share/classfile/packageEntry.cpp
@@ -23,13 +23,13 @@
*/
#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/archiveBuilder.hpp"
#include "memory/archiveUtils.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/symbol.hpp"
@@ -208,7 +208,7 @@ static ArchivedPackageEntries* _archived_packages_entries = NULL;
PackageEntry* PackageEntry::allocate_archived_entry() const {
assert(!in_unnamed_module(), "unnamed packages/modules are not archived");
- PackageEntry* archived_entry = (PackageEntry*)MetaspaceShared::read_write_space_alloc(sizeof(PackageEntry));
+ PackageEntry* archived_entry = (PackageEntry*)ArchiveBuilder::rw_region_alloc(sizeof(PackageEntry));
memcpy((void*)archived_entry, (void*)this, sizeof(PackageEntry));
if (_archived_packages_entries == NULL) {
@@ -278,7 +278,7 @@ Array<PackageEntry*>* PackageEntryTable::allocate_archived_entries() {
}
}
- Array<PackageEntry*>* archived_packages = MetaspaceShared::new_rw_array<PackageEntry*>(n);
+ Array<PackageEntry*>* archived_packages = ArchiveBuilder::new_rw_array<PackageEntry*>(n);
for (n = 0, i = 0; i < table_size(); ++i) {
for (PackageEntry* p = bucket(i); p != NULL; p = p->next()) {
if (p->module()->name() != NULL) {
diff --git a/src/hotspot/share/classfile/placeholders.cpp b/src/hotspot/share/classfile/placeholders.cpp
index 4f2b8d60b60bc090ec2bf225d2556918727fb507..551b26e3099d3dc39ed06c6371831e49d0021186 100644
--- a/src/hotspot/share/classfile/placeholders.cpp
+++ b/src/hotspot/share/classfile/placeholders.cpp
@@ -104,7 +104,7 @@ void PlaceholderEntry::set_threadQ(SeenThread* seenthread, PlaceholderTable::cla
// Doubly-linked list of Threads per action for class/classloader pair
// Class circularity support: links in thread before loading superclass
-// bootstrapsearchpath support: links in a thread before load_instance_class
+// bootstrap loader support: links in a thread before load_instance_class
// definers: use as queue of define requestors, including owner of
// define token. Appends for debugging of requestor order
void PlaceholderEntry::add_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) {
@@ -112,6 +112,9 @@ void PlaceholderEntry::add_seen_thread(Thread* thread, PlaceholderTable::classlo
SeenThread* threadEntry = new SeenThread(thread);
SeenThread* seen = actionToQueue(action);
+ assert(action != PlaceholderTable::LOAD_INSTANCE || seen == NULL,
+ "Only one LOAD_INSTANCE allowed at a time");
+
if (seen == NULL) {
set_threadQ(threadEntry, action);
return;
diff --git a/src/hotspot/share/classfile/placeholders.hpp b/src/hotspot/share/classfile/placeholders.hpp
index abb0dc18ffae0788cd1395f369de4f969acb0451..d85ac9adfdc843fb7a5549746a4794abbf6ff6d2 100644
--- a/src/hotspot/share/classfile/placeholders.hpp
+++ b/src/hotspot/share/classfile/placeholders.hpp
@@ -120,8 +120,8 @@ class PlaceholderEntry : public HashtableEntry<Symbol*, mtClass> {
InstanceKlass* _instanceKlass; // InstanceKlass from successful define
SeenThread* _superThreadQ; // doubly-linked queue of Threads loading a superclass for this class
SeenThread* _loadInstanceThreadQ; // loadInstance thread
- // can be multiple threads if classloader object lock broken by application
- // or if classloader supports parallel classloading
+ // This can't be multiple threads since class loading waits for
+ // this token to be removed.
SeenThread* _defineThreadQ; // queue of Threads trying to define this class
// including _definer
diff --git a/src/hotspot/share/classfile/resolutionErrors.cpp b/src/hotspot/share/classfile/resolutionErrors.cpp
index d248a5e55ec0f69996c71acb9e11c2c7809e1e81..47eca140606cfb94cfae66420247b9444f7102aa 100644
--- a/src/hotspot/share/classfile/resolutionErrors.cpp
+++ b/src/hotspot/share/classfile/resolutionErrors.cpp
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
index 6c2f61a26c7873bdd0bcce1f5f762c3cfe03abf5..8fe73d6759cadf24983b2094d1994994978df2ba 100644
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -34,7 +34,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
+#include "memory/archiveBuilder.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -760,7 +760,7 @@ void StringTable::write_to_archive(const DumpedInternedStrings* dumped_interned_
assert(HeapShared::is_heap_object_archiving_allowed(), "must be");
_shared_table.reset();
- CompactHashtableWriter writer(_items_count, &MetaspaceShared::stats()->string);
+ CompactHashtableWriter writer(_items_count, ArchiveBuilder::string_stats());
// Copy the interned strings into the "string space" within the java heap
CopyToArchive copier(&writer);
diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp
index 776a74a27060c0fdd681059a20bd4d92a27c10ce..26f3c6ad379a3740c0f96de6a3d6be2482814ab0 100644
--- a/src/hotspot/share/classfile/symbolTable.cpp
+++ b/src/hotspot/share/classfile/symbolTable.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
+#include "classfile/classLoaderData.hpp"
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
@@ -31,7 +32,6 @@
#include "memory/archiveBuilder.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/metaspaceClosure.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
@@ -601,8 +601,7 @@ size_t SymbolTable::estimate_size_for_archive() {
}
void SymbolTable::write_to_archive(GrowableArray<Symbol*>* symbols) {
- CompactHashtableWriter writer(int(_items_count),
- &MetaspaceShared::stats()->symbol);
+ CompactHashtableWriter writer(int(_items_count), ArchiveBuilder::symbol_stats());
copy_shared_symbol_table(symbols, &writer);
if (!DynamicDumpSharedSpaces) {
_shared_table.reset();
diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp
index 2ede5d1aacb2f3b95f3fd352398e1321fd804fd9..13644a298e0e885564dba1a3e9aeb723a19ae415 100644
--- a/src/hotspot/share/classfile/systemDictionary.cpp
+++ b/src/hotspot/share/classfile/systemDictionary.cpp
@@ -185,7 +185,7 @@ bool SystemDictionary::is_platform_class_loader(oop class_loader) {
return (class_loader->klass() == vmClasses::jdk_internal_loader_ClassLoaders_PlatformClassLoader_klass());
}
-Handle SystemDictionary::compute_loader_lock_object(Handle class_loader) {
+Handle SystemDictionary::get_loader_lock_or_null(Handle class_loader) {
// If class_loader is NULL or parallelCapable, the JVM doesn't acquire a lock while loading.
if (is_parallelCapable(class_loader)) {
return Handle();
@@ -369,8 +369,8 @@ InstanceKlass* SystemDictionary::resolve_super_or_fail(Symbol* class_name,
#if INCLUDE_CDS
if (DumpSharedSpaces) {
// Special processing for handling UNREGISTERED shared classes.
- InstanceKlass* k = SystemDictionaryShared::dump_time_resolve_super_or_fail(class_name,
- super_name, class_loader, protection_domain, is_superclass, CHECK_NULL);
+ InstanceKlass* k = SystemDictionaryShared::lookup_super_for_unregistered_class(class_name,
+ super_name, is_superclass);
if (k) {
return k;
}
@@ -570,12 +570,9 @@ void SystemDictionary::double_lock_wait(Thread* thread, Handle lockObject) {
// super class loading here.
// This also is critical in cases where the original thread gets stalled
// even in non-circularity situations.
-// Note: must call resolve_super_or_fail even if null super -
-// to force placeholder entry creation for this class for circularity detection
-// Caller must check for pending exception
// Returns non-null Klass* if other thread has completed load
-// and we are done,
-// If return null Klass* and no pending exception, the caller must load the class
+// and we are done. If this returns a null Klass* and no pending exception,
+// the caller must load the class.
InstanceKlass* SystemDictionary::handle_parallel_super_load(
Symbol* name, Symbol* superclassname, Handle class_loader,
Handle protection_domain, Handle lockObject, TRAPS) {
@@ -584,14 +581,7 @@ InstanceKlass* SystemDictionary::handle_parallel_super_load(
Dictionary* dictionary = loader_data->dictionary();
unsigned int name_hash = dictionary->compute_hash(name);
- // superk is not used, resolve_super called for circularity check only
- // This code is reached in two situations. One if this thread
- // is loading the same class twice (e.g. ClassCircularity, or
- // java.lang.instrument).
- // The second is if another thread started the resolve_super first
- // and has not yet finished.
- // In both cases the original caller will clean up the placeholder
- // entry on error.
+ // superk is not used; resolve_super_or_fail is called for circularity check only.
Klass* superk = SystemDictionary::resolve_super_or_fail(name,
superclassname,
class_loader,
@@ -603,7 +593,6 @@ InstanceKlass* SystemDictionary::handle_parallel_super_load(
// Serial class loaders and bootstrap classloader do wait for superclass loads
if (!class_loader.is_null() && is_parallelCapable(class_loader)) {
MutexLocker mu(THREAD, SystemDictionary_lock);
- // Check if classloading completed while we were loading superclass or waiting
return dictionary->find_class(name_hash, name);
}
@@ -703,7 +692,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
// the define.
// ParallelCapable Classloaders and the bootstrap classloader
// do not acquire lock here.
- Handle lockObject = compute_loader_lock_object(class_loader);
+ Handle lockObject = get_loader_lock_or_null(class_loader);
ObjectLocker ol(lockObject, THREAD);
// Check again (after locking) if the class already exists in SystemDictionary
@@ -759,10 +748,8 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
// but only allows a single thread to load a class/classloader pair.
// The LOAD_INSTANCE placeholder is the mechanism for mutual exclusion.
// case 2. parallelCapable user level classloaders
- // These class loaders don't lock the object until load_instance_class is
- // called after this placeholder is added.
- // Allow parallel classloading of a class/classloader pair where mutual
- // exclusion is provided by this lock in the class loader Java code.
+ // These class loaders take a per-class object lock when ClassLoader.loadClass()
+ // is called. A LOAD_INSTANCE placeholder isn't used for mutual exclusion.
// case 3. traditional classloaders that rely on the classloader object lock
// There should be no need for LOAD_INSTANCE, except:
// case 4. traditional class loaders that break the classloader object lock
@@ -771,65 +758,64 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
// and that lock is still held when calling classloader's loadClass.
// For these classloaders, we ensure that the first requestor
// completes the load and other requestors wait for completion.
- {
+ if (class_loader.is_null() || !is_parallelCapable(class_loader)) {
MutexLocker mu(THREAD, SystemDictionary_lock);
- if (class_loader.is_null() || !is_parallelCapable(class_loader)) {
- PlaceholderEntry* oldprobe = placeholders()->get_entry(name_hash, name, loader_data);
- if (oldprobe != NULL) {
- // only need check_seen_thread once, not on each loop
- // 6341374 java/lang/Instrument with -Xcomp
- if (oldprobe->check_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE)) {
- throw_circularity_error = true;
- } else {
- // case 3: traditional: should never see load_in_progress.
- while (!class_has_been_loaded && oldprobe != NULL && oldprobe->instance_load_in_progress()) {
-
- // case 1: bootstrap classloader: prevent futile classloading,
- // wait on first requestor
- if (class_loader.is_null()) {
- SystemDictionary_lock->wait();
- } else {
+ PlaceholderEntry* oldprobe = placeholders()->get_entry(name_hash, name, loader_data);
+ if (oldprobe != NULL) {
+ // only need check_seen_thread once, not on each loop
+ // 6341374 java/lang/Instrument with -Xcomp
+ if (oldprobe->check_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE)) {
+ throw_circularity_error = true;
+ } else {
+ // case 3: traditional: should never see load_in_progress.
+ while (!class_has_been_loaded && oldprobe != NULL && oldprobe->instance_load_in_progress()) {
+
+ // case 1: bootstrap classloader: prevent futile classloading,
+ // wait on first requestor
+ if (class_loader.is_null()) {
+ SystemDictionary_lock->wait();
+ } else {
// case 4: traditional with broken classloader lock. wait on first
// requestor.
- double_lock_wait(THREAD, lockObject);
- }
- // Check if classloading completed while we were waiting
- InstanceKlass* check = dictionary->find_class(name_hash, name);
- if (check != NULL) {
- // Klass is already loaded, so just return it
- loaded_class = check;
- class_has_been_loaded = true;
- }
- // check if other thread failed to load and cleaned up
- oldprobe = placeholders()->get_entry(name_hash, name, loader_data);
+ double_lock_wait(THREAD, lockObject);
+ }
+ // Check if classloading completed while we were waiting
+ InstanceKlass* check = dictionary->find_class(name_hash, name);
+ if (check != NULL) {
+ // Klass is already loaded, so just return it
+ loaded_class = check;
+ class_has_been_loaded = true;
}
+ // check if other thread failed to load and cleaned up
+ oldprobe = placeholders()->get_entry(name_hash, name, loader_data);
}
}
}
- // All cases: add LOAD_INSTANCE while holding the SystemDictionary_lock
+ // Add LOAD_INSTANCE while holding the SystemDictionary_lock
if (!throw_circularity_error && !class_has_been_loaded) {
- PlaceholderEntry* newprobe = placeholders()->find_and_add(name_hash, name, loader_data,
- PlaceholderTable::LOAD_INSTANCE, NULL, THREAD);
- load_instance_added = true;
- // For class loaders that do not acquire the classloader object lock,
- // if they did not catch another thread holding LOAD_INSTANCE,
- // need a check analogous to the acquire ObjectLocker/find_class
- // i.e. now that we hold the LOAD_INSTANCE token on loading this class/CL
- // one final check if the load has already completed
- // class loaders holding the ObjectLock shouldn't find the class here
+ // For the boot class loader, if the thread did not catch another thread holding
+ // the LOAD_INSTANCE token, we need to check whether it completed loading
+ // while holding the SD_lock.
InstanceKlass* check = dictionary->find_class(name_hash, name);
if (check != NULL) {
// Klass is already loaded, so return it after checking/adding protection domain
loaded_class = check;
class_has_been_loaded = true;
+ } else {
+ // Now we've got the LOAD_INSTANCE token. Other threads will wait for this thread to complete the load.
+ PlaceholderEntry* newprobe = placeholders()->find_and_add(name_hash, name, loader_data,
+ PlaceholderTable::LOAD_INSTANCE,
+ NULL,
+ THREAD);
+ load_instance_added = true;
}
}
}
// must throw error outside of owning lock
if (throw_circularity_error) {
- assert(!HAS_PENDING_EXCEPTION && load_instance_added == false,"circularity error cleanup");
+ assert(!HAS_PENDING_EXCEPTION && !load_instance_added, "circularity error cleanup");
ResourceMark rm(THREAD);
THROW_MSG_NULL(vmSymbols::java_lang_ClassCircularityError(), name->as_C_string());
}
@@ -913,16 +899,15 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
// _dictionary->bucket(index) is read here, so the caller will not see
// the new entry.
-Klass* SystemDictionary::find(Symbol* class_name,
- Handle class_loader,
- Handle protection_domain,
- TRAPS) {
+InstanceKlass* SystemDictionary::find_instance_klass(Symbol* class_name,
+ Handle class_loader,
+ Handle protection_domain) {
// The result of this call should be consistent with the result
// of the call to resolve_instance_class_or_null().
// See evaluation 6790209 and 4474172 for more details.
- class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
- ClassLoaderData* loader_data = ClassLoaderData::class_loader_data_or_null(class_loader());
+ oop class_loader_oop = java_lang_ClassLoader::non_reflection_class_loader(class_loader());
+ ClassLoaderData* loader_data = ClassLoaderData::class_loader_data_or_null(class_loader_oop);
if (loader_data == NULL) {
// If the ClassLoaderData has not been setup,
@@ -932,16 +917,14 @@ Klass* SystemDictionary::find(Symbol* class_name,
Dictionary* dictionary = loader_data->dictionary();
unsigned int name_hash = dictionary->compute_hash(class_name);
- return dictionary->find(name_hash, class_name,
- protection_domain);
+ return dictionary->find(name_hash, class_name, protection_domain);
}
// Look for a loaded instance or array klass by name. Do not do any loading.
// return NULL in case of error.
Klass* SystemDictionary::find_instance_or_array_klass(Symbol* class_name,
Handle class_loader,
- Handle protection_domain,
- TRAPS) {
+ Handle protection_domain) {
Klass* k = NULL;
assert(class_name != NULL, "class name must be non NULL");
@@ -955,13 +938,13 @@ Klass* SystemDictionary::find_instance_or_array_klass(Symbol* class_name,
if (t != T_OBJECT) {
k = Universe::typeArrayKlassObj(t);
} else {
- k = SystemDictionary::find(ss.as_symbol(), class_loader, protection_domain, THREAD);
+ k = SystemDictionary::find_instance_klass(ss.as_symbol(), class_loader, protection_domain);
}
if (k != NULL) {
k = k->array_klass_or_null(ndims);
}
} else {
- k = find(class_name, class_loader, protection_domain, THREAD);
+ k = find_instance_klass(class_name, class_loader, protection_domain);
}
return k;
}
@@ -1007,8 +990,9 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name,
loader_data,
cl_info,
CHECK_NULL);
+ assert(k != NULL, "no klass created");
- if ((cl_info.is_hidden() || is_unsafe_anon_class) && k != NULL) {
+ if (cl_info.is_hidden() || is_unsafe_anon_class) {
// Hidden classes that are not strong and unsafe anonymous classes must update
// ClassLoaderData holder so that they can be unloaded when the mirror is no
// longer referenced.
@@ -1052,7 +1036,8 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name,
// JVM_DefineClass).
// Note: class_name can be NULL. In that case we do not know the name of
// the class until we have parsed the stream.
-
+// This function either returns an InstanceKlass or throws an exception. It does
+// not return NULL without a pending exception.
InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
Handle class_loader,
Handle protection_domain,
@@ -1065,7 +1050,7 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
// Classloaders that support parallelism, e.g. bootstrap classloader,
// do not acquire lock here
- Handle lockObject = compute_loader_lock_object(class_loader);
+ Handle lockObject = get_loader_lock_or_null(class_loader);
ObjectLocker ol(lockObject, THREAD);
// Parse the stream and create a klass.
@@ -1085,9 +1070,6 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
#endif
if (k == NULL) {
- if (st->buffer() == NULL) {
- return NULL;
- }
ClassLoadInfo cl_info(protection_domain);
k = KlassFactory::create_from_stream(st, class_name, loader_data, cl_info, CHECK_NULL);
}
@@ -1233,7 +1215,7 @@ bool SystemDictionary::check_shared_class_super_type(InstanceKlass* klass, Insta
if (!super_type->is_shared_unregistered_class() && super_type->class_loader_data() != NULL) {
// Check if the super class is loaded by the current class_loader
Symbol* name = super_type->name();
- Klass* check = find(name, class_loader, protection_domain, CHECK_0);
+ InstanceKlass* check = find_instance_klass(name, class_loader, protection_domain);
if (check == super_type) {
return true;
}
@@ -1356,7 +1338,7 @@ InstanceKlass* SystemDictionary::load_shared_class(InstanceKlass* ik,
ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
{
HandleMark hm(THREAD);
- Handle lockObject = compute_loader_lock_object(class_loader);
+ Handle lockObject = get_loader_lock_or_null(class_loader);
ObjectLocker ol(lockObject, THREAD);
// prohibited package check assumes all classes loaded from archive call
// restore_unshareable_info which calls ik->set_package()
@@ -1374,7 +1356,7 @@ void SystemDictionary::load_shared_class_misc(InstanceKlass* ik, ClassLoaderData
// package was loaded.
if (loader_data->is_the_null_class_loader_data()) {
int path_index = ik->shared_classpath_index();
- ik->set_classpath_index(path_index, THREAD);
+ ik->set_classpath_index(path_index);
}
// notify a class loaded from shared object
@@ -1558,7 +1540,7 @@ void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_load
// hole with systemDictionary updates and check_constraints
if (!is_parallelCapable(class_loader)) {
assert(ObjectSynchronizer::current_thread_holds_lock(THREAD->as_Java_thread(),
- compute_loader_lock_object(class_loader)),
+ get_loader_lock_or_null(class_loader)),
"define called without lock");
}
@@ -1812,17 +1794,6 @@ void SystemDictionary::initialize(TRAPS) {
}
}
-#ifdef ASSERT
-// Verify that this placeholder exists since this class is in the middle of loading.
-void verify_placeholder(Symbol* class_name, ClassLoaderData* loader_data) {
- // Only parallel capable class loaders use placeholder table for define class.
- assert_locked_or_safepoint(SystemDictionary_lock);
- unsigned int name_hash = placeholders()->compute_hash(class_name);
- Symbol* ph_check = placeholders()->find_entry(name_hash, class_name, loader_data);
- assert(ph_check != NULL, "This placeholder should exist");
-}
-#endif // ASSERT
-
// Constraints on class loaders. The details of the algorithm can be
// found in the OOPSLA'98 paper "Dynamic Class Loading in the Java
// Virtual Machine" by Sheng Liang and Gilad Bracha. The basic idea is
@@ -1862,8 +1833,6 @@ void SystemDictionary::check_constraints(unsigned int name_hash,
}
}
- DEBUG_ONLY(if (is_parallelCapable(class_loader)) verify_placeholder(name, loader_data));
-
if (throwException == false) {
if (constraints()->check_or_update(k, class_loader, name) == false) {
throwException = true;
@@ -1918,13 +1887,13 @@ void SystemDictionary::update_dictionary(unsigned int hash,
// loader constraints might know about a class that isn't fully loaded
// yet and these will be ignored.
Klass* SystemDictionary::find_constrained_instance_or_array_klass(
- Symbol* class_name, Handle class_loader, TRAPS) {
+ Symbol* class_name, Handle class_loader, Thread* THREAD) {
// First see if it has been loaded directly.
// Force the protection domain to be null. (This removes protection checks.)
Handle no_protection_domain;
Klass* klass = find_instance_or_array_klass(class_name, class_loader,
- no_protection_domain, CHECK_NULL);
+ no_protection_domain);
if (klass != NULL)
return klass;
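
Dropping TRAPS from the find_* lookups above is possible because these functions only read the dictionary: they do not trigger loading, allocate, or throw, so callers need neither THREAD nor CHECK at the call site. Sketch of the before/after:

    // before: Klass* k = SystemDictionary::find(name, loader, pd, CHECK_NULL);
    // after: no exception path at the call site
    InstanceKlass* k = SystemDictionary::find_instance_klass(name, loader, pd);
    if (k == NULL) {
      // not (yet) loaded by this loader; the caller decides whether to resolve
    }
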
diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp
index 2ef0e9b9b5b2a1360ea4566030d65d9e4ceff793..cdc26435c0a51200d66a13b01ea5fe1d1d6b5e33 100644
--- a/src/hotspot/share/classfile/systemDictionary.hpp
+++ b/src/hotspot/share/classfile/systemDictionary.hpp
@@ -139,15 +139,14 @@ class SystemDictionary : AllStatic {
TRAPS);
// Lookup an already loaded class. If not found NULL is returned.
- static Klass* find(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS);
+ static InstanceKlass* find_instance_klass(Symbol* class_name, Handle class_loader, Handle protection_domain);
// Lookup an already loaded instance or array class.
// Do not make any queries to class loaders; consult only the cache.
// If not found NULL is returned.
static Klass* find_instance_or_array_klass(Symbol* class_name,
Handle class_loader,
- Handle protection_domain,
- TRAPS);
+ Handle protection_domain);
// Lookup an instance or array class that has already been loaded
// either into the given class loader, or else into another class
@@ -172,7 +171,7 @@ class SystemDictionary : AllStatic {
// to local linkage and access checks.
static Klass* find_constrained_instance_or_array_klass(Symbol* class_name,
Handle class_loader,
- TRAPS);
+ Thread* THREAD);
static void classes_do(MetaspaceClosure* it);
// Iterate over all methods in all klasses
@@ -387,7 +386,7 @@ protected:
static InstanceKlass* load_shared_boot_class(Symbol* class_name,
PackageEntry* pkg_entry,
TRAPS);
- static Handle compute_loader_lock_object(Handle class_loader);
+ static Handle get_loader_lock_or_null(Handle class_loader);
static InstanceKlass* find_or_define_instance_class(Symbol* class_name,
Handle class_loader,
InstanceKlass* k, TRAPS);
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp
index f45f70275297a04b986fa2fbedf53cf346463f9f..719e6a9b79678a4522fa237011ee1c80f0329356 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp
@@ -1013,7 +1013,7 @@ InstanceKlass* SystemDictionaryShared::find_or_load_shared_class(
// Note: currently, find_or_load_shared_class is called only from
// JVM_FindLoadedClass and used for PlatformClassLoader and AppClassLoader,
// which are parallel-capable loaders, so a lock here is NOT taken.
- assert(compute_loader_lock_object(class_loader) == NULL, "ObjectLocker not required");
+ assert(get_loader_lock_or_null(class_loader) == NULL, "ObjectLocker not required");
{
MutexLocker mu(THREAD, SystemDictionary_lock);
InstanceKlass* check = dictionary->find_class(d_hash, name);
@@ -1196,18 +1196,23 @@ bool SystemDictionaryShared::add_unregistered_class(InstanceKlass* k, TRAPS) {
return created;
}
-// This function is called to resolve the super/interfaces of shared classes for
-// non-built-in loaders. E.g., SharedClass in the below example
+// This function is called to lookup the super/interfaces of shared classes for
+// unregistered loaders. E.g., SharedClass in the below example
// where "super:" (and optionally "interface:") have been specified.
//
// java/lang/Object id: 0
-// Interface id: 2 super: 0 source: cust.jar
+// Interface id: 2 super: 0 source: cust.jar
// SharedClass id: 4 super: 0 interfaces: 2 source: cust.jar
-InstanceKlass* SystemDictionaryShared::dump_time_resolve_super_or_fail(
- Symbol* class_name, Symbol* super_name, Handle class_loader,
- Handle protection_domain, bool is_superclass, TRAPS) {
+InstanceKlass* SystemDictionaryShared::lookup_super_for_unregistered_class(
+ Symbol* class_name, Symbol* super_name, bool is_superclass) {
- assert(DumpSharedSpaces, "only when dumping");
+ assert(DumpSharedSpaces, "only when static dumping");
+
+ if (!ClassListParser::is_parsing_thread()) {
+ // Unregistered classes can be created only by ClassListParser::_parsing_thread.
+
+ return NULL;
+ }
ClassListParser* parser = ClassListParser::instance();
if (parser == NULL) {
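
A caller-side sketch of the renamed lookup (the call site is hypothetical): with the new guard, any thread other than the class-list parsing thread gets NULL back instead of reaching the parser:

    InstanceKlass* super_ik = SystemDictionaryShared::lookup_super_for_unregistered_class(
        class_name, super_name, /*is_superclass=*/true);
    if (super_ik == NULL) {
      // Either this is not ClassListParser::_parsing_thread, or the named
      // supertype was not declared in the class list.
    }
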
@@ -1600,7 +1605,9 @@ void SystemDictionaryShared::add_lambda_proxy_class(InstanceKlass* caller_ik,
InstanceKlass* nest_host = caller_ik->nest_host(THREAD);
DumpTimeSharedClassInfo* info = _dumptime_table->get(lambda_ik);
- if (info != NULL && !lambda_ik->is_non_strong_hidden() && is_builtin(lambda_ik) && is_builtin(caller_ik)) {
+ if (info != NULL && !lambda_ik->is_non_strong_hidden() && is_builtin(lambda_ik) && is_builtin(caller_ik)
+ // Don't include the lambda proxy if its nest host is not in the "linked" state.
+ && nest_host->is_linked()) {
// Set _is_archived_lambda_proxy in DumpTimeSharedClassInfo so that the lambda_ik
// won't be excluded during dumping of shared archive. See ExcludeDumpTimeSharedClasses.
info->_is_archived_lambda_proxy = true;
@@ -1670,7 +1677,7 @@ InstanceKlass* SystemDictionaryShared::prepare_shared_lambda_proxy_class(Instanc
InstanceKlass* caller_ik, TRAPS) {
Handle class_loader(THREAD, caller_ik->class_loader());
Handle protection_domain;
- PackageEntry* pkg_entry = get_package_entry_from_class(caller_ik, class_loader);
+ PackageEntry* pkg_entry = caller_ik->package();
if (caller_ik->class_loader() != NULL) {
protection_domain = SystemDictionaryShared::init_security_info(class_loader, caller_ik, pkg_entry, CHECK_NULL);
}
@@ -2038,7 +2045,7 @@ public:
log_info(cds,dynamic)("Archiving hidden %s", info._proxy_klasses->at(0)->external_name());
size_t byte_size = sizeof(RunTimeLambdaProxyClassInfo);
RunTimeLambdaProxyClassInfo* runtime_info =
- (RunTimeLambdaProxyClassInfo*)MetaspaceShared::read_only_space_alloc(byte_size);
+ (RunTimeLambdaProxyClassInfo*)ArchiveBuilder::ro_region_alloc(byte_size);
runtime_info->init(key, info);
unsigned int hash = runtime_info->hash();
u4 delta = _builder->any_to_offset_u4((void*)runtime_info);
@@ -2086,7 +2093,7 @@ public:
if (!info.is_excluded() && info.is_builtin() == _is_builtin) {
size_t byte_size = RunTimeSharedClassInfo::byte_size(info._klass, info.num_verifier_constraints(), info.num_loader_constraints());
RunTimeSharedClassInfo* record;
- record = (RunTimeSharedClassInfo*)MetaspaceShared::read_only_space_alloc(byte_size);
+ record = (RunTimeSharedClassInfo*)ArchiveBuilder::ro_region_alloc(byte_size);
record->init(info);
unsigned int hash;
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.hpp b/src/hotspot/share/classfile/systemDictionaryShared.hpp
index db02009b8807e26841d4518731da097b53ff67fe..6a7939e4c8506897353d4b2efa738435963268cf 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
#define SHARE_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
+#include "classfile/classLoaderData.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/filemap.hpp"
@@ -246,12 +247,8 @@ public:
static bool is_sharing_possible(ClassLoaderData* loader_data);
static bool add_unregistered_class(InstanceKlass* k, TRAPS);
- static InstanceKlass* dump_time_resolve_super_or_fail(Symbol* class_name,
- Symbol* super_name,
- Handle class_loader,
- Handle protection_domain,
- bool is_superclass,
- TRAPS);
+ static InstanceKlass* lookup_super_for_unregistered_class(Symbol* class_name,
+ Symbol* super_name, bool is_superclass);
static void init_dumptime_info(InstanceKlass* k) NOT_CDS_RETURN;
static void remove_dumptime_info(InstanceKlass* k) NOT_CDS_RETURN;
diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp
index 03c716b2362e79fc34d5706a02a83fde5ad6f516..4ff2d4793bb76df9f13d66e246f5990ef6eb33c9 100644
--- a/src/hotspot/share/classfile/verifier.cpp
+++ b/src/hotspot/share/classfile/verifier.cpp
@@ -3149,7 +3149,7 @@ void ClassVerifier::verify_return_value(
if (return_type == VerificationType::bogus_type()) {
verify_error(ErrorContext::bad_type(bci,
current_frame->stack_top_ctx(), TypeOrigin::signature(return_type)),
- "Method expects a return value");
+ "Method does not expect a return value");
return;
}
bool match = return_type.is_assignable_from(type, this, false, CHECK_VERIFY(this));
diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp
index 848cc98a4c2cb29bf87f3c8759a4ef56d964ee52..f326ccc738df7eabd63c97bcdff8bdb1524351ac 100644
--- a/src/hotspot/share/code/compiledIC.cpp
+++ b/src/hotspot/share/code/compiledIC.cpp
@@ -35,6 +35,7 @@
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp
index 0d6ffe789c58e50ac083157dbbb44dde56292510..74c38880742bc2ad830d605ecaa9f6fd231b19e6 100644
--- a/src/hotspot/share/code/compiledMethod.cpp
+++ b/src/hotspot/share/code/compiledMethod.cpp
@@ -36,6 +36,8 @@
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/compiledICHolder.inline.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
diff --git a/src/hotspot/share/code/debugInfo.cpp b/src/hotspot/share/code/debugInfo.cpp
index aeed7f4d937d6ab2e488ba88445fd2488bb2fcc4..47d6f7b9300da203a9906c420bdcf6b00d338a90 100644
--- a/src/hotspot/share/code/debugInfo.cpp
+++ b/src/hotspot/share/code/debugInfo.cpp
@@ -159,11 +159,11 @@ void ObjectValue::read_object(DebugInfoReadStream* stream) {
}
void ObjectValue::write_on(DebugInfoWriteStream* stream) {
- if (_visited) {
+ if (is_visited()) {
stream->write_int(OBJECT_ID_CODE);
stream->write_int(_id);
} else {
- _visited = true;
+ set_visited(true);
stream->write_int(is_auto_box() ? AUTO_BOX_OBJECT_CODE : OBJECT_CODE);
stream->write_int(_id);
_klass->write_on(stream);
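
write_on() implements a standard first-visit/back-reference scheme, and going through the accessor also picks up the set_visited() fix in debugInfo.hpp below. A self-contained sketch of the pattern (illustrative, not HotSpot code):

    #include <iostream>

    struct Node {
      int  id;
      bool visited = false;
    };

    void write_node(std::ostream& out, Node& n) {
      if (n.visited) {
        out << "REF " << n.id << '\n';  // already emitted: back-reference only
      } else {
        n.visited = true;               // first visit: emit the body once
        out << "OBJ " << n.id << '\n';
      }
    }
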
diff --git a/src/hotspot/share/code/debugInfo.hpp b/src/hotspot/share/code/debugInfo.hpp
index bfba1523b083fad128d972799c4bcf6bfc95e17e..3f213783a218afc6b24effa529fde4e9e0630d15 100644
--- a/src/hotspot/share/code/debugInfo.hpp
+++ b/src/hotspot/share/code/debugInfo.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -149,7 +149,7 @@ class ObjectValue: public ScopeValue {
bool is_visited() const { return _visited; }
void set_value(oop value);
- void set_visited(bool visited) { _visited = false; }
+ void set_visited(bool visited) { _visited = visited; }
// Serialization of debugging information
void read_object(DebugInfoReadStream* stream);
diff --git a/src/hotspot/share/code/dependencies.cpp b/src/hotspot/share/code/dependencies.cpp
index 862e673fc5db915fda03c04c5a3a9f8089643ffd..6c077d3f092e00c026c580efd6158339a918bcc8 100644
--- a/src/hotspot/share/code/dependencies.cpp
+++ b/src/hotspot/share/code/dependencies.cpp
@@ -99,32 +99,12 @@ void Dependencies::assert_abstract_with_unique_concrete_subtype(ciKlass* ctxk, c
assert_common_2(abstract_with_unique_concrete_subtype, ctxk, conck);
}
-void Dependencies::assert_abstract_with_no_concrete_subtype(ciKlass* ctxk) {
- check_ctxk_abstract(ctxk);
- assert_common_1(abstract_with_no_concrete_subtype, ctxk);
-}
-
-void Dependencies::assert_concrete_with_no_concrete_subtype(ciKlass* ctxk) {
- check_ctxk_concrete(ctxk);
- assert_common_1(concrete_with_no_concrete_subtype, ctxk);
-}
-
void Dependencies::assert_unique_concrete_method(ciKlass* ctxk, ciMethod* uniqm) {
check_ctxk(ctxk);
check_unique_method(ctxk, uniqm);
assert_common_2(unique_concrete_method, ctxk, uniqm);
}
-void Dependencies::assert_abstract_with_exclusive_concrete_subtypes(ciKlass* ctxk, ciKlass* k1, ciKlass* k2) {
- check_ctxk(ctxk);
- assert_common_3(abstract_with_exclusive_concrete_subtypes_2, ctxk, k1, k2);
-}
-
-void Dependencies::assert_exclusive_concrete_methods(ciKlass* ctxk, ciMethod* m1, ciMethod* m2) {
- check_ctxk(ctxk);
- assert_common_3(exclusive_concrete_methods_2, ctxk, m1, m2);
-}
-
void Dependencies::assert_has_no_finalizable_subclasses(ciKlass* ctxk) {
check_ctxk(ctxk);
assert_common_1(no_finalizable_subclasses, ctxk);
@@ -266,47 +246,6 @@ void Dependencies::assert_common_2(DepType dept,
deps->append(x1);
}
-void Dependencies::assert_common_3(DepType dept,
- ciKlass* ctxk, ciBaseObject* x, ciBaseObject* x2) {
- assert(dep_context_arg(dept) == 0, "sanity");
- assert(dep_args(dept) == 3, "sanity");
- log_dependency(dept, ctxk, x, x2);
- GrowableArray<ciBaseObject*>* deps = _deps[dept];
-
- // try to normalize an unordered pair:
- bool swap = false;
- switch (dept) {
- case abstract_with_exclusive_concrete_subtypes_2:
- swap = (x->ident() > x2->ident() && x->as_metadata()->as_klass() != ctxk);
- break;
- case exclusive_concrete_methods_2:
- swap = (x->ident() > x2->ident() && x->as_metadata()->as_method()->holder() != ctxk);
- break;
- default:
- break;
- }
- if (swap) { ciBaseObject* t = x; x = x2; x2 = t; }
-
- // see if the same (or a similar) dep is already recorded
- if (note_dep_seen(dept, x) && note_dep_seen(dept, x2)) {
- // look in this bucket for redundant assertions
- const int stride = 3;
- for (int i = deps->length(); (i -= stride) >= 0; ) {
- ciBaseObject* y = deps->at(i+1);
- ciBaseObject* y2 = deps->at(i+2);
- if (x == y && x2 == y2) { // same subjects; check the context
- if (maybe_merge_ctxk(deps, i+0, ctxk)) {
- return;
- }
- }
- }
- }
- // append the assertion in the correct bucket:
- deps->append(ctxk);
- deps->append(x);
- deps->append(x2);
-}
-
#if INCLUDE_JVMCI
bool Dependencies::maybe_merge_ctxk(GrowableArray<DepValue>* deps,
int ctxk_i, DepValue ctxk2_dv) {
@@ -473,10 +412,7 @@ size_t Dependencies::estimate_size_in_bytes() {
ciKlass* Dependencies::ctxk_encoded_as_null(DepType dept, ciBaseObject* x) {
switch (dept) {
- case abstract_with_exclusive_concrete_subtypes_2:
- return x->as_metadata()->as_klass();
case unique_concrete_method:
- case exclusive_concrete_methods_2:
return x->as_metadata()->as_method()->holder();
default:
return NULL; // let NULL be NULL
@@ -486,11 +422,7 @@ ciKlass* Dependencies::ctxk_encoded_as_null(DepType dept, ciBaseObject* x) {
Klass* Dependencies::ctxk_encoded_as_null(DepType dept, Metadata* x) {
assert(must_be_in_vm(), "raw oops here");
switch (dept) {
- case abstract_with_exclusive_concrete_subtypes_2:
- assert(x->is_klass(), "sanity");
- return (Klass*) x;
case unique_concrete_method:
- case exclusive_concrete_methods_2:
assert(x->is_method(), "sanity");
return ((Method*)x)->method_holder();
default:
@@ -593,11 +525,7 @@ const char* Dependencies::_dep_name[TYPE_LIMIT] = {
"evol_method",
"leaf_type",
"abstract_with_unique_concrete_subtype",
- "abstract_with_no_concrete_subtype",
- "concrete_with_no_concrete_subtype",
"unique_concrete_method",
- "abstract_with_exclusive_concrete_subtypes_2",
- "exclusive_concrete_methods_2",
"no_finalizable_subclasses",
"call_site_target_value"
};
@@ -607,11 +535,7 @@ int Dependencies::_dep_args[TYPE_LIMIT] = {
1, // evol_method m
1, // leaf_type ctxk
2, // abstract_with_unique_concrete_subtype ctxk, k
- 1, // abstract_with_no_concrete_subtype ctxk
- 1, // concrete_with_no_concrete_subtype ctxk
2, // unique_concrete_method ctxk, m
- 3, // unique_concrete_subtypes_2 ctxk, k1, k2
- 3, // unique_concrete_methods_2 ctxk, m1, m2
1, // no_finalizable_subclasses ctxk
2 // call_site_target_value call_site, method_handle
};
@@ -1198,17 +1122,18 @@ class ClassHierarchyWalker {
} else if (!k->is_instance_klass()) {
return false; // no methods to find in an array type
} else {
+ InstanceKlass* ik = InstanceKlass::cast(k);
// Search class hierarchy first, skipping private implementations
// as they never override any inherited methods
- Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
- if (!Dependencies::is_concrete_method(m, k)) {
+ Method* m = ik->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
+ if (!Dependencies::is_concrete_method(m, ik)) {
// Check for re-abstraction of method
- if (!k->is_interface() && m != NULL && m->is_abstract()) {
+ if (!ik->is_interface() && m != NULL && m->is_abstract()) {
// Found a matching abstract method 'm' in the class hierarchy.
// This is fine iff 'k' is an abstract class and all concrete subtypes
// of 'k' override 'm' and are participants of the current search.
ClassHierarchyWalker wf(_participants, _num_participants);
- Klass* w = wf.find_witness_subtype(k);
+ Klass* w = wf.find_witness_subtype(ik);
if (w != NULL) {
Method* wm = InstanceKlass::cast(w)->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
if (!Dependencies::is_concrete_method(wm, w)) {
@@ -1221,10 +1146,10 @@ class ClassHierarchyWalker {
}
}
// Check interface defaults also, if any exist.
- Array<Method*>* default_methods = ik->default_methods();
+ Array<Method*>* default_methods = ik->default_methods();
if (default_methods == NULL)
return false;
- m = InstanceKlass::cast(k)->find_method(default_methods, _name, _signature);
+ m = ik->find_method(default_methods, _name, _signature);
if (!Dependencies::is_concrete_method(m, NULL))
return false;
}
@@ -1264,16 +1189,17 @@ class ClassHierarchyWalker {
private:
// the actual search method:
- Klass* find_witness_anywhere(Klass* context_type,
- bool participants_hide_witnesses,
- bool top_level_call = true);
+ Klass* find_witness_anywhere(InstanceKlass* context_type,
+ bool participants_hide_witnesses);
// the spot-checking version:
Klass* find_witness_in(KlassDepChange& changes,
- Klass* context_type,
- bool participants_hide_witnesses);
+ InstanceKlass* context_type,
+ bool participants_hide_witnesses);
public:
- Klass* find_witness_subtype(Klass* context_type, KlassDepChange* changes = NULL) {
+ Klass* find_witness_subtype(Klass* k, KlassDepChange* changes = NULL) {
assert(doing_subtype_search(), "must set up a subtype search");
+ assert(k->is_instance_klass(), "required");
+ InstanceKlass* context_type = InstanceKlass::cast(k);
// When looking for unexpected concrete types,
// do not look beneath expected ones.
const bool participants_hide_witnesses = true;
@@ -1285,8 +1211,10 @@ class ClassHierarchyWalker {
return find_witness_anywhere(context_type, participants_hide_witnesses);
}
}
- Klass* find_witness_definer(Klass* context_type, KlassDepChange* changes = NULL) {
+ Klass* find_witness_definer(Klass* k, KlassDepChange* changes = NULL) {
assert(!doing_subtype_search(), "must set up a method definer search");
+ assert(k->is_instance_klass(), "required");
+ InstanceKlass* context_type = InstanceKlass::cast(k);
// When looking for unexpected concrete methods,
// look beneath expected ones, to see if there are overrides.
const bool participants_hide_witnesses = true;
@@ -1347,8 +1275,8 @@ static bool count_find_witness_calls() {
Klass* ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
- Klass* context_type,
- bool participants_hide_witnesses) {
+ InstanceKlass* context_type,
+ bool participants_hide_witnesses) {
assert(changes.involves_context(context_type), "irrelevant dependency");
Klass* new_type = changes.new_type();
@@ -1360,7 +1288,7 @@ Klass* ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
// Must not move the class hierarchy during this check:
assert_locked_or_safepoint(Compile_lock);
- int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
+ int nof_impls = context_type->nof_implementors();
if (nof_impls > 1) {
// Avoid this case: *I.m > { A.m, C }; B.m > C
// %%% Until this is fixed more systematically, bail out.
@@ -1391,15 +1319,8 @@ Klass* ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
return NULL;
}
-
// Walk hierarchy under a context type, looking for unexpected types.
-// Do not report participant types, and recursively walk beneath
-// them only if participants_hide_witnesses is false.
-// If top_level_call is false, skip testing the context type,
-// because the caller has already considered it.
-Klass* ClassHierarchyWalker::find_witness_anywhere(Klass* context_type,
- bool participants_hide_witnesses,
- bool top_level_call) {
+Klass* ClassHierarchyWalker::find_witness_anywhere(InstanceKlass* context_type, bool participants_hide_witnesses) {
// Current thread must be in VM (not native mode, as in CI):
assert(must_be_in_vm(), "raw oops here");
// Must not move the class hierarchy during this check:
@@ -1408,106 +1329,50 @@ Klass* ClassHierarchyWalker::find_witness_anywhere(Klass* context_type,
bool do_counts = count_find_witness_calls();
// Check the root of the sub-hierarchy first.
- if (top_level_call) {
- if (do_counts) {
- NOT_PRODUCT(deps_find_witness_calls++);
- NOT_PRODUCT(deps_find_witness_steps++);
- }
- if (is_participant(context_type)) {
- if (participants_hide_witnesses) return NULL;
- // else fall through to search loop...
- } else if (is_witness(context_type) && !ignore_witness(context_type)) {
- // The context is an abstract class or interface, to start with.
- return context_type;
- }
+ if (do_counts) {
+ NOT_PRODUCT(deps_find_witness_calls++);
}
- // Now we must check each implementor and each subclass.
- // Use a short worklist to avoid blowing the stack.
- // Each worklist entry is a *chain* of subklass siblings to process.
- const int CHAINMAX = 100; // >= 1 + InstanceKlass::implementors_limit
- Klass* chains[CHAINMAX];
- int chaini = 0; // index into worklist
- Klass* chain; // scratch variable
-#define ADD_SUBCLASS_CHAIN(k) { \
- assert(chaini < CHAINMAX, "oob"); \
- chain = k->subklass(); \
- if (chain != NULL) chains[chaini++] = chain; }
-
- // Look for non-abstract subclasses.
- // (Note: Interfaces do not have subclasses.)
- ADD_SUBCLASS_CHAIN(context_type);
-
+ // (Note: Interfaces do not have subclasses.)
// If it is an interface, search its direct implementors.
- // (Their subclasses are additional indirect implementors.
- // See InstanceKlass::add_implementor.)
- // (Note: nof_implementors is always zero for non-interfaces.)
- if (top_level_call) {
- int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
- if (nof_impls > 1) {
+ // (Their subclasses are additional indirect implementors. See InstanceKlass::add_implementor().)
+ if (context_type->is_interface()) {
+ int nof_impls = context_type->nof_implementors();
+ if (nof_impls == 0) {
+ return NULL; // no implementors
+ } else if (nof_impls == 1) { // unique implementor
+ assert(context_type != context_type->implementor(), "not unique");
+ context_type = context_type->implementor();
+ } else { // nof_impls >= 2
// Avoid this case: *I.m > { A.m, C }; B.m > C
// Here, I.m has 2 concrete implementations, but m appears unique
// as A.m, because the search misses B.m when checking C.
// The inherited method B.m was getting missed by the walker
// when interface 'I' was the starting point.
// %%% Until this is fixed more systematically, bail out.
- // (Old CHA had the same limitation.)
return context_type;
}
- if (nof_impls > 0) {
- Klass* impl = InstanceKlass::cast(context_type)->implementor();
- assert(impl != NULL, "just checking");
- // If impl is the same as the context_type, then more than one
- // implementor has seen. No exact info in this case.
- if (impl == context_type) {
- return context_type; // report an inexact witness to this sad affair
- }
- if (do_counts)
- { NOT_PRODUCT(deps_find_witness_steps++); }
- if (is_participant(impl)) {
- if (!participants_hide_witnesses) {
- ADD_SUBCLASS_CHAIN(impl);
- }
- } else if (is_witness(impl) && !ignore_witness(impl)) {
- return impl;
- } else {
- ADD_SUBCLASS_CHAIN(impl);
- }
- }
}
- // Recursively process each non-trivial sibling chain.
- while (chaini > 0) {
- Klass* chain = chains[--chaini];
- for (Klass* sub = chain; sub != NULL; sub = sub->next_sibling()) {
- if (do_counts) { NOT_PRODUCT(deps_find_witness_steps++); }
- if (is_participant(sub)) {
- if (participants_hide_witnesses) continue;
- // else fall through to process this guy's subclasses
- } else if (is_witness(sub) && !ignore_witness(sub)) {
- return sub;
- }
- if (chaini < (VerifyDependencies? 2: CHAINMAX)) {
- // Fast path. (Partially disabled if VerifyDependencies.)
- ADD_SUBCLASS_CHAIN(sub);
- } else {
- // Worklist overflow. Do a recursive call. Should be rare.
- // The recursive call will have its own worklist, of course.
- // (Note that sub has already been tested, so that there is
- // no need for the recursive call to re-test. That's handy,
- // since the recursive call sees sub as the context_type.)
- if (do_counts) { NOT_PRODUCT(deps_find_witness_recursions++); }
- Klass* witness = find_witness_anywhere(sub,
- participants_hide_witnesses,
- /*top_level_call=*/ false);
- if (witness != NULL) return witness;
+ assert(!context_type->is_interface(), "not allowed");
+
+ for (ClassHierarchyIterator iter(context_type); !iter.done(); iter.next()) {
+ Klass* sub = iter.klass();
+
+ if (do_counts) { NOT_PRODUCT(deps_find_witness_steps++); }
+
+ // Do not report participant types.
+ if (is_participant(sub)) {
+ // Walk beneath a participant only when it doesn't hide witnesses.
+ if (participants_hide_witnesses) {
+ iter.skip_subclasses();
}
+ } else if (is_witness(sub) && !ignore_witness(sub)) {
+ return sub; // found a witness
}
}
-
// No witness found. The dependency remains unbroken.
return NULL;
-#undef ADD_SUBCLASS_CHAIN
}
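
The rewritten walk leans on ClassHierarchyIterator instead of the hand-rolled worklist. Its shape in isolation (`should_prune` and `visit` are hypothetical; the iterator yields the root first, then its subtypes, and skip_subclasses() prunes the subtree under the current klass):

    for (ClassHierarchyIterator iter(root_ik); !iter.done(); iter.next()) {
      Klass* sub = iter.klass();
      if (should_prune(sub)) {          // e.g. a participant that hides witnesses
        iter.skip_subclasses();         // do not descend beneath this node
        continue;
      }
      visit(sub);
    }
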
@@ -1606,32 +1471,12 @@ Klass* Dependencies::check_leaf_type(Klass* ctxk) {
// This allows the compiler to narrow occurrences of ctxk by conck,
// when dealing with the types of actual instances.
Klass* Dependencies::check_abstract_with_unique_concrete_subtype(Klass* ctxk,
- Klass* conck,
- KlassDepChange* changes) {
+ Klass* conck,
+ KlassDepChange* changes) {
ClassHierarchyWalker wf(conck);
return wf.find_witness_subtype(ctxk, changes);
}
-// If a non-concrete class has no concrete subtypes, it is not (yet)
-// instantiatable. This can allow the compiler to make some paths go
-// dead, if they are gated by a test of the type.
-Klass* Dependencies::check_abstract_with_no_concrete_subtype(Klass* ctxk,
- KlassDepChange* changes) {
- // Find any concrete subtype, with no participants:
- ClassHierarchyWalker wf;
- return wf.find_witness_subtype(ctxk, changes);
-}
-
-
-// If a concrete class has no concrete subtypes, it can always be
-// exactly typed. This allows the use of a cheaper type test.
-Klass* Dependencies::check_concrete_with_no_concrete_subtype(Klass* ctxk,
- KlassDepChange* changes) {
- // Find any concrete subtype, with only the ctxk as participant:
- ClassHierarchyWalker wf(ctxk);
- return wf.find_witness_subtype(ctxk, changes);
-}
-
// Find the unique concrete proper subtype of ctxk, or NULL if there
// is more than one concrete proper subtype. If there are no concrete
@@ -1645,22 +1490,6 @@ Klass* Dependencies::find_unique_concrete_subtype(Klass* ctxk) {
if (wit != NULL) return NULL; // Too many witnesses.
Klass* conck = wf.participant(0);
if (conck == NULL) {
-#ifndef PRODUCT
- // Make sure the dependency mechanism will pass this discovery:
- if (VerifyDependencies) {
- // Turn off dependency tracing while actually testing deps.
- FlagSetting fs(TraceDependencies, false);
- if (!Dependencies::is_concrete_klass(ctxk)) {
- guarantee(NULL ==
- (void *)check_abstract_with_no_concrete_subtype(ctxk),
- "verify dep.");
- } else {
- guarantee(NULL ==
- (void *)check_concrete_with_no_concrete_subtype(ctxk),
- "verify dep.");
- }
- }
-#endif //PRODUCT
return ctxk; // Return ctxk as a flag for "no subtypes".
} else {
#ifndef PRODUCT
@@ -1679,76 +1508,12 @@ Klass* Dependencies::find_unique_concrete_subtype(Klass* ctxk) {
}
}
-// Test the assertion that the k[12] are the only concrete subtypes of ctxk,
-// except possibly for further subtypes of k[12] themselves.
-// The context type must be abstract. The types k1 and k2 are themselves
-// allowed to have further concrete subtypes.
-Klass* Dependencies::check_abstract_with_exclusive_concrete_subtypes(
- Klass* ctxk,
- Klass* k1,
- Klass* k2,
- KlassDepChange* changes) {
- ClassHierarchyWalker wf;
- wf.add_participant(k1);
- wf.add_participant(k2);
- return wf.find_witness_subtype(ctxk, changes);
-}
-
-// Search ctxk for concrete implementations. If there are klen or fewer,
-// pack them into the given array and return the number.
-// Otherwise, return -1, meaning the given array would overflow.
-// (Note that a return of 0 means there are exactly no concrete subtypes.)
-// In this search, if ctxk is concrete, it will be reported alone.
-// For any type CC reported, no proper subtypes of CC will be reported.
-int Dependencies::find_exclusive_concrete_subtypes(Klass* ctxk,
- int klen,
- Klass* karray[]) {
- ClassHierarchyWalker wf;
- wf.record_witnesses(klen);
- Klass* wit = wf.find_witness_subtype(ctxk);
- if (wit != NULL) return -1; // Too many witnesses.
- int num = wf.num_participants();
- assert(num <= klen, "oob");
- // Pack the result array with the good news.
- for (int i = 0; i < num; i++)
- karray[i] = wf.participant(i);
-#ifndef PRODUCT
- // Make sure the dependency mechanism will pass this discovery:
- if (VerifyDependencies) {
- // Turn off dependency tracing while actually testing deps.
- FlagSetting fs(TraceDependencies, false);
- switch (Dependencies::is_concrete_klass(ctxk)? -1: num) {
- case -1: // ctxk was itself concrete
- guarantee(num == 1 && karray[0] == ctxk, "verify dep.");
- break;
- case 0:
- guarantee(NULL == (void *)check_abstract_with_no_concrete_subtype(ctxk),
- "verify dep.");
- break;
- case 1:
- guarantee(NULL == (void *)
- check_abstract_with_unique_concrete_subtype(ctxk, karray[0]),
- "verify dep.");
- break;
- case 2:
- guarantee(NULL == (void *)
- check_abstract_with_exclusive_concrete_subtypes(ctxk,
- karray[0],
- karray[1]),
- "verify dep.");
- break;
- default:
- ShouldNotReachHere(); // klen > 2 yet supported
- }
- }
-#endif //PRODUCT
- return num;
-}
// If a class (or interface) has a unique concrete method uniqm, return NULL.
// Otherwise, return a class that contains an interfering method.
-Klass* Dependencies::check_unique_concrete_method(Klass* ctxk, Method* uniqm,
- KlassDepChange* changes) {
+Klass* Dependencies::check_unique_concrete_method(Klass* ctxk,
+ Method* uniqm,
+ KlassDepChange* changes) {
// Here is a missing optimization: If uniqm->is_final(),
// we don't really need to search beneath it for overrides.
// This is probably not important, since we don't use dependencies
@@ -1792,16 +1557,6 @@ Method* Dependencies::find_unique_concrete_method(Klass* ctxk, Method* m) {
return fm;
}
-Klass* Dependencies::check_exclusive_concrete_methods(Klass* ctxk,
- Method* m1,
- Method* m2,
- KlassDepChange* changes) {
- ClassHierarchyWalker wf(m1);
- wf.add_participant(m1->method_holder());
- wf.add_participant(m2->method_holder());
- return wf.find_witness_definer(ctxk, changes);
-}
-
Klass* Dependencies::check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepChange* changes) {
Klass* search_at = ctxk;
if (changes != NULL)
@@ -1854,21 +1609,9 @@ Klass* Dependencies::DepStream::check_klass_dependency(KlassDepChange* changes)
case abstract_with_unique_concrete_subtype:
witness = check_abstract_with_unique_concrete_subtype(context_type(), type_argument(1), changes);
break;
- case abstract_with_no_concrete_subtype:
- witness = check_abstract_with_no_concrete_subtype(context_type(), changes);
- break;
- case concrete_with_no_concrete_subtype:
- witness = check_concrete_with_no_concrete_subtype(context_type(), changes);
- break;
case unique_concrete_method:
witness = check_unique_concrete_method(context_type(), method_argument(1), changes);
break;
- case abstract_with_exclusive_concrete_subtypes_2:
- witness = check_abstract_with_exclusive_concrete_subtypes(context_type(), type_argument(1), type_argument(2), changes);
- break;
- case exclusive_concrete_methods_2:
- witness = check_exclusive_concrete_methods(context_type(), method_argument(1), method_argument(2), changes);
- break;
case no_finalizable_subclasses:
witness = check_has_no_finalizable_subclasses(context_type(), changes);
break;
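
Removing a DepType means touching the enum in dependencies.hpp and the _dep_name[] and _dep_args[] tables above in lockstep. A generic sketch of how such enum/table pairs can be kept honest at compile time (illustrative, not HotSpot code):

    enum DepType { evol_method, leaf_type, TYPE_LIMIT };
    const char* const dep_name[] = { "evol_method", "leaf_type" };
    static_assert(sizeof(dep_name) / sizeof(dep_name[0]) == TYPE_LIMIT,
                  "dep_name[] must have one entry per DepType");
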
diff --git a/src/hotspot/share/code/dependencies.hpp b/src/hotspot/share/code/dependencies.hpp
index 4366201ef19e0a476d8837a3f4b26d89dc308489..55bbc11502af75b2b4eafaec79f0506af0558a5a 100644
--- a/src/hotspot/share/code/dependencies.hpp
+++ b/src/hotspot/share/code/dependencies.hpp
@@ -116,12 +116,6 @@ class Dependencies: public ResourceObj {
// An abstract class CX has exactly one concrete subtype CC.
abstract_with_unique_concrete_subtype,
- // The type CX is purely abstract, with no concrete subtype* at all.
- abstract_with_no_concrete_subtype,
-
- // The concrete CX is free of concrete proper subtypes.
- concrete_with_no_concrete_subtype,
-
// Given a method M1 and a context class CX, the set MM(CX, M1) of
// "concrete matching methods" in CX of M1 is the set of every
// concrete M2 for which it is possible to create an invokevirtual
@@ -140,23 +134,6 @@ class Dependencies: public ResourceObj {
// than {M1}.
unique_concrete_method, // one unique concrete method under CX
- // An "exclusive" assertion concerns two methods or subtypes, and
- // declares that there are at most two (or perhaps later N>2)
- // specific items that jointly satisfy the restriction.
- // We list all items explicitly rather than just giving their
- // count, for robustness in the face of complex schema changes.
-
- // A context class CX (which may be either abstract or concrete)
- // has two exclusive concrete subtypes* C1, C2 if every concrete
- // subtype* of CX is either C1 or C2. Note that if neither C1 or C2
- // are equal to CX, then CX itself must be abstract. But it is
- // also possible (for example) that C1 is CX (a concrete class)
- // and C2 is a proper subtype of C1.
- abstract_with_exclusive_concrete_subtypes_2,
-
- // This dependency asserts that MM(CX, M1) is no greater than {M1,M2}.
- exclusive_concrete_methods_2,
-
// This dependency asserts that no instances of a class or its
// subclasses require finalization registration.
no_finalizable_subclasses,
@@ -348,18 +325,13 @@ class Dependencies: public ResourceObj {
void assert_common_1(DepType dept, ciBaseObject* x);
void assert_common_2(DepType dept, ciBaseObject* x0, ciBaseObject* x1);
- void assert_common_3(DepType dept, ciKlass* ctxk, ciBaseObject* x1, ciBaseObject* x2);
public:
// Adding assertions to a new dependency set at compile time:
void assert_evol_method(ciMethod* m);
void assert_leaf_type(ciKlass* ctxk);
void assert_abstract_with_unique_concrete_subtype(ciKlass* ctxk, ciKlass* conck);
- void assert_abstract_with_no_concrete_subtype(ciKlass* ctxk);
- void assert_concrete_with_no_concrete_subtype(ciKlass* ctxk);
void assert_unique_concrete_method(ciKlass* ctxk, ciMethod* uniqm);
- void assert_abstract_with_exclusive_concrete_subtypes(ciKlass* ctxk, ciKlass* k1, ciKlass* k2);
- void assert_exclusive_concrete_methods(ciKlass* ctxk, ciMethod* m1, ciMethod* m2);
void assert_has_no_finalizable_subclasses(ciKlass* ctxk);
void assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle);
@@ -426,18 +398,8 @@ class Dependencies: public ResourceObj {
// Checking old assertions at run-time (in the VM only):
static Klass* check_evol_method(Method* m);
static Klass* check_leaf_type(Klass* ctxk);
- static Klass* check_abstract_with_unique_concrete_subtype(Klass* ctxk, Klass* conck,
- KlassDepChange* changes = NULL);
- static Klass* check_abstract_with_no_concrete_subtype(Klass* ctxk,
- KlassDepChange* changes = NULL);
- static Klass* check_concrete_with_no_concrete_subtype(Klass* ctxk,
- KlassDepChange* changes = NULL);
- static Klass* check_unique_concrete_method(Klass* ctxk, Method* uniqm,
- KlassDepChange* changes = NULL);
- static Klass* check_abstract_with_exclusive_concrete_subtypes(Klass* ctxk, Klass* k1, Klass* k2,
- KlassDepChange* changes = NULL);
- static Klass* check_exclusive_concrete_methods(Klass* ctxk, Method* m1, Method* m2,
- KlassDepChange* changes = NULL);
+ static Klass* check_abstract_with_unique_concrete_subtype(Klass* ctxk, Klass* conck, KlassDepChange* changes = NULL);
+ static Klass* check_unique_concrete_method(Klass* ctxk, Method* uniqm, KlassDepChange* changes = NULL);
static Klass* check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepChange* changes = NULL);
static Klass* check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
// A returned Klass* is NULL if the dependency assertion is still
@@ -455,9 +417,8 @@ class Dependencies: public ResourceObj {
// It is used by DepStream::spot_check_dependency_at.
// Detecting possible new assertions:
- static Klass* find_unique_concrete_subtype(Klass* ctxk);
- static Method* find_unique_concrete_method(Klass* ctxk, Method* m);
- static int find_exclusive_concrete_subtypes(Klass* ctxk, int klen, Klass* k[]);
+ static Klass* find_unique_concrete_subtype(Klass* ctxk);
+ static Method* find_unique_concrete_method(Klass* ctxk, Method* m);
// Create the encoding which will be stored in an nmethod.
void encode_content_bytes();
diff --git a/src/hotspot/share/code/icBuffer.cpp b/src/hotspot/share/code/icBuffer.cpp
index fb49e14b38033ecb51a4f01f4925ff5a501eaf04..cb13bdbd54ef0ff64c369d7a33205c0efb6042bd 100644
--- a/src/hotspot/share/code/icBuffer.cpp
+++ b/src/hotspot/share/code/icBuffer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
+#include "runtime/vmOperations.hpp"
DEF_STUB_INTERFACE(ICStub);
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index 0711c399e82d4bae0d8f8eaccd0c09176ba36494..6805198a945ba110e7253d21236069d1568e46b0 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -46,6 +46,7 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
+#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
@@ -501,7 +502,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level,
- const GrowableArrayView<BufferBlob*>& native_invokers
+ const GrowableArrayView<RuntimeStub*>& native_invokers
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
@@ -726,7 +727,7 @@ nmethod::nmethod(
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level,
- const GrowableArrayView<BufferBlob*>& native_invokers
+ const GrowableArrayView<RuntimeStub*>& native_invokers
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
@@ -1057,7 +1058,7 @@ void nmethod::copy_values(GrowableArray<Metadata*>* array) {
}
void nmethod::free_native_invokers() {
- for (BufferBlob** it = native_invokers_begin(); it < native_invokers_end(); it++) {
+ for (RuntimeStub** it = native_invokers_begin(); it < native_invokers_end(); it++) {
CodeCache::free(*it);
}
}
@@ -2696,7 +2697,7 @@ void nmethod::print_pcs_on(outputStream* st) {
void nmethod::print_native_invokers() {
ResourceMark m; // in case methods get printed via debugger
tty->print_cr("Native invokers:");
- for (BufferBlob** itt = native_invokers_begin(); itt < native_invokers_end(); itt++) {
+ for (RuntimeStub** itt = native_invokers_begin(); itt < native_invokers_end(); itt++) {
(*itt)->print_on(tty);
}
}
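
The begin/end accessors changed below follow nmethod's offset-delimited layout: each section ends where the next one begins, so retyping the native-invoker slots from BufferBlob** to RuntimeStub** is purely a cast change. A stripped-down sketch (with a hypothetical stand-in type):

    struct RuntimeStubStandIn;          // stand-in for HotSpot's RuntimeStub

    struct BlobLayout {
      char* header;
      int   native_invokers_offset;
      int   handler_table_offset;       // the next section bounds this one
      RuntimeStubStandIn** native_invokers_begin() const {
        return (RuntimeStubStandIn**)(header + native_invokers_offset);
      }
      RuntimeStubStandIn** native_invokers_end() const {
        return (RuntimeStubStandIn**)(header + handler_table_offset);
      }
    };
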
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index 592fa8f7017fae6c59afcc22a571403a181d8c7d..6eb7f6f81b649284c2878bb5e40b852bc18526f4 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -314,7 +314,7 @@ class nmethod : public CompiledMethod {
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level,
- const GrowableArrayView<BufferBlob*>& native_invokers
+ const GrowableArrayView<RuntimeStub*>& native_invokers
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
@@ -363,7 +363,7 @@ class nmethod : public CompiledMethod {
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level,
- const GrowableArrayView<BufferBlob*>& native_invokers = GrowableArrayView<BufferBlob*>::EMPTY
+ const GrowableArrayView<RuntimeStub*>& native_invokers = GrowableArrayView<RuntimeStub*>::EMPTY
#if INCLUDE_JVMCI
, char* speculations = NULL,
int speculations_len = 0,
@@ -413,8 +413,8 @@ class nmethod : public CompiledMethod {
PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
address dependencies_end () const { return header_begin() + _native_invokers_offset ; }
- BufferBlob** native_invokers_begin() const { return (BufferBlob**)(header_begin() + _native_invokers_offset) ; }
- BufferBlob** native_invokers_end () const { return (BufferBlob**)(header_begin() + _handler_table_offset); }
+ RuntimeStub** native_invokers_begin() const { return (RuntimeStub**)(header_begin() + _native_invokers_offset) ; }
+ RuntimeStub** native_invokers_end () const { return (RuntimeStub**)(header_begin() + _handler_table_offset); }
address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
diff --git a/src/hotspot/share/compiler/compilationPolicy.cpp b/src/hotspot/share/compiler/compilationPolicy.cpp
index 100182e87074691c9c642864996ae1c6d94bde8f..1218ead4eea295b55e1590bc9947dc4da575e9ce 100644
--- a/src/hotspot/share/compiler/compilationPolicy.cpp
+++ b/src/hotspot/share/compiler/compilationPolicy.cpp
@@ -86,15 +86,15 @@ bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level)
(UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
}
-void CompilationPolicy::compile_if_required(const methodHandle& selected_method, TRAPS) {
- if (must_be_compiled(selected_method)) {
+void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
+ if (must_be_compiled(m)) {
// This path is unusual, mostly used by the '-Xcomp' stress test mode.
if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
// don't force compilation, resolve was on behalf of compiler
return;
}
- if (selected_method->method_holder()->is_not_initialized()) {
+ if (m->method_holder()->is_not_initialized()) {
// 'is_not_initialized' means not only '!is_initialized', but also that
// initialization has not been started yet ('!being_initialized')
// Do not force compilation of methods in uninitialized classes.
@@ -104,9 +104,11 @@ void CompilationPolicy::compile_if_required(const methodHandle& selected_method,
// even before classes are initialized.
return;
}
- CompileBroker::compile_method(selected_method, InvocationEntryBci,
- CompilationPolicy::initial_compile_level(selected_method),
- methodHandle(), 0, CompileTask::Reason_MustBeCompiled, THREAD);
+ CompLevel level = initial_compile_level(m);
+ if (PrintTieredEvents) {
+ print_event(COMPILE, m(), m(), InvocationEntryBci, level);
+ }
+ CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, CompileTask::Reason_MustBeCompiled, THREAD);
}
}
@@ -326,7 +328,7 @@ double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) {
// than specified by IncreaseFirstTierCompileThresholdAt percentage.
// The main intention is to keep enough free space for C2 compiled code
// to achieve peak performance if the code cache is under stress.
- if (!CompilationModeFlag::disable_intermediate() && TieredStopAtLevel == CompLevel_full_optimization && level != CompLevel_full_optimization) {
+ if (CompilerConfig::is_tiered() && !CompilationModeFlag::disable_intermediate() && is_c1_compile(level)) {
double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
@@ -337,7 +339,7 @@ double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) {
return 1;
}
-void CompilationPolicy::print_counters(const char* prefix, Method* m) {
+void CompilationPolicy::print_counters(const char* prefix, const Method* m) {
int invocation_count = m->invocation_count();
int backedge_count = m->backedge_count();
MethodData* mdh = m->method_data();
@@ -358,8 +360,7 @@ void CompilationPolicy::print_counters(const char* prefix, Method* m) {
}
// Print an event.
-void CompilationPolicy::print_event(EventType type, Method* m, Method* im,
- int bci, CompLevel level) {
+void CompilationPolicy::print_event(EventType type, const Method* m, const Method* im, int bci, CompLevel level) {
bool inlinee_event = m != im;
ttyLocker tty_lock;
@@ -509,6 +510,17 @@ void CompilationPolicy::initialize() {
#ifdef ASSERT
bool CompilationPolicy::verify_level(CompLevel level) {
+ if (TieredCompilation && level > TieredStopAtLevel) {
+ return false;
+ }
+ // Check if there is a compiler to process the requested level
+ if (!CompilerConfig::is_c1_enabled() && is_c1_compile(level)) {
+ return false;
+ }
+ if (!CompilerConfig::is_c2_or_jvmci_compiler_enabled() && is_c2_compile(level)) {
+ return false;
+ }
+
// AOT and interpreter levels are always valid.
if (level == CompLevel_aot || level == CompLevel_none) {
return true;
@@ -528,49 +540,54 @@ bool CompilationPolicy::verify_level(CompLevel level) {
CompLevel CompilationPolicy::highest_compile_level() {
- CompLevel max_level = CompLevel_none;
+ CompLevel level = CompLevel_none;
+ // Set up the maximum level available for the current compiler configuration.
if (!CompilerConfig::is_interpreter_only()) {
if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
- max_level = CompLevel_full_optimization;
+ level = CompLevel_full_optimization;
} else if (CompilerConfig::is_c1_enabled()) {
if (CompilerConfig::is_c1_simple_only()) {
- max_level = CompLevel_simple;
+ level = CompLevel_simple;
} else {
- max_level = CompLevel_full_profile;
+ level = CompLevel_full_profile;
}
}
- max_level = MAX2(max_level, (CompLevel) TieredStopAtLevel);
- }
- return max_level;
-}
-
-CompLevel CompilationPolicy::limit_level(CompLevel level) {
- if (CompilationModeFlag::quick_only()) {
- level = MIN2(level, CompLevel_simple);
}
- assert(verify_level(level), "Invalid compilation level %d", level);
- if (level <= TieredStopAtLevel) {
- return level;
- }
- // Some compilation levels are not valid depending on a compilation mode:
- // a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
- // b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
- // c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
- // The invalid levels are actually sequential so a single comparison is sufficient.
- // Down here we already have (level > TieredStopAtLevel), which also implies that
- // (TieredStopAtLevel < Highest Possible Level), so we need to return a level that is:
- // a) a max level that is strictly less than the highest for a given compilation mode
- // b) less or equal to TieredStopAtLevel
- if (CompilationModeFlag::normal() || CompilationModeFlag::quick_only()) {
- return (CompLevel)TieredStopAtLevel;
+ // Clamp the maximum level with TieredStopAtLevel.
+ if (TieredCompilation) {
+ level = MIN2(level, (CompLevel) TieredStopAtLevel);
+ }
+
+ // Fix it up if after the clamping it has become invalid.
+ // Bring it monotonically down depending on the next available level for
+ // the compilation mode.
+ if (!CompilationModeFlag::normal()) {
+ // a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
+ // b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
+ // c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
+ if (CompilationModeFlag::quick_only()) {
+ if (level == CompLevel_limited_profile || level == CompLevel_full_profile || level == CompLevel_full_optimization) {
+ level = CompLevel_simple;
+ }
+ } else if (CompilationModeFlag::high_only()) {
+ if (level == CompLevel_simple || level == CompLevel_limited_profile || level == CompLevel_full_profile) {
+ level = CompLevel_none;
+ }
+ } else if (CompilationModeFlag::high_only_quick_internal()) {
+ if (level == CompLevel_limited_profile || level == CompLevel_full_profile) {
+ level = CompLevel_simple;
+ }
+ }
}
- if (CompilationModeFlag::high_only() || CompilationModeFlag::high_only_quick_internal()) {
- return MIN2(CompLevel_none, (CompLevel)TieredStopAtLevel);
- }
+ assert(verify_level(level), "Invalid highest compilation level: %d", level);
+ return level;
+}
- ShouldNotReachHere();
- return CompLevel_any;
+CompLevel CompilationPolicy::limit_level(CompLevel level) {
+ level = MIN2(level, highest_compile_level());
+ assert(verify_level(level), "Invalid compilation level: %d", level);
+ return level;
}
CompLevel CompilationPolicy::initial_compile_level(const methodHandle& method) {
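
A worked walk-through of the new clamping, under hypothetical flag settings -XX:TieredStopAtLevel=3 with CompilationModeFlag::high_only() and C2 available:

    // highest_compile_level(): starts at CompLevel_full_optimization (4)
    //   clamp by TieredStopAtLevel:      4 -> CompLevel_full_profile (3)
    //   high_only fixup (1..3 invalid):  3 -> CompLevel_none (0)
    // limit_level(CompLevel_simple) == MIN2(1, 0) == CompLevel_none,
    // matching the old special-cased MIN2(CompLevel_none, TieredStopAtLevel).
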
@@ -658,9 +675,8 @@ CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue) {
methodHandle max_method_h(Thread::current(), max_method);
- if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile &&
- TieredStopAtLevel > CompLevel_full_profile &&
- max_method != NULL && is_method_profiled(max_method_h)) {
+ if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
+ max_method != NULL && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
max_task->set_comp_level(CompLevel_limited_profile);
if (CompileBroker::compilation_is_complete(max_method_h, max_task->osr_bci(), CompLevel_limited_profile)) {
@@ -740,7 +756,7 @@ nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle
// Check if the method can be compiled, change level if necessary
void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level, TRAPS) {
- assert(verify_level(level) && level <= TieredStopAtLevel, "Invalid compilation level %d", level);
+ assert(verify_level(level), "Invalid compilation level requested: %d", level);
if (level == CompLevel_none) {
if (mh->has_compiled_code()) {
@@ -1038,33 +1054,18 @@ CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_le
if (common(method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
next_level = CompLevel_full_optimization;
} else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply(i, b, cur_level, method)) {
-#if INCLUDE_JVMCI
- if (EnableJVMCI && UseJVMCICompiler) {
- // Since JVMCI takes a while to warm up, its queue inevitably backs up during
- // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root
- // compilation method and all potential inlinees have mature profiles (which
- // includes type profiling). If it sees immature profiles, JVMCI's inliner
- // can perform pathologically bad (e.g., causing OutOfMemoryErrors due to
- // exploring/inlining too many graphs). Since a rewrite of the inliner is
- // in progress, we simply disable the dialing back heuristic for now and will
- // revisit this decision once the new inliner is completed.
+ // C1-generated fully profiled code is about 30% slower than the limited profile
+ // code that has only invocation and backedge counters. The observation is that
+ // if C2 queue is large enough we can spend too much time in the fully profiled code
+ // while waiting for C2 to pick the method from the queue. To alleviate this problem
+ // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
+ // we choose to compile a limited profiled version and then recompile with full profiling
+ // when the load on C2 goes down.
+ if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
+ Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
+ next_level = CompLevel_limited_profile;
+ } else {
next_level = CompLevel_full_profile;
- } else
-#endif
- {
- // C1-generated fully profiled code is about 30% slower than the limited profile
- // code that has only invocation and backedge counters. The observation is that
- // if C2 queue is large enough we can spend too much time in the fully profiled code
- // while waiting for C2 to pick the method from the queue. To alleviate this problem
- // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
- // we choose to compile a limited profiled version and then recompile with full profiling
- // when the load on C2 goes down.
- if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
- Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
- next_level = CompLevel_limited_profile;
- } else {
- next_level = CompLevel_full_profile;
- }
}
}
break;
diff --git a/src/hotspot/share/compiler/compilationPolicy.hpp b/src/hotspot/share/compiler/compilationPolicy.hpp
index e4c0e643df657941d4d5c5b84d3531dd9981fa9b..0c039a8d593477bb238f75993524e214cbf9ddd9 100644
--- a/src/hotspot/share/compiler/compilationPolicy.hpp
+++ b/src/hotspot/share/compiler/compilationPolicy.hpp
@@ -184,7 +184,7 @@ class CompilationPolicy : AllStatic {
// loop_event checks if a method should be OSR compiled at a different
// level.
static CompLevel loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread);
- static void print_counters(const char* prefix, Method* m);
+ static void print_counters(const char* prefix, const Method* m);
// Has a method been long around?
// We don't remove old methods from the compile queue even if they have
// very low activity (see select_task()).
@@ -216,7 +216,7 @@ class CompilationPolicy : AllStatic {
static void set_c2_count(int x) { _c2_count = x; }
enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
- static void print_event(EventType type, Method* m, Method* im, int bci, CompLevel level);
+ static void print_event(EventType type, const Method* m, const Method* im, int bci, CompLevel level);
// Check if the method can be compiled, change level if necessary
static void compile(const methodHandle& mh, int bci, CompLevel level, TRAPS);
// Simple methods are as good being compiled with C1 as C2.
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index 3e88c915236ef9b1091ea18aea3e50f4293c4a33..129798a96c25264dff2aee54ae4b7681fb8ba923 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -64,6 +64,7 @@
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
+#include "runtime/threadSMR.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
#include "utilities/debug.hpp"
@@ -1005,7 +1006,8 @@ void CompileBroker::init_compiler_sweeper_threads() {
_compilers[1]->set_num_compiler_threads(i + 1);
if (TraceCompilerThreads) {
ResourceMark rm;
- MutexLocker mu(Threads_lock);
+ ThreadsListHandle tlh; // get_thread_name() depends on the TLH.
+ assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
tty->print_cr("Added initial compiler thread %s", ct->get_thread_name());
}
}
@@ -1025,7 +1027,8 @@ void CompileBroker::init_compiler_sweeper_threads() {
_compilers[0]->set_num_compiler_threads(i + 1);
if (TraceCompilerThreads) {
ResourceMark rm;
- MutexLocker mu(Threads_lock);
+ ThreadsListHandle tlh; // get_thread_name() depends on the TLH.
+ assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
tty->print_cr("Added initial compiler thread %s", ct->get_thread_name());
}
}
@@ -1111,7 +1114,8 @@ void CompileBroker::possibly_add_compiler_threads(Thread* THREAD) {
_compilers[1]->set_num_compiler_threads(i + 1);
if (TraceCompilerThreads) {
ResourceMark rm;
- MutexLocker mu(Threads_lock);
+ ThreadsListHandle tlh; // get_thread_name() depends on the TLH.
+ assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
tty->print_cr("Added compiler thread %s (available memory: %dMB, available non-profiled code cache: %dMB)",
ct->get_thread_name(), (int)(available_memory/M), (int)(available_cc_np/M));
}
@@ -1131,7 +1135,8 @@ void CompileBroker::possibly_add_compiler_threads(Thread* THREAD) {
_compilers[0]->set_num_compiler_threads(i + 1);
if (TraceCompilerThreads) {
ResourceMark rm;
- MutexLocker mu(Threads_lock);
+ ThreadsListHandle tlh; // get_thread_name() depends on the TLH.
+ assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
tty->print_cr("Added compiler thread %s (available memory: %dMB, available profiled code cache: %dMB)",
ct->get_thread_name(), (int)(available_memory/M), (int)(available_cc_p/M));
}
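
The four hunks above swap a Threads_lock critical section for a ThreadsListHandle, which pins the current thread list so ct cannot exit while its name is read. The pattern in isolation (a sketch; the hunks assert membership rather than test it):

    {
      ResourceMark rm;
      ThreadsListHandle tlh;        // get_thread_name() requires a live TLH
      if (tlh.includes(ct)) {       // still on the list, i.e. not yet exited
        tty->print_cr("compiler thread %s", ct->get_thread_name());
      }
    }
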
diff --git a/src/hotspot/share/compiler/compileTask.cpp b/src/hotspot/share/compiler/compileTask.cpp
index c56ebdb888b5df614189ba2b558608da38f49ab3..d610d8bdcf814730579233cf65bd9649566b2d25 100644
--- a/src/hotspot/share/compiler/compileTask.cpp
+++ b/src/hotspot/share/compiler/compileTask.cpp
@@ -31,6 +31,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/klass.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.hpp"
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp
index cd29c595e87d75ce78e8b8db9a9e96fb498a6fe2..6ca90baf21fe7565af7c0087fa582f383b0df577 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -191,9 +191,6 @@ void set_client_emulation_mode_flags() {
if (FLAG_IS_DEFAULT(CodeCacheExpansionSize)) {
FLAG_SET_ERGO(CodeCacheExpansionSize, 32*K);
}
- if (FLAG_IS_DEFAULT(MetaspaceSize)) {
- FLAG_SET_ERGO(MetaspaceSize, MIN2(12*M, MaxMetaspaceSize));
- }
if (FLAG_IS_DEFAULT(MaxRAM)) {
// Do not use FLAG_SET_ERGO to update MaxRAM, as this will impact
// heap setting done based on available phys_mem (see Arguments::set_heap_size).
@@ -308,12 +305,19 @@ void CompilerConfig::set_compilation_policy_flags() {
8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
FLAG_SET_ERGO(SegmentedCodeCache, true);
}
+ if (Arguments::is_compiler_only()) { // -Xcomp
+ // Be much more aggressive in tiered mode with -Xcomp and exercise C2 more.
+ // We will first compile a level 3 version (C1 with full profiling), then do one invocation of it and
+ // compile a level 4 (C2) and then continue executing it.
+ if (FLAG_IS_DEFAULT(Tier3InvokeNotifyFreqLog)) {
+ FLAG_SET_CMDLINE(Tier3InvokeNotifyFreqLog, 0);
+ }
+ if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
+ FLAG_SET_CMDLINE(Tier4InvocationThreshold, 0);
+ }
+ }
}
- if (!UseInterpreter) { // -Xcomp
- Tier3InvokeNotifyFreqLog = 0;
- Tier4InvocationThreshold = 0;
- }
if (CompileThresholdScaling < 0) {
vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
@@ -444,12 +448,19 @@ void CompilerConfig::set_jvmci_specific_flags() {
if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize));
}
- if (FLAG_IS_DEFAULT(MetaspaceSize)) {
- FLAG_SET_DEFAULT(MetaspaceSize, MIN2(MAX2(12*M, MetaspaceSize), MaxMetaspaceSize));
- }
if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease));
}
+ if (FLAG_IS_DEFAULT(Tier3DelayOn)) {
+ // This effectively prevents the compile broker from scheduling tier 2
+ // (i.e., limited C1 profiling) compilations instead of tier 3
+ // (i.e., full C1 profiling) compilations when the tier 4 queue
+ // backs up (which is quite likely when using a non-AOT compiled JVMCI
+ // compiler). The observation based on jargraal is that the downside
+ // of skipping full profiling is much worse for performance than the
+ // queue backing up.
+ FLAG_SET_DEFAULT(Tier3DelayOn, 100000);
+ }
} // !UseJVMCINativeLibrary
} // UseJVMCICompiler
}
@@ -555,6 +566,8 @@ void CompilerConfig::ergo_initialize() {
if (NeverActAsServerClassMachine) {
set_client_emulation_mode_flags();
}
+ } else if (!has_c2() && !is_jvmci_compiler()) {
+ set_client_emulation_mode_flags();
}
set_legacy_emulation_flags();
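
Note that the -Xcomp thresholds are now applied only when the flags are still at their defaults, so explicit user settings take precedence. One way to inspect the resulting ergonomic values (standard HotSpot flags; exact output format varies by build):

    java -Xcomp -XX:+PrintFlagsFinal -version | grep -E 'Tier3InvokeNotifyFreqLog|Tier4InvocationThreshold'
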
diff --git a/src/hotspot/share/compiler/compilerDefinitions.hpp b/src/hotspot/share/compiler/compilerDefinitions.hpp
index 8e5e58f2c36f395735d6d5f2c2404e5f901c0e35..48036afc279fc2ab1d32bc0cfb18aa52f4a6ff73 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.hpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.hpp
@@ -158,7 +158,7 @@ public:
static bool is_c1_only() {
if (!is_interpreter_only() && has_c1()) {
const bool c1_only = !has_c2() && !is_jvmci_compiler();
- const bool tiered_degraded_to_c1_only = TieredStopAtLevel >= CompLevel_simple && TieredStopAtLevel < CompLevel_full_optimization;
+ const bool tiered_degraded_to_c1_only = TieredCompilation && TieredStopAtLevel >= CompLevel_simple && TieredStopAtLevel < CompLevel_full_optimization;
const bool c1_only_compilation_mode = CompilationModeFlag::quick_only();
return c1_only || tiered_degraded_to_c1_only || c1_only_compilation_mode;
}
@@ -177,9 +177,10 @@ public:
// Is the JVM in a configuration that permits only c1-compiled methods at level 1?
static bool is_c1_simple_only() {
if (is_c1_only()) {
- const bool tiered_degraded_to_level_1 = TieredStopAtLevel == CompLevel_simple;
+ const bool tiered_degraded_to_level_1 = TieredCompilation && TieredStopAtLevel == CompLevel_simple;
const bool c1_only_compilation_mode = CompilationModeFlag::quick_only();
- return tiered_degraded_to_level_1 || c1_only_compilation_mode;
+ const bool tiered_off = !TieredCompilation;
+ return tiered_degraded_to_level_1 || c1_only_compilation_mode || tiered_off;
}
return false;
}
diff --git a/src/hotspot/share/compiler/compilerOracle.cpp b/src/hotspot/share/compiler/compilerOracle.cpp
index b1cd911ddea4db5021ff88d06bb61317d4f70798..ab7fd4bcc199c2213619be62b5fb15e9e85fa797 100644
--- a/src/hotspot/share/compiler/compilerOracle.cpp
+++ b/src/hotspot/share/compiler/compilerOracle.cpp
@@ -416,7 +416,7 @@ static enum CompileCommand match_option_name(const char* line, int* bytes_read,
*bytes_read = 0;
char option_buf[256];
int matches = sscanf(line, "%255[a-zA-Z0-9]%n", option_buf, bytes_read);
- if (matches > 0) {
+ if (matches > 0 && strcasecmp(option_buf, "unknown") != 0) {
for (uint i = 0; i < ARRAY_SIZE(option_names); i++) {
if (strcasecmp(option_buf, option_names[i]) == 0) {
return static_cast<enum CompileCommand>(i);
diff --git a/src/hotspot/share/compiler/compiler_globals_pd.hpp b/src/hotspot/share/compiler/compiler_globals_pd.hpp
index faa1c11fe130348c97936573dcd56ad4351d1a2d..0cb8c6917ba63d49cf7930b7cee76726399266e4 100644
--- a/src/hotspot/share/compiler/compiler_globals_pd.hpp
+++ b/src/hotspot/share/compiler/compiler_globals_pd.hpp
@@ -70,7 +70,6 @@ define_pd_global(uintx, NonNMethodCodeHeapSize, 32*M);
define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 200*K);
-define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(4*M));
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
#define CI_COMPILER_COUNT 0
diff --git a/src/hotspot/share/compiler/methodLiveness.cpp b/src/hotspot/share/compiler/methodLiveness.cpp
index 8d99627cca2c8e744e1b0798af9ff043dab973cc..5f83eea6716e65406e2239906f7f724b2fd004f4 100644
--- a/src/hotspot/share/compiler/methodLiveness.cpp
+++ b/src/hotspot/share/compiler/methodLiveness.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -467,8 +467,6 @@ void MethodLiveness::BasicBlock::compute_gen_kill_range(ciBytecodeStream *bytes)
}
void MethodLiveness::BasicBlock::compute_gen_kill_single(ciBytecodeStream *instruction) {
- int localNum;
-
// We prohibit _gen and _kill from having locals in common. If we
// know that one is definitely going to be applied before the other,
// we could save some computation time by relaxing this prohibition.
@@ -693,7 +691,7 @@ void MethodLiveness::BasicBlock::compute_gen_kill_single(ciBytecodeStream *instr
case Bytecodes::_lstore:
case Bytecodes::_dstore:
- store_two(localNum = instruction->get_index());
+ store_two(instruction->get_index());
break;
case Bytecodes::_lstore_0:
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
index bd937cffca474f8cea2a82055ee69427c37f47e8..867ad32f9bea424bf01ed71142fa20e7402e44a4 100644
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
@@ -31,6 +31,7 @@
#include "gc/shared/locationPrinter.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.hpp
index 9b7ee9e93e74f19b55070867d34cb68736fcc693..8d009a9e19f3f134a42ad554913786c7e7034f65 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp
@@ -53,6 +53,10 @@ class G1BarrierSet: public CardTableBarrierSet {
G1BarrierSet(G1CardTable* table);
~G1BarrierSet() { }
+ virtual bool card_mark_must_follow_store() const {
+ return true;
+ }
+
// Add "pre_val" to a set of objects that may have been disconnected from the
// pre-marking object graph.
static void enqueue(oop pre_val);
diff --git a/src/hotspot/share/gc/g1/g1CardTable.hpp b/src/hotspot/share/gc/g1/g1CardTable.hpp
index 3540bb9411a813293379c6e45dc6e943971b0941..925ae098b63504128a7ffdedd139f83208438ac8 100644
--- a/src/hotspot/share/gc/g1/g1CardTable.hpp
+++ b/src/hotspot/share/gc/g1/g1CardTable.hpp
@@ -79,7 +79,7 @@ public:
STATIC_ASSERT(BitsPerByte == 8);
static const size_t WordAlreadyScanned = (SIZE_MAX / 255) * g1_card_already_scanned;
- G1CardTable(MemRegion whole_heap): CardTable(whole_heap, /* scanned concurrently */ true), _listener() {
+ G1CardTable(MemRegion whole_heap): CardTable(whole_heap), _listener() {
_listener.set_card_table(this);
}
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 8f33eac44430d8d77388489c44df6fe3325c63cc..9453aac73460f026d98d4e395a3d3e859d97be4d 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -93,6 +93,7 @@
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/heapInspection.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
@@ -1497,8 +1498,8 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
os::trace_page_sizes_for_requested_size(description,
size,
- preferred_page_size,
page_size,
+ preferred_page_size,
rs.base(),
rs.size());
@@ -1794,12 +1795,9 @@ void G1CollectedHeap::ref_processing_init() {
// * Discovery is atomic - i.e. not concurrent.
// * Reference discovery will not need a barrier.
- bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
-
// Concurrent Mark ref processor
_ref_processor_cm =
new ReferenceProcessor(&_is_subject_to_discovery_cm,
- mt_processing, // mt processing
ParallelGCThreads, // degree of mt processing
(ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery
MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
@@ -1810,7 +1808,6 @@ void G1CollectedHeap::ref_processing_init() {
// STW ref processor
_ref_processor_stw =
new ReferenceProcessor(&_is_subject_to_discovery_stw,
- mt_processing, // mt processing
ParallelGCThreads, // degree of mt processing
(ParallelGCThreads > 1), // mt discovery
ParallelGCThreads, // degree of mt discovery
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
index d9904f2fc8f120019b0da335e559451b461118df..587aa33618337d4fc1ff7f2236055e716bee7038 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,25 @@ void G1CollectionSetCandidates::remove(uint num_regions) {
}
}
+void G1CollectionSetCandidates::remove_from_end(uint num_remove, size_t wasted) {
+ assert(num_remove <= num_remaining(), "trying to remove more regions than remaining");
+
+#ifdef ASSERT
+ size_t reclaimable = 0;
+
+ for (uint i = 0; i < num_remove; i++) {
+ uint cur_idx = _num_regions - i - 1;
+ reclaimable += at(cur_idx)->reclaimable_bytes();
+ // Make sure we crash if we access it.
+ _regions[cur_idx] = NULL;
+ }
+
+ assert(reclaimable == wasted, "Recalculated reclaimable inconsistent");
+#endif
+ _num_regions -= num_remove;
+ _remaining_reclaimable_bytes -= wasted;
+}
+
void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) {
for (uint i = _front_idx; i < _num_regions; i++) {
HeapRegion* r = _regions[i];
@@ -45,6 +64,16 @@ void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) {
}
}
+void G1CollectionSetCandidates::iterate_backwards(HeapRegionClosure* cl) {
+ for (uint i = _num_regions; i > _front_idx; i--) {
+ HeapRegion* r = _regions[i - 1];
+ if (cl->do_heap_region(r)) {
+ cl->set_incomplete();
+ break;
+ }
+ }
+}
+
#ifndef PRODUCT
void G1CollectionSetCandidates::verify() const {
guarantee(_front_idx <= _num_regions, "Index: %u Num_regions: %u", _front_idx, _num_regions);
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp
index ce358d122aa74f86e666ee1d1d57ffd28b1a350e..3086cff0903af2544f658a0ba8ddc5c049d35dad 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -74,10 +74,16 @@ public:
return res;
}
+ // Remove num_regions from the front of the collection set candidate list.
void remove(uint num_regions);
+ // Remove num_remove regions from the back of the collection set candidate list.
+ void remove_from_end(uint num_remove, size_t wasted);
// Iterate over all remaining collection set candidate regions.
void iterate(HeapRegionClosure* cl);
+ // Iterate over all remaining collection set candidate regions from the end
+ // to the beginning of the set.
+ void iterate_backwards(HeapRegionClosure* cl);
// Return the number of candidate regions remaining.
uint num_remaining() { return _num_regions - _front_idx; }
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
index 362e39bbeab6c5c46794435cd8142e2103a1d8ee..c982caf7d66e09023faf84b3711aab8112b62b3c 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -257,6 +257,60 @@ bool G1CollectionSetChooser::should_add(HeapRegion* hr) {
hr->rem_set()->is_complete();
}
+// Closure implementing early pruning (removal) of regions meeting the
+// G1HeapWastePercent criterion: regions are removed until either _max_pruned
+// regions have been removed (to guarantee forward progress in evacuation) or
+// the waste accumulated by the removed regions would exceed _max_wasted.
+class G1PruneRegionClosure : public HeapRegionClosure {
+ uint _num_pruned;
+ size_t _cur_wasted;
+
+ uint const _max_pruned;
+ size_t const _max_wasted;
+
+public:
+ G1PruneRegionClosure(uint max_pruned, size_t max_wasted) :
+ _num_pruned(0), _cur_wasted(0), _max_pruned(max_pruned), _max_wasted(max_wasted) { }
+
+ virtual bool do_heap_region(HeapRegion* r) {
+ size_t const reclaimable = r->reclaimable_bytes();
+ if (_num_pruned >= _max_pruned ||
+ _cur_wasted + reclaimable > _max_wasted) {
+ return true;
+ }
+ r->rem_set()->clear(true /* cardset_only */);
+ _cur_wasted += reclaimable;
+ _num_pruned++;
+ return false;
+ }
+
+ uint num_pruned() const { return _num_pruned; }
+ size_t wasted() const { return _cur_wasted; }
+};
+
+void G1CollectionSetChooser::prune(G1CollectionSetCandidates* candidates) {
+ G1Policy* p = G1CollectedHeap::heap()->policy();
+
+ uint min_old_cset_length = p->calc_min_old_cset_length(candidates);
+ uint num_candidates = candidates->num_regions();
+
+ if (min_old_cset_length < num_candidates) {
+ size_t allowed_waste = p->allowed_waste_in_collection_set();
+
+ G1PruneRegionClosure prune_cl(num_candidates - min_old_cset_length,
+ allowed_waste);
+ candidates->iterate_backwards(&prune_cl);
+
+ log_debug(gc, ergo, cset)("Pruned %u regions out of %u, leaving " SIZE_FORMAT " bytes waste (allowed " SIZE_FORMAT ")",
+ prune_cl.num_pruned(),
+ candidates->num_regions(),
+ prune_cl.wasted(),
+ allowed_waste);
+
+ candidates->remove_from_end(prune_cl.num_pruned(), prune_cl.wasted());
+ }
+}
+
G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkGang* workers, uint max_num_regions) {
uint num_workers = workers->active_workers();
uint chunk_size = calculate_work_chunk_size(num_workers, max_num_regions);
@@ -265,6 +319,7 @@ G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkGang* workers, uint
workers->run_task(&cl, num_workers);
G1CollectionSetCandidates* result = cl.get_sorted_candidates();
+ prune(result);
result->verify();
return result;
}
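
The pruning pass relies on the candidate list already being sorted by decreasing GC efficiency: iterating backwards removes the least efficient regions first, bounded by both a region budget and a waste budget. A minimal standalone sketch of the same loop shape (plain C++ over a vector of hypothetical reclaimable-byte counts, not the HeapRegionClosure machinery):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Prune from the back of a list sorted by decreasing gc efficiency:
    // stop at the first region that would exceed either budget.
    static size_t prune(std::vector<size_t>& reclaimable,
                        size_t max_pruned, size_t max_wasted) {
        size_t pruned = 0, wasted = 0;
        while (pruned < max_pruned && !reclaimable.empty()) {
            size_t r = reclaimable.back();
            if (wasted + r > max_wasted) break;   // waste budget exhausted
            wasted += r;
            pruned++;
            reclaimable.pop_back();               // mirrors remove_from_end()
        }
        return pruned;
    }

    int main() {
        std::vector<size_t> regions = {64, 32, 16, 8, 4};  // hypothetical byte counts
        size_t n = prune(regions, /*max_pruned=*/3, /*max_wasted=*/20);
        std::printf("pruned %zu regions, %zu remain\n", n, regions.size());
        return 0;
    }
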
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp
index 4d6a77abc191bf987a84d4751135b392ef2fc166..2fadcd8945b89afcec9896e811e2aeb91cb2e084 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,11 @@ class WorkGang;
// methods.
class G1CollectionSetChooser : public AllStatic {
static uint calculate_work_chunk_size(uint num_workers, uint num_regions);
+
+ // Remove regions from the collection set candidates as long as the
+ // G1HeapWastePercent criterion is met. Keep at least the minimum number of old
+ // regions to guarantee some progress.
+ static void prune(G1CollectionSetCandidates* candidates);
public:
static size_t mixed_gc_live_threshold_bytes() {
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 53b31b22577b8855927cc3eeb40e6b423f15a43c..64780555a35859000b4895309c8406bf8be177d1 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -58,6 +58,7 @@
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index 2e0171a72eeda6110da6d680c2de76d21bdd4d30..93eddc65eceb073cc29882be27e3e2bef3d6bc37 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -50,10 +50,6 @@ class G1RegionToSpaceMapper;
class G1SurvivorRegions;
class ThreadClosure;
-PRAGMA_DIAG_PUSH
-// warning C4522: multiple assignment operators specified
-PRAGMA_DISABLE_MSVC_WARNING(4522)
-
// This is a container class for either an oop or a continuation address for
// mark stack entries. Both are pushed onto the mark stack.
class G1TaskQueueEntry {
@@ -89,8 +85,6 @@ public:
bool is_null() const { return _holder == NULL; }
};
-PRAGMA_DIAG_POP
-
typedef GenericTaskQueue<G1TaskQueueEntry, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
diff --git a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
index ac5ba8834fb81a432f314614b7733a9c38d7ca2d..80dd4288b20b7ba5b2b2aece32b9960a833b63b2 100644
--- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp
@@ -81,7 +81,7 @@ class G1AdjustRegionClosure : public HeapRegionClosure {
G1FullGCAdjustTask::G1FullGCAdjustTask(G1FullCollector* collector) :
G1FullGCTask("G1 Adjust", collector),
_root_processor(G1CollectedHeap::heap(), collector->workers()),
- _references_done(0),
+ _references_done(false),
_weak_proc_task(collector->workers()),
_hrclaimer(collector->workers()),
_adjust(collector),
@@ -99,8 +99,7 @@ void G1FullGCAdjustTask::work(uint worker_id) {
marker->preserved_stack()->adjust_during_full_gc();
// Adjust the weak roots.
-
- if (Atomic::add(&_references_done, 1u) == 1u) { // First incr claims task.
+ if (!Atomic::cmpxchg(&_references_done, false, true)) {
G1CollectedHeap::heap()->ref_processor_stw()->weak_oops_do(&_adjust);
}
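
Replacing the atomic counter with a bool makes the claim protocol explicit: Atomic::cmpxchg returns the previous value, so exactly one worker observes false and performs the serial weak-root pass. The same one-shot claim idiom in standalone form, using std::atomic rather than HotSpot's Atomic class:

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // One-shot claim: the first worker to flip the flag runs the serial step.
    static std::atomic<bool> references_done{false};

    static void worker(int id) {
        bool expected = false;
        // compare_exchange_strong succeeds for exactly one thread, mirroring
        // !Atomic::cmpxchg(&_references_done, false, true) in the patch.
        if (references_done.compare_exchange_strong(expected, true)) {
            std::printf("worker %d claimed the serial task\n", id);
        }
    }

    int main() {
        std::vector<std::thread> pool;
        for (int i = 0; i < 4; i++) pool.emplace_back(worker, i);
        for (auto& t : pool) t.join();
        return 0;
    }
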
diff --git a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp
index 2ddcc9853fed760509475d0bea0641e45ea521cc..0b33b485452f8faf975266b7f04e2299ba5c2409 100644
--- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp
@@ -38,7 +38,7 @@ class G1CollectedHeap;
class G1FullGCAdjustTask : public G1FullGCTask {
G1RootProcessor _root_processor;
- volatile uint _references_done; // Atomic counter / bool
+ volatile bool _references_done;
WeakProcessor::Task _weak_proc_task;
HeapRegionClaimer _hrclaimer;
G1AdjustClosure _adjust;
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
index 75d8f6563051a92150c7a02b34b18b07ca7addd2..755929968988e81138d022cddb506d885cd7223b 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_GC_G1_G1FULLGCMARKER_INLINE_HPP
#define SHARE_GC_G1_G1FULLGCMARKER_INLINE_HPP
+#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
diff --git a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp
index 103fb4eca8843653bead7454e95eea48c1405ed9..a730a44e8b8b21a0d203c2772be45016cf57fe12 100644
--- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,8 +52,7 @@ G1FullGCReferenceProcessingExecutor::G1RefProcTaskProxy::G1RefProcTaskProxy(Proc
G1FullCollector* collector) :
AbstractGangTask("G1 reference processing task"),
_proc_task(proc_task),
- _collector(collector),
- _terminator(_collector->workers(), _collector->oop_queue_set()) { }
+ _collector(collector) { }
void G1FullGCReferenceProcessingExecutor::G1RefProcTaskProxy::work(uint worker_id) {
G1FullGCMarker* marker = _collector->marker(worker_id);
diff --git a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp
index 657421490a01b048b4ba3b07fb365935310ac2c0..9887d0f130ce47af518253d97a93112706c3ec05 100644
--- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,6 @@ private:
typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
ProcessTask& _proc_task;
G1FullCollector* _collector;
- TaskTerminator _terminator;
public:
G1RefProcTaskProxy(ProcessTask& proc_task,
diff --git a/src/hotspot/share/gc/g1/g1HeapTransition.cpp b/src/hotspot/share/gc/g1/g1HeapTransition.cpp
index a6cef0bed7e05176842adc3443fdd8e8de25b2ad..c3d1e740ab487cfa3a9218d64fc6db9c824f5a41 100644
--- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1Policy.hpp"
#include "logging/logStream.hpp"
-#include "memory/metaspace.hpp"
+#include "memory/metaspaceUtils.hpp"
G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) :
_eden_length(g1_heap->eden_regions_count()),
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
index 7dee921bc7957b7d1c1799c8ac5cc72415eb4a41..0e58094a871c3340db97a844388727f872bf8e8c 100644
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1278,22 +1278,16 @@ bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
return false;
}
-
- // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
- size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
- double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
- double threshold = (double) G1HeapWastePercent;
- if (reclaimable_percent <= threshold) {
- log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
- false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
- return false;
- }
- log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
- true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
+ // Go through all regions - we already pruned regions not worth collecting
+ // during candidate selection.
return true;
}
-uint G1Policy::calc_min_old_cset_length() const {
+size_t G1Policy::allowed_waste_in_collection_set() const {
+ return G1HeapWastePercent * _g1h->capacity() / 100;
+}
+
+uint G1Policy::calc_min_old_cset_length(G1CollectionSetCandidates* candidates) const {
// The min old CSet region bound is based on the maximum desired
// number of mixed GCs after a cycle. I.e., even if some old regions
// look expensive, we should add them to the CSet anyway to make
@@ -1304,7 +1298,7 @@ uint G1Policy::calc_min_old_cset_length() const {
// to the CSet candidates in the first place, not how many remain, so
// that the result is the same during all mixed GCs that follow a cycle.
- const size_t region_num = _collection_set->candidates()->num_regions();
+ const size_t region_num = candidates->num_regions();
const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
size_t result = region_num / gc_num;
// emulate ceiling
@@ -1347,7 +1341,7 @@ void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* c
double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction();
- const uint min_old_cset_length = calc_min_old_cset_length();
+ const uint min_old_cset_length = calc_min_old_cset_length(candidates);
const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length());
const uint max_optional_regions = max_old_cset_length - min_old_cset_length;
bool check_time_remaining = use_adaptive_young_list_length();
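
allowed_waste_in_collection_set() is plain integer percentage arithmetic over the current heap capacity. For a sense of scale, a small worked example (assuming a hypothetical 8 GB heap and the default G1HeapWastePercent of 5):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical numbers: default G1HeapWastePercent (5) on an 8 GB heap.
        uint64_t heap_waste_percent = 5;
        uint64_t capacity = 8ULL * 1024 * 1024 * 1024;
        // Same integer arithmetic as allowed_waste_in_collection_set().
        uint64_t allowed_waste = heap_waste_percent * capacity / 100;
        std::printf("allowed waste: %llu MB\n",
                    (unsigned long long)(allowed_waste / (1024 * 1024)));
        return 0;
    }
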
diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp
index d3d30805c5d8bbb0cb4961195198036dbc2914b0..5ee3bb0c754cb0cc97d72bb6e4f5c8afdb3cb5d0 100644
--- a/src/hotspot/share/gc/g1/g1Policy.hpp
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -246,7 +246,7 @@ public:
// Calculate the minimum number of old regions we'll add to the CSet
// during a mixed GC.
- uint calc_min_old_cset_length() const;
+ uint calc_min_old_cset_length(G1CollectionSetCandidates* candidates) const;
// Calculate the maximum number of old regions we'll add to the CSet
// during a mixed GC.
@@ -347,6 +347,8 @@ public:
bool next_gc_should_be_mixed(const char* true_action_str,
const char* false_action_str) const;
+ // Amount of allowed waste in bytes in the collection set.
+ size_t allowed_waste_in_collection_set() const;
// Calculate and return the number of initial and optional old gen regions from
// the given collection set candidates and the remaining time.
void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.cpp b/src/hotspot/share/gc/g1/g1RootProcessor.cpp
index 8ed58797955f019e0ed53480ac5c042eb11250a7..93a45b9ffaeafde1d4ed375dafc18376769a1d0b 100644
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp
@@ -75,7 +75,7 @@ void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_id)
}
// CodeCache is already processed in java roots
- _process_strong_tasks.all_tasks_completed(n_workers(), G1RP_PS_CodeCache_oops_do);
+ _process_strong_tasks.all_tasks_claimed(G1RP_PS_CodeCache_oops_do);
}
// Adaptor to pass the closures to the strong roots in the VM.
@@ -106,9 +106,8 @@ void G1RootProcessor::process_strong_roots(OopClosure* oops,
// CodeCache is already processed in java roots
// refProcessor is not needed since we are inside a safe point
- _process_strong_tasks.all_tasks_completed(n_workers(),
- G1RP_PS_CodeCache_oops_do,
- G1RP_PS_refProcessor_oops_do);
+ _process_strong_tasks.all_tasks_claimed(G1RP_PS_CodeCache_oops_do,
+ G1RP_PS_refProcessor_oops_do);
}
// Adaptor to pass the closures to all the roots in the VM.
@@ -144,7 +143,7 @@ void G1RootProcessor::process_all_roots(OopClosure* oops,
process_code_cache_roots(blobs, NULL, 0);
// refProcessor is not needed since we are inside a safe point
- _process_strong_tasks.all_tasks_completed(n_workers(), G1RP_PS_refProcessor_oops_do);
+ _process_strong_tasks.all_tasks_claimed(G1RP_PS_refProcessor_oops_do);
}
void G1RootProcessor::process_java_roots(G1RootClosures* closures,
diff --git a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp
index 4c122ab41bd0f00694e2091b9a17b33b8e299227..63ea389e072fbd0b257b44c5c803620e683f0fae 100644
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp
@@ -145,6 +145,13 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
PerRegionTable* prt = find_region_table(ind, from_hr);
if (prt == NULL) {
MutexLocker x(_m, Mutex::_no_safepoint_check_flag);
+
+ // Recheck whether the region has been coarsened while we were acquiring the lock.
+ if (is_region_coarsened(from_hrm_ind)) {
+ assert(contains_reference_locked(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from));
+ return;
+ }
+
// Confirm that it's really not there...
prt = find_region_table(ind, from_hr);
if (prt == NULL) {
@@ -160,6 +167,8 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
return;
}
+ // Sparse PRT returned overflow (sparse table is full)
+
if (_n_fine_entries == _max_fine_entries) {
prt = delete_region_table(num_added_by_coarsening);
// There is no need to clear the links to the 'all' list here:
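
The recheck above is the standard check/lock/recheck pattern: the lock-free lookup that failed before the lock was acquired must be repeated under the lock, because another thread may have coarsened the region in the meantime. A standalone sketch of that shape (std::mutex and a set of hypothetical region indices in place of the PerRegionTable machinery):

    #include <mutex>
    #include <unordered_set>

    // Recheck-under-lock: a state checked optimistically outside the lock must
    // be rechecked once the lock is held, because it may have changed meanwhile.
    class CoarseningTable {
        std::mutex _m;
        std::unordered_set<unsigned> _coarsened;  // hypothetical region indices
    public:
        void coarsen(unsigned idx) {
            std::lock_guard<std::mutex> g(_m);
            _coarsened.insert(idx);
        }
        void add_reference(unsigned from_idx) {
            // ... lock-free fast path elided ...
            std::lock_guard<std::mutex> g(_m);
            if (_coarsened.count(from_idx) != 0) {
                return;  // another thread coarsened this region; nothing to add
            }
            // ... slow path: create/fill the fine-grained table ...
        }
    };

    int main() { CoarseningTable t; t.add_reference(42); return 0; }
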
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp
index 62b52fecd9dbaa25f32de31feefdff7ca40822e2..defb1ffd04e117c25fee6b13d23e2ff14805a402 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp
@@ -215,6 +215,15 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top;
}
+// Only used by oldgen allocation.
+bool MutableSpace::needs_expand(size_t word_size) const {
+ assert_lock_strong(ExpandHeap_lock);
+ // Holding the lock means end is stable. So while top may be advancing
+ // via concurrent allocations, there is no need to order the reads of top
+ // and end here, unlike in cas_allocate.
+ return pointer_delta(end(), top()) < word_size;
+}
+
void MutableSpace::oop_iterate(OopIterateClosure* cl) {
HeapWord* obj_addr = bottom();
HeapWord* t = top();
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.hpp b/src/hotspot/share/gc/parallel/mutableSpace.hpp
index 3e9b0a1514c822096d44c5060dbd01ee472be4e5..b6bb131828f22b0da41bb9ad0247bde8da816b00 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.hpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.hpp
@@ -142,6 +142,11 @@ class MutableSpace: public CHeapObj<mtGC> {
virtual HeapWord* cas_allocate(size_t word_size);
// Optional deallocation. Used in NUMA-allocator.
bool cas_deallocate(HeapWord *obj, size_t size);
+ // Return true if this space needs to be expanded in order to satisfy an
+ // allocation request of the indicated size. Concurrent allocations and
+ // resizes may change the result of a later call. Used by oldgen allocator.
+ // precondition: holding ExpandHeap_lock
+ bool needs_expand(size_t word_size) const;
// Iteration.
void oop_iterate(OopIterateClosure* cl);
diff --git a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
index f26992f41207ad2663f90b4d0354321b6c8d79a4..d08762b2ca15cade115e6e0ee2793fe77b017e78 100644
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
@@ -50,7 +50,8 @@ ParMarkBitMap::initialize(MemRegion covered_region)
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
- os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes, page_sz,
+ const size_t used_page_sz = ReservedSpace::actual_reserved_page_size(rs);
+ os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes, used_page_sz,
rs.base(), rs.size());
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index 25af816bf2d94e5a1f2713378a4b115a0b0ca605..550002367c86569097cbc52292a811cee692a2c3 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -44,6 +44,7 @@
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index e4cdb776453e48e99838188e5eb13f6e2fe8e4f9..689400fbe040ea68623db06986b0342d90382b99 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -39,7 +39,6 @@
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
-#include "memory/metaspace.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"
diff --git a/src/hotspot/share/gc/parallel/psCardTable.hpp b/src/hotspot/share/gc/parallel/psCardTable.hpp
index df1e0158727a1c8d192b207b14260fcb1afca82f..d912c6567412503e1c08ced94824b24d4b41c401 100644
--- a/src/hotspot/share/gc/parallel/psCardTable.hpp
+++ b/src/hotspot/share/gc/parallel/psCardTable.hpp
@@ -51,7 +51,7 @@ class PSCardTable: public CardTable {
};
public:
- PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {}
+ PSCardTable(MemRegion whole_heap) : CardTable(whole_heap) {}
static CardValue youngergen_card_val() { return youngergen_card; }
static CardValue verify_card_val() { return verify_card; }
diff --git a/src/hotspot/share/gc/parallel/psClosure.inline.hpp b/src/hotspot/share/gc/parallel/psClosure.inline.hpp
index 9100abf3bb9f53ddb32d0249d382355b5b67b019..8e347b20f7866d715d363acf459ddac2fec159e1 100644
--- a/src/hotspot/share/gc/parallel/psClosure.inline.hpp
+++ b/src/hotspot/share/gc/parallel/psClosure.inline.hpp
@@ -32,6 +32,26 @@
#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"
+class PSAdjustWeakRootsClosure final: public OopClosure {
+public:
+ virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
+ virtual void do_oop(oop* p) {
+ if (PSScavenge::should_scavenge(p)) {
+ oop o = RawAccess::oop_load(p);
+ assert(o->is_forwarded(), "Objects are already forwarded before weak processing");
+ oop new_obj = o->forwardee();
+ if (log_develop_is_enabled(Trace, gc, scavenge)) {
+ ResourceMark rm; // required by internal_name()
+ log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
+ "forwarding",
+ new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
+ }
+ RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
+ }
+ }
+};
+
template <bool promote_immediately>
class PSRootsClosure: public OopClosure {
private:
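
PSAdjustWeakRootsClosure above assumes that scavenge has already installed forwarding pointers before weak processing runs, so the closure reduces to rewriting each surviving slot to the forwardee. The shape in miniature (a hypothetical Obj type stands in for oop, and a raw pointer store for the RawAccess call):

    #include <cstdio>
    #include <vector>

    // Miniature of the weak-root fixup: evacuation already installed forwarding
    // pointers, so we only rewrite each surviving slot to the forwardee.
    struct Obj { Obj* forwardee = nullptr; };

    int main() {
        Obj from_space_obj, to_space_obj;
        from_space_obj.forwardee = &to_space_obj;          // set during scavenge
        std::vector<Obj*> weak_roots = {&from_space_obj};  // hypothetical slots

        for (Obj*& slot : weak_roots) {
            if (slot != nullptr && slot->forwardee != nullptr) {
                slot = slot->forwardee;  // store the new location into the slot
            }
        }
        std::printf("root updated: %s\n",
                    weak_roots[0] == &to_space_obj ? "yes" : "no");
        return 0;
    }
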
diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.cpp b/src/hotspot/share/gc/parallel/psCompactionManager.cpp
index fecb67317f1d68309c8cf31a0c06a4c0dfd02d10..117817caacc9c9c3fb99678f2495adc120d037a7 100644
--- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp
@@ -179,3 +179,19 @@ void ParCompactionManager::push_shadow_region(size_t shadow_region) {
void ParCompactionManager::remove_all_shadow_regions() {
_shadow_region_array->clear();
}
+
+#ifdef ASSERT
+void ParCompactionManager::verify_all_marking_stack_empty() {
+ uint parallel_gc_threads = ParallelGCThreads;
+ for (uint i = 0; i <= parallel_gc_threads; i++) {
+ assert(_manager_array[i]->marking_stacks_empty(), "Marking stack should be empty");
+ }
+}
+
+void ParCompactionManager::verify_all_region_stack_empty() {
+ uint parallel_gc_threads = ParallelGCThreads;
+ for (uint i = 0; i <= parallel_gc_threads; i++) {
+ assert(_manager_array[i]->region_stack()->is_empty(), "Region stack should be empty");
+ }
+}
+#endif
diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.hpp
index 6b899231ac6ac3259ce51b73ab9c53b4dbe35faf..29be946c072ddf2768e54c56f3c093591db802c9 100644
--- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp
@@ -46,10 +46,6 @@ class ParCompactionManager : public CHeapObj<mtGC> {
friend class PCRefProcTask;
friend class MarkFromRootsTask;
friend class UpdateDensePrefixAndCompactionTask;
-
- public:
-
-
private:
typedef GenericTaskQueue<oop, mtGC> OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
@@ -69,7 +65,6 @@ class ParCompactionManager : public CHeapObj<mtGC> {
static RegionTaskQueueSet* _region_task_queues;
static PSOldGen* _old_gen;
-private:
OverflowTaskQueue<oop, mtGC> _marking_stack;
ObjArrayTaskQueue _objarray_stack;
size_t _next_shadow_region;
@@ -143,7 +138,7 @@ private:
RegionTaskQueue* region_stack() { return &_region_stack; }
- inline static ParCompactionManager* manager_array(uint index);
+ static ParCompactionManager* get_vmthread_cm() { return _manager_array[ParallelGCThreads]; }
ParCompactionManager();
@@ -196,13 +191,13 @@ private:
FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
virtual void do_void();
};
-};
-inline ParCompactionManager* ParCompactionManager::manager_array(uint index) {
- assert(_manager_array != NULL, "access of NULL manager_array");
- assert(index <= ParallelGCThreads, "out of range manager_array access");
- return _manager_array[index];
-}
+ // Called after marking.
+ static void verify_all_marking_stack_empty() NOT_DEBUG_RETURN;
+
+ // Region stacks hold regions in from-space; called after compaction.
+ static void verify_all_region_stack_empty() NOT_DEBUG_RETURN;
+};
bool ParCompactionManager::marking_stacks_empty() const {
return _marking_stack.is_empty() && _objarray_stack.is_empty();
diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
index 03ce090a4f43e201a4dad06cc3be3b14a2c5bc4b..a0df3dade2a90cc35691434e8a6819c2a03837c8 100644
--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
#define SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
+#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc/parallel/parMarkBitMap.hpp"
#include "gc/parallel/psCompactionManager.hpp"
diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp
index 1b97f949628ba40790e264d20275b4b8b5d1e047..44c7901571ac339bbf726203e040b8873ab1737b 100644
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp
@@ -178,19 +178,31 @@ void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
}
}
-HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
- expand(word_size*HeapWordSize);
+bool PSOldGen::expand_for_allocate(size_t word_size) {
+ assert(word_size > 0, "allocating zero words?");
+ bool result = true;
+ {
+ MutexLocker x(ExpandHeap_lock);
+ // Avoid "expand storms" by rechecking available space after obtaining
+ // the lock, because another thread may have already made sufficient
+ // space available. If insufficient space available, that will remain
+ // true until we expand, since we have the lock. Other threads may take
+ // the space we need before we can allocate it, regardless of whether we
+ // expand. That's okay, we'll just try expanding again.
+ if (object_space()->needs_expand(word_size)) {
+ result = expand(word_size*HeapWordSize);
+ }
+ }
if (GCExpandToAllocateDelayMillis > 0) {
os::naked_sleep(GCExpandToAllocateDelayMillis);
}
- return cas_allocate_noexpand(word_size);
+ return result;
}
-void PSOldGen::expand(size_t bytes) {
- if (bytes == 0) {
- return;
- }
- MutexLocker x(ExpandHeap_lock);
+bool PSOldGen::expand(size_t bytes) {
+ assert_lock_strong(ExpandHeap_lock);
+ assert_locked_or_safepoint(Heap_lock);
+ assert(bytes > 0, "precondition");
const size_t alignment = virtual_space()->alignment();
size_t aligned_bytes = align_up(bytes, alignment);
size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);
@@ -200,13 +212,11 @@ void PSOldGen::expand(size_t bytes) {
// providing a page per lgroup. Alignment is larger or equal to the page size.
aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
}
- if (aligned_bytes == 0){
- // The alignment caused the number of bytes to wrap. An expand_by(0) will
- // return true with the implication that and expansion was done when it
- // was not. A call to expand implies a best effort to expand by "bytes"
- // but not a guarantee. Align down to give a best effort. This is likely
- // the most that the generation can expand since it has some capacity to
- // start with.
+ if (aligned_bytes == 0) {
+ // The alignment caused the number of bytes to wrap. A call to expand
+ // implies a best effort to expand by "bytes" but not a guarantee. Align
+ // down to give a best effort. This is likely the most that the generation
+ // can expand since it has some capacity to start with.
aligned_bytes = align_down(bytes, alignment);
}
@@ -224,14 +234,13 @@ void PSOldGen::expand(size_t bytes) {
if (success && GCLocker::is_active_and_needs_gc()) {
log_debug(gc)("Garbage collection disabled, expanded heap instead");
}
+ return success;
}
bool PSOldGen::expand_by(size_t bytes) {
assert_lock_strong(ExpandHeap_lock);
assert_locked_or_safepoint(Heap_lock);
- if (bytes == 0) {
- return true; // That's what virtual_space()->expand_by(0) would return
- }
+ assert(bytes > 0, "precondition");
bool result = virtual_space()->expand_by(bytes);
if (result) {
if (ZapUnusedHeapArea) {
@@ -268,7 +277,7 @@ bool PSOldGen::expand_to_reserved() {
assert_lock_strong(ExpandHeap_lock);
assert_locked_or_safepoint(Heap_lock);
- bool result = true;
+ bool result = false;
const size_t remaining_bytes = virtual_space()->uncommitted_size();
if (remaining_bytes > 0) {
result = expand_by(remaining_bytes);
@@ -323,10 +332,10 @@ void PSOldGen::resize(size_t desired_free_space) {
}
if (new_size > current_size) {
size_t change_bytes = new_size - current_size;
+ MutexLocker x(ExpandHeap_lock);
expand(change_bytes);
} else {
size_t change_bytes = current_size - new_size;
- // shrink doesn't grab this lock, expand does. Is that right?
MutexLocker x(ExpandHeap_lock);
shrink(change_bytes);
}
diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp
index dd0e7fe1e83c56debf2362c24a73692e6c2d6980..53947a948984caffa548ba7dfb3880599dc96d5f 100644
--- a/src/hotspot/share/gc/parallel/psOldGen.hpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.hpp
@@ -79,8 +79,8 @@ class PSOldGen : public CHeapObj<mtGC> {
return res;
}
- HeapWord* expand_and_cas_allocate(size_t word_size);
- void expand(size_t bytes);
+ bool expand_for_allocate(size_t word_size);
+ bool expand(size_t bytes);
bool expand_by(size_t bytes);
bool expand_to_reserved();
@@ -135,8 +135,12 @@ class PSOldGen : public CHeapObj<mtGC> {
void resize(size_t desired_free_space);
HeapWord* allocate(size_t word_size) {
- HeapWord* res = cas_allocate_noexpand(word_size);
- return (res == NULL) ? expand_and_cas_allocate(word_size) : res;
+ HeapWord* res;
+ do {
+ res = cas_allocate_noexpand(word_size);
+ // Retry failed allocation if expand succeeds.
+ } while ((res == nullptr) && expand_for_allocate(word_size));
+ return res;
}
// Iteration.
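
Taken together with needs_expand() above, the new allocate() loop has a simple contract: keep trying the lock-free bump allocation as long as expand_for_allocate() reports progress, and recheck the space requirement under ExpandHeap_lock so that concurrent expanders do not trigger an expand storm. A compressed standalone model of that control flow (std::mutex and std::atomic stand-ins; sizes are abstract word counts):

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    // Standalone model of the allocate()/expand_for_allocate() control flow;
    // the "heap" is just a pair of cursors.
    class OldGen {
        std::mutex _expand_lock;            // stands in for ExpandHeap_lock
        std::atomic<size_t> _top{0};        // allocation cursor
        std::atomic<size_t> _end{1024};     // committed limit
        size_t _reserved = 4096;            // reservation limit

        // Lock-free bump allocation; fails when committed space is exhausted.
        bool cas_allocate_noexpand(size_t words) {
            size_t old_top = _top.load();
            while (old_top + words <= _end.load()) {
                if (_top.compare_exchange_weak(old_top, old_top + words)) {
                    return true;
                }
            }
            return false;
        }

        // Mirrors the recheck-under-lock: another thread may already have
        // expanded while we waited for the lock.
        bool expand_for_allocate(size_t words) {
            std::lock_guard<std::mutex> g(_expand_lock);
            if (_end.load() - _top.load() >= words) {
                return true;  // someone else made room; report progress
            }
            if (_end.load() + words > _reserved) {
                return false;  // out of reserved space
            }
            _end.fetch_add(words);
            return true;
        }

    public:
        bool allocate(size_t words) {
            bool ok;
            do {
                ok = cas_allocate_noexpand(words);
                // Retry a failed allocation only while expansion makes progress.
            } while (!ok && expand_for_allocate(words));
            return ok;
        }
    };

    int main() {
        OldGen gen;
        return gen.allocate(8) ? 0 : 1;
    }
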
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
index 41e0d28d6914075b92f7e37e93ebcd6e7681e49a..0acee8a8ecb7dc59956980e7ac6c2b466e2f0e7e 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,11 +57,12 @@
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "gc/shared/taskTerminator.hpp"
-#include "gc/shared/weakProcessor.hpp"
+#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
@@ -779,7 +780,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
return true;
}
-HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) {
+HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const {
assert(addr != NULL, "Should detect NULL oop earlier");
assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
@@ -858,7 +859,6 @@ public:
BoolObjectClosure* is_subject_to_discovery,
BoolObjectClosure* is_alive_non_header) :
ReferenceProcessor(is_subject_to_discovery,
- ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
ParallelGCThreads, // mt processing degree
true, // mt discovery
ParallelGCThreads, // mt discovery degree
@@ -1784,12 +1784,9 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
// Get the compaction manager reserved for the VM thread.
- ParCompactionManager* const vmthread_cm =
- ParCompactionManager::manager_array(ParallelScavengeHeap::heap()->workers().total_workers());
+ ParCompactionManager* const vmthread_cm = ParCompactionManager::get_vmthread_cm();
{
- ResourceMark rm;
-
const uint active_workers =
WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().total_workers(),
ParallelScavengeHeap::heap()->workers().active_workers(),
@@ -1834,11 +1831,13 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
// adjust_roots() updates Universe::_intArrayKlassObj which is
// needed by the compaction for filling holes in the dense prefix.
- adjust_roots(vmthread_cm);
+ adjust_roots();
compaction_start.update();
compact();
+ ParCompactionManager::verify_all_region_stack_empty();
+
// Reset the mark bitmap, summary data, and do other bookkeeping. Must be
// done before resizing.
post_compact();
@@ -1935,15 +1934,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
heap->post_full_gc_dump(&_gc_timer);
}
-#ifdef ASSERT
- for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
- ParCompactionManager* const cm =
- ParCompactionManager::manager_array(int(i));
- assert(cm->marking_stack()->is_empty(), "should be empty");
- assert(cm->region_stack()->is_empty(), "Region stack " SIZE_FORMAT " is not empty", i);
- }
-#endif // ASSERT
-
if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
Universe::verify("After GC");
}
@@ -2183,7 +2173,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
}
// This is the point where the entire marking should have completed.
- assert(cm->marking_stacks_empty(), "Marking should have completed");
+ ParCompactionManager::verify_all_marking_stack_empty();
{
GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
@@ -2209,35 +2199,94 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
_gc_tracer.report_object_count_after_gc(is_alive_closure());
}
-void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
- // Adjust the pointers to reflect the new locations
- GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer);
+#ifdef ASSERT
+void PCAdjustPointerClosure::verify_cm(ParCompactionManager* cm) {
+ assert(cm != NULL, "associate ParCompactionManage should not be NULL");
+ auto vmthread_cm = ParCompactionManager::get_vmthread_cm();
+ if (Thread::current()->is_VM_thread()) {
+ assert(cm == vmthread_cm, "VM threads should use ParCompactionManager from get_vmthread_cm()");
+ } else {
+ assert(Thread::current()->is_GC_task_thread(), "Must be a GC thread");
+ assert(cm != vmthread_cm, "GC threads should use ParCompactionManager from gc_thread_compaction_manager()");
+ }
+}
+#endif
- // Need new claim bits when tracing through and adjusting pointers.
- ClassLoaderDataGraph::clear_claimed_marks();
+class PSAdjustTask final : public AbstractGangTask {
+ SubTasksDone _sub_tasks;
+ WeakProcessor::Task _weak_proc_task;
+ OopStorageSetStrongParState<false, false> _oop_storage_iter;
+ uint _nworkers;
- PCAdjustPointerClosure oop_closure(cm);
+ enum PSAdjustSubTask {
+ PSAdjustSubTask_code_cache,
+ PSAdjustSubTask_aot,
+ PSAdjustSubTask_old_ref_process,
+ PSAdjustSubTask_young_ref_process,
- // General strong roots.
- Threads::oops_do(&oop_closure, NULL);
- OopStorageSet::strong_oops_do(&oop_closure);
- CLDToOopClosure cld_closure(&oop_closure, ClassLoaderData::_claim_strong);
- ClassLoaderDataGraph::cld_do(&cld_closure);
+ PSAdjustSubTask_num_elements
+ };
- // Now adjust pointers in remaining weak roots. (All of which should
- // have been cleared if they pointed to non-surviving objects.)
- WeakProcessor::oops_do(&oop_closure);
+public:
+ PSAdjustTask(uint nworkers) :
+ AbstractGangTask("PSAdjust task"),
+ _sub_tasks(PSAdjustSubTask_num_elements),
+ _weak_proc_task(nworkers),
+ _nworkers(nworkers) {
+ // Need new claim bits when tracing through and adjusting pointers.
+ ClassLoaderDataGraph::clear_claimed_marks();
+ if (nworkers > 1) {
+ Threads::change_thread_claim_token();
+ }
+ }
- CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations);
- CodeCache::blobs_do(&adjust_from_blobs);
- AOT_ONLY(AOTLoader::oops_do(&oop_closure);)
+ ~PSAdjustTask() {
+ Threads::assert_all_threads_claimed();
+ }
- ref_processor()->weak_oops_do(&oop_closure);
- // Roots were visited so references into the young gen in roots
- // may have been scanned. Process them also.
- // Should the reference processor have a span that excludes
- // young gen objects?
- PSScavenge::reference_processor()->weak_oops_do(&oop_closure);
+ void work(uint worker_id) {
+ ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
+ PCAdjustPointerClosure adjust(cm);
+ {
+ ResourceMark rm;
+ Threads::possibly_parallel_oops_do(_nworkers > 1, &adjust, nullptr);
+ }
+ _oop_storage_iter.oops_do(&adjust);
+ {
+ CLDToOopClosure cld_closure(&adjust, ClassLoaderData::_claim_strong);
+ ClassLoaderDataGraph::cld_do(&cld_closure);
+ }
+ {
+ AlwaysTrueClosure always_alive;
+ _weak_proc_task.work(worker_id, &always_alive, &adjust);
+ }
+ if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
+ CodeBlobToOopClosure adjust_code(&adjust, CodeBlobToOopClosure::FixRelocations);
+ CodeCache::blobs_do(&adjust_code);
+ }
+ if (_sub_tasks.try_claim_task(PSAdjustSubTask_aot)) {
+ AOT_ONLY(AOTLoader::oops_do(&adjust);)
+ }
+ if (_sub_tasks.try_claim_task(PSAdjustSubTask_old_ref_process)) {
+ PSParallelCompact::ref_processor()->weak_oops_do(&adjust);
+ }
+ if (_sub_tasks.try_claim_task(PSAdjustSubTask_young_ref_process)) {
+ // Roots were visited so references into the young gen in roots
+ // may have been scanned. Process them also.
+ // Should the reference processor have a span that excludes
+ // young gen objects?
+ PSScavenge::reference_processor()->weak_oops_do(&adjust);
+ }
+ _sub_tasks.all_tasks_claimed();
+ }
+};
+
+void PSParallelCompact::adjust_roots() {
+ // Adjust the pointers to reflect the new locations
+ GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer);
+ uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
+ PSAdjustTask task(nworkers);
+ ParallelScavengeHeap::heap()->workers().run_task(&task);
}
// Helper class to print 8 region numbers per line and then print the total at the end.
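
PSAdjustTask above mixes fully parallel work (thread roots, OopStorage, CLDs, weak oops) with a handful of one-shot categories handed out via SubTasksDone::try_claim_task(), so each serial category runs exactly once regardless of how many workers race for it. A standalone sketch of that claiming scheme, with a std::atomic exchange standing in for SubTasksDone:

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // Minimal stand-in for SubTasksDone: each enumerated subtask is claimed by
    // exactly one worker via an atomic test-and-set.
    enum AdjustSubTask { SubTask_code_cache, SubTask_refs, SubTask_count };

    static std::atomic<bool> claimed[SubTask_count];

    static bool try_claim_task(AdjustSubTask t) {
        return !claimed[t].exchange(true);
    }

    static void work(unsigned worker_id) {
        // Every worker does the parallel part (elided), then races for the
        // serial subtasks; losers simply skip them.
        if (try_claim_task(SubTask_code_cache)) {
            std::printf("worker %u adjusts code cache\n", worker_id);
        }
        if (try_claim_task(SubTask_refs)) {
            std::printf("worker %u adjusts discovered references\n", worker_id);
        }
    }

    int main() {
        std::vector<std::thread> gang;
        for (unsigned i = 0; i < 4; i++) gang.emplace_back(work, i);
        for (auto& t : gang) t.join();
        return 0;
    }
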
@@ -2306,7 +2355,7 @@ void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
if (sd.region(cur)->claim_unsafe()) {
- ParCompactionManager* cm = ParCompactionManager::manager_array(worker_id);
+ ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
bool result = sd.region(cur)->mark_normal();
assert(result, "Must succeed at this point.");
cm->region_stack()->push(cur);
@@ -2505,7 +2554,6 @@ static void compaction_with_stealing_work(TaskTerminator* terminator, uint worke
// Go around again.
}
}
- return;
}
class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
@@ -2571,9 +2619,11 @@ void PSParallelCompact::compact() {
}
{
- // Update the deferred objects, if any. Any compaction manager can be used.
GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
- ParCompactionManager* cm = ParCompactionManager::manager_array(0);
+ // Update the deferred objects, if any. In principle, any compaction
+ // manager can be used. However, since the current thread is the VM thread,
+ // we use its dedicated manager to keep the verification logic happy.
+ ParCompactionManager* cm = ParCompactionManager::get_vmthread_cm();
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
update_deferred_objects(cm, SpaceId(id));
}
@@ -3133,7 +3183,7 @@ void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
for (uint i = 0; i < parallel_gc_threads; i++) {
- ParCompactionManager *cm = ParCompactionManager::manager_array(i);
+ ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
cm->set_next_shadow_region(beg_region + i);
}
}
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.hpp
index c14dffb2334991be1b1b1c2341873090622d2baa..91cfc9a484f2a6a49f41df5c1c5ea4ac5c4735de 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -480,9 +480,9 @@ public:
HeapWord* partial_obj_end(size_t region_idx) const;
// Return the location of the object after compaction.
- HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm);
+ HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const;
- HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) {
+ HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) const {
return calc_new_pointer(cast_from_oop<HeapWord*>(p), cm);
}
@@ -1107,7 +1107,7 @@ class PSParallelCompact : AllStatic {
static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
// Adjust addresses in roots. Does not adjust addresses in heap.
- static void adjust_roots(ParCompactionManager* cm);
+ static void adjust_roots();
DEBUG_ONLY(static void write_block_fill_histogram();)
@@ -1144,7 +1144,7 @@ class PSParallelCompact : AllStatic {
static bool initialize();
// Closure accessors
- static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
+ static BoolObjectClosure* is_alive_closure() { return &_is_alive_closure; }
// Public accessors
static elapsedTimer* accumulated_time() { return &_accumulated_time; }
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp
index 688da81e9c2848caa2e5756b59c944307db8f58e..7d2678e3f4c457fd4979e5b64a49396f79d9b8d0 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp
@@ -113,10 +113,9 @@ inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
- assert(new_obj != NULL, // is forwarding ptr?
- "should be forwarded");
- // Just always do the update unconditionally?
- if (new_obj != NULL) {
+ assert(new_obj != NULL, "non-null address for live objects");
+ // Is it actually relocated at all?
+ if (new_obj != obj) {
assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
"should be in object space");
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
@@ -127,7 +126,7 @@ inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
class PCAdjustPointerClosure: public BasicOopIterateClosure {
public:
PCAdjustPointerClosure(ParCompactionManager* cm) {
- assert(cm != NULL, "associate ParCompactionManage should not be NULL");
+ verify_cm(cm);
_cm = cm;
}
template <typename T> void do_oop_nv(T* p) { PSParallelCompact::adjust_pointer(p, _cm); }
@@ -137,6 +136,8 @@ public:
virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
private:
ParCompactionManager* _cm;
+
+ static void verify_cm(ParCompactionManager* cm) NOT_DEBUG_RETURN;
};
#endif // SHARE_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
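The rewritten adjust_pointer stores back only when the object actually moved. A standalone sketch of that "store only if relocated" pattern; the types and the forwarding function are stand-ins, not HotSpot's:

#include <cassert>

template <typename T, typename Forwarding>
void adjust_slot(T** slot, Forwarding calc_new_addr) {
  T* obj = *slot;
  if (obj == nullptr) return;       // nothing to adjust
  T* new_obj = calc_new_addr(obj);  // forwarding lookup
  assert(new_obj != nullptr && "live objects have a destination");
  if (new_obj != obj) {             // unmoved objects need no store
    *slot = new_obj;
  }
}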
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index 0b07c01f2625445894bed07f0f3bd70b18ca3fe6..61af24c6c4bf7209c9dc31b4d86c4f3457b272e4 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "gc/shared/taskTerminator.hpp"
-#include "gc/shared/weakProcessor.hpp"
+#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/iterator.hpp"
@@ -520,11 +520,10 @@ bool PSScavenge::invoke_no_policy() {
assert(promotion_manager->stacks_empty(),"stacks should be empty at this point");
- PSScavengeRootsClosure root_closure(promotion_manager);
-
{
GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
- WeakProcessor::weak_oops_do(&_is_alive_closure, &root_closure);
+ PSAdjustWeakRootsClosure root_closure;
+ WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(), &_is_alive_closure, &root_closure, 1);
}
// Verify that usage of root_closure didn't copy any objects.
@@ -822,7 +821,6 @@ void PSScavenge::initialize() {
_span_based_discoverer.set_span(young_gen->reserved());
_ref_processor =
new ReferenceProcessor(&_span_based_discoverer,
- ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
ParallelGCThreads, // mt processing degree
true, // mt discovery
ParallelGCThreads, // mt discovery degree
diff --git a/src/hotspot/share/gc/serial/cSpaceCounters.cpp b/src/hotspot/share/gc/serial/cSpaceCounters.cpp
index 24a78036122b8087cda90156c544040ff00122f0..1f95a971ccab130e0c628f17297772f23cf6de6a 100644
--- a/src/hotspot/share/gc/serial/cSpaceCounters.cpp
+++ b/src/hotspot/share/gc/serial/cSpaceCounters.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "gc/serial/cSpaceCounters.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
CSpaceCounters::CSpaceCounters(const char* name, int ordinal, size_t max_size,
diff --git a/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp b/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp
index 106412d90b48a9593b2b5cd6be2785327d0f279a..a89b57d8e457707c3e87946cc99aca96659c49cb 100644
--- a/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp
@@ -83,9 +83,6 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, L
LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
if (UseCondCardMark) {
LIR_Opr cur_value = gen->new_register(T_INT);
- if (ct->scanned_concurrently()) {
- __ membar_storeload();
- }
__ move(card_addr, cur_value);
LabelObj* L_already_dirty = new LabelObj();
@@ -94,9 +91,6 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, L
__ move(dirty, card_addr);
__ branch_destination(L_already_dirty->label());
} else {
- if (ct->scanned_concurrently()) {
- __ membar_storestore();
- }
__ move(dirty, card_addr);
}
#endif
diff --git a/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp
index be20fbc12ddd6248bf38c1a1955eff8435f572d0..d3da34e5b27fb41f9126f8601ccfdb568cc81371 100644
--- a/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp
@@ -58,8 +58,6 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
Node* val,
BasicType bt,
bool use_precise) const {
- CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
- CardTable* ct = ctbs->card_table();
// No store check needed if we're storing a NULL or an old object
// (latter case is probably a string constant). The concurrent
// mark sweep garbage collector, however, needs to have all nonNull
@@ -105,10 +103,6 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
Node* zero = __ ConI(0); // Dirty card value
if (UseCondCardMark) {
- if (ct->scanned_concurrently()) {
- kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
- __ sync_kit(kit);
- }
// The classic GC reference write barrier is typically implemented
// as a store into the global card mark table. Unfortunately
// unconditional stores can result in false sharing and excessive
@@ -121,12 +115,7 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
}
// Smash zero into card
- if (!ct->scanned_concurrently()) {
- __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);
- } else {
- // Specialized path for CM store barrier
- __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, T_BYTE, adr_type);
- }
+ __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);
if (UseCondCardMark) {
__ end_if();
diff --git a/src/hotspot/share/gc/shared/cardTable.cpp b/src/hotspot/share/gc/shared/cardTable.cpp
index b2a7118e8aa77bed4feb3ffc01c17c9b689f1c1e..84f624b300133adb26a108f849a9a29480bfacb7 100644
--- a/src/hotspot/share/gc/shared/cardTable.cpp
+++ b/src/hotspot/share/gc/shared/cardTable.cpp
@@ -41,8 +41,7 @@ size_t CardTable::compute_byte_map_size() {
return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}
-CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
- _scanned_concurrently(conc_scan),
+CardTable::CardTable(MemRegion whole_heap) :
_whole_heap(whole_heap),
_guard_index(0),
_last_valid_index(0),
diff --git a/src/hotspot/share/gc/shared/cardTable.hpp b/src/hotspot/share/gc/shared/cardTable.hpp
index f5b06ebb172cebdf7e2e64379e9573dc1388e94b..ff406eee4be5becec92b4c3eef08fab6eaeb4f36 100644
--- a/src/hotspot/share/gc/shared/cardTable.hpp
+++ b/src/hotspot/share/gc/shared/cardTable.hpp
@@ -43,7 +43,6 @@ public:
protected:
// The declaration order of these const fields is important; see the
// constructor before changing.
- const bool _scanned_concurrently;
const MemRegion _whole_heap; // the region covered by the card table
size_t _guard_index; // index of very last element in the card
// table; it is set to a guard value
@@ -113,7 +112,7 @@ protected:
static const intptr_t clean_card_row = (intptr_t)(-1);
public:
- CardTable(MemRegion whole_heap, bool conc_scan);
+ CardTable(MemRegion whole_heap);
virtual ~CardTable();
virtual void initialize();
@@ -245,7 +244,6 @@ public:
// But since the heap starts at some higher address, this points to somewhere
// before the beginning of the actual _byte_map.
CardValue* byte_map_base() const { return _byte_map_base; }
- bool scanned_concurrently() const { return _scanned_concurrently; }
virtual bool is_in_young(oop obj) const = 0;
diff --git a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp
index 7e491c36dd5fee3f17f9593ad6068315242b775a..225fca264bd84b56300bc0503be2e15e37b5814b 100644
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp
@@ -192,5 +192,5 @@ void CardTableBarrierSet::on_thread_detach(Thread* thread) {
}
bool CardTableBarrierSet::card_mark_must_follow_store() const {
- return _card_table->scanned_concurrently();
+ return false;
}
diff --git a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp
index f88a0dc070a34cc2a2486a396bc33f5a7be32c43..97e3c4593df31ed6325b0165c01b41b3b05d1163 100644
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp
@@ -32,12 +32,7 @@
template <DecoratorSet decorators, typename T>
inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
volatile CardValue* byte = _card_table->byte_for(field);
- if (_card_table->scanned_concurrently()) {
- // Perform a releasing store if the card table is scanned concurrently
- Atomic::release_store(byte, CardTable::dirty_card_val());
- } else {
- *byte = CardTable::dirty_card_val();
- }
+ *byte = CardTable::dirty_card_val();
}
#endif // SHARE_GC_SHARED_CARDTABLEBARRIERSET_INLINE_HPP
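With concurrent card-table scanning gone, the post-barrier reduces to a single unordered byte store into the card covering the updated field. A standalone sketch; the 512-byte card size and zero dirty value follow HotSpot defaults, and the biased base is assumed to be set up during card-table initialization:

#include <cstdint>

static const int     kCardShift = 9;      // log2(512-byte cards)
static uint8_t*      card_byte_map_base;  // biased base, initialized elsewhere
static const uint8_t kDirtyCard = 0;

inline void write_ref_field_post_sketch(void* field) {
  // A plain store suffices; no release ordering is needed once nothing
  // scans the card table concurrently with mutators.
  card_byte_map_base[reinterpret_cast<uintptr_t>(field) >> kCardShift] = kDirtyCard;
}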
diff --git a/src/hotspot/share/gc/shared/cardTableRS.cpp b/src/hotspot/share/gc/shared/cardTableRS.cpp
index cc232960a05eecad0268d20ec5200aac4d16b606..3dc15fb23a18219b1f296bd966dcce6f2a6ced88 100644
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp
@@ -434,8 +434,8 @@ void CardTableRS::verify() {
CardTable::verify();
}
-CardTableRS::CardTableRS(MemRegion whole_heap, bool scanned_concurrently) :
- CardTable(whole_heap, scanned_concurrently) { }
+CardTableRS::CardTableRS(MemRegion whole_heap) :
+ CardTable(whole_heap) { }
void CardTableRS::initialize() {
CardTable::initialize();
diff --git a/src/hotspot/share/gc/shared/cardTableRS.hpp b/src/hotspot/share/gc/shared/cardTableRS.hpp
index e90ab9e313444bdcbb7da9e85ae38add40c73ba8..86ea16e0957f5e432210fb3620e03b55a095aa71 100644
--- a/src/hotspot/share/gc/shared/cardTableRS.hpp
+++ b/src/hotspot/share/gc/shared/cardTableRS.hpp
@@ -45,7 +45,7 @@ class CardTableRS : public CardTable {
void verify_space(Space* s, HeapWord* gen_start);
public:
- CardTableRS(MemRegion whole_heap, bool scanned_concurrently);
+ CardTableRS(MemRegion whole_heap);
void younger_refs_in_space_iterate(Space* sp, HeapWord* gen_boundary, OopIterateClosure* cl);
diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp
index 105d64ba2f17c0624ee8f723ee0445df90345b0f..b864ca2fd7aadcda1eebae3cdb121599c1674cb9 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
@@ -40,6 +41,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
index ef532f148faaaa76501a94d068b544a67584e4c6..d2723e9b804698e54ddb8b65c87edcdeded75a57 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -29,6 +29,7 @@
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/verifyOption.hpp"
#include "memory/allocation.hpp"
+#include "memory/metaspace.hpp"
#include "memory/universe.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfDataTypes.hpp"
diff --git a/src/hotspot/share/gc/shared/gcLocker.cpp b/src/hotspot/share/gc/shared/gcLocker.cpp
index 814e2fa1a40333288bb88e02e21cff130a44201d..f1b3dce71cfdf2cae07810ed565a21bd87e31df1 100644
--- a/src/hotspot/share/gc/shared/gcLocker.cpp
+++ b/src/hotspot/share/gc/shared/gcLocker.cpp
@@ -36,7 +36,6 @@
volatile jint GCLocker::_jni_lock_count = 0;
volatile bool GCLocker::_needs_gc = false;
-volatile bool GCLocker::_doing_gc = false;
unsigned int GCLocker::_total_collections = 0;
#ifdef ASSERT
@@ -127,12 +126,16 @@ bool GCLocker::should_discard(GCCause::Cause cause, uint total_collections) {
void GCLocker::jni_lock(JavaThread* thread) {
assert(!thread->in_critical(), "shouldn't currently be in a critical region");
MonitorLocker ml(JNICritical_lock);
- // Block entering threads if we know at least one thread is in a
- // JNI critical region and we need a GC.
- // We check that at least one thread is in a critical region before
- // blocking because blocked threads are woken up by a thread exiting
- // a JNI critical region.
- while (is_active_and_needs_gc() || _doing_gc) {
+ // Block entering threads if there's a pending GC request.
+ while (needs_gc()) {
+    // There's at least one thread that has not completely left the critical
+    // region (CR). When that last thread (no new threads can enter the CR
+    // while blocking is in effect) exits the CR, it calls `jni_unlock`, which
+    // sets `_needs_gc` to false and wakes up all blocked threads.
+    // We would like to assert that the number of threads in the CR is
+    // positive (`_jni_lock_count > 0` in the code), but that would be too
+    // strong: the last thread may have called `jni_unlock` without having
+    // finished the call yet, e.g. while initiating a GCCause::_gc_locker GC.
ml.wait();
}
thread->enter_critical();
@@ -154,7 +157,6 @@ void GCLocker::jni_unlock(JavaThread* thread) {
// must not be a safepoint between the lock becoming inactive and
// getting the count, else there may be unnecessary GCLocker GCs.
_total_collections = Universe::heap()->total_collections();
- _doing_gc = true;
GCLockerTracer::report_gc_locker();
{
// Must give up the lock while at a safepoint
@@ -162,7 +164,6 @@ void GCLocker::jni_unlock(JavaThread* thread) {
log_debug_jni("Performing GC after exiting critical section.");
Universe::heap()->collect(GCCause::_gc_locker);
}
- _doing_gc = false;
_needs_gc = false;
JNICritical_lock->notify_all();
}
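The simplified protocol blocks entering threads on a single condition, _needs_gc, and the last thread leaving the critical region runs the GC, clears the flag, and notifies. A standalone analogue using standard C++ primitives -- no safepoints or VM details, names are illustrative:

#include <condition_variable>
#include <mutex>

std::mutex              jni_critical_mutex;
std::condition_variable jni_critical_cv;
bool needs_gc_flag  = false;  // set elsewhere when an allocation needs a GC
int  jni_lock_count = 0;      // threads currently inside the critical region

void jni_lock_sketch() {
  std::unique_lock<std::mutex> ml(jni_critical_mutex);
  // Block while a GC request is pending; the clearing thread notifies.
  jni_critical_cv.wait(ml, [] { return !needs_gc_flag; });
  ++jni_lock_count;
}

void jni_unlock_sketch() {
  std::unique_lock<std::mutex> ml(jni_critical_mutex);
  if (--jni_lock_count == 0 && needs_gc_flag) {
    // Last thread out performs the GC (HotSpot gives up the lock around the
    // actual collection), then clears the request and wakes all waiters.
    needs_gc_flag = false;
    jni_critical_cv.notify_all();
  }
}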
diff --git a/src/hotspot/share/gc/shared/gcLocker.hpp b/src/hotspot/share/gc/shared/gcLocker.hpp
index 4b776058da8f530f7289ea1390138dc7d89d4666..91ed84c41a99b6ba7d8cbd5278380461686ca5e8 100644
--- a/src/hotspot/share/gc/shared/gcLocker.hpp
+++ b/src/hotspot/share/gc/shared/gcLocker.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,6 @@ class GCLocker: public AllStatic {
// unlocking.
static volatile jint _jni_lock_count; // number of jni active instances.
static volatile bool _needs_gc; // heap is filling, we need a GC
- // note: bool is typedef'd as jint
- static volatile bool _doing_gc; // unlock_critical() is doing a GC
static uint _total_collections; // value for _gc_locker collection
#ifdef ASSERT
diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp
index a8a4084990eccb65fa7382a50c9941847d670108..f73974981c9cffec524918efcfc1c291a4cca3c0 100644
--- a/src/hotspot/share/gc/shared/gcVMOperations.cpp
+++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "classfile/classLoader.hpp"
+#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/gcId.hpp"
diff --git a/src/hotspot/share/gc/shared/gcVMOperations.hpp b/src/hotspot/share/gc/shared/gcVMOperations.hpp
index aa8e0e9314640d0c0c14c30b4f4e066784c814ed..25af6a1d1544255caec38be48491416e5a73a70d 100644
--- a/src/hotspot/share/gc/shared/gcVMOperations.hpp
+++ b/src/hotspot/share/gc/shared/gcVMOperations.hpp
@@ -31,7 +31,7 @@
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.hpp"
#include "runtime/synchronizer.hpp"
-#include "runtime/vmOperations.hpp"
+#include "runtime/vmOperation.hpp"
// The following class hierarchy represents
// a set of operations (VM_Operation) related to GC.
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
index fb303850c08348a2270abc59b6456e97f26265e6..743e8882624c1351b0e995ac6b6252df8178e1e2 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
@@ -60,6 +60,7 @@
#include "memory/iterator.hpp"
#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
#include "memory/metaspaceCounters.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
@@ -142,7 +143,7 @@ jint GenCollectedHeap::initialize() {
}
CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
- return new CardTableRS(reserved_region, false /* scan_concurrently */);
+ return new CardTableRS(reserved_region);
}
void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
@@ -172,11 +173,12 @@ ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
SIZE_FORMAT, total_reserved, alignment);
ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
+ size_t used_page_size = ReservedSpace::actual_reserved_page_size(heap_rs);
os::trace_page_sizes("Heap",
MinHeapSize,
total_reserved,
- alignment,
+ used_page_size,
heap_rs.base(),
heap_rs.size());
diff --git a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp
index da85dd1db4225560a7bfbd7fbdf914aa7be50857..0b547e6dcff9001ae3216137feed7ab25a80a90b 100644
--- a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp
+++ b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_GC_SHARED_GENOOPCLOSURES_INLINE_HPP
#define SHARE_GC_SHARED_GENOOPCLOSURES_INLINE_HPP
+#include "classfile/classLoaderData.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.hpp"
diff --git a/src/hotspot/share/gc/shared/oopStorage.cpp b/src/hotspot/share/gc/shared/oopStorage.cpp
index 4248c9d91b83e9480221f90f824404297cd95e10..45142a0bd75d03bc5ea8003bd946c6eda5b86dd3 100644
--- a/src/hotspot/share/gc/shared/oopStorage.cpp
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,8 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
+#include "runtime/safefetch.inline.hpp"
#include "runtime/safepoint.hpp"
-#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
@@ -787,6 +787,21 @@ OopStorage::~OopStorage() {
os::free(const_cast<char*>(_name));
}
+void OopStorage::register_num_dead_callback(NumDeadCallback f) {
+ assert(_num_dead_callback == NULL, "Only one callback function supported");
+ _num_dead_callback = f;
+}
+
+void OopStorage::report_num_dead(size_t num_dead) const {
+ if (_num_dead_callback != NULL) {
+ _num_dead_callback(num_dead);
+ }
+}
+
+bool OopStorage::should_report_num_dead() const {
+ return _num_dead_callback != NULL;
+}
+
// Managing service thread notifications.
//
// We don't want cleanup work to linger indefinitely, but we also don't want
@@ -815,21 +830,6 @@ static jlong cleanup_trigger_permit_time = 0;
// too frequent.
const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
-void OopStorage::register_num_dead_callback(NumDeadCallback f) {
- assert(_num_dead_callback == NULL, "Only one callback function supported");
- _num_dead_callback = f;
-}
-
-void OopStorage::report_num_dead(size_t num_dead) const {
- if (_num_dead_callback != NULL) {
- _num_dead_callback(num_dead);
- }
-}
-
-bool OopStorage::should_report_num_dead() const {
- return _num_dead_callback != NULL;
-}
-
void OopStorage::trigger_cleanup_if_needed() {
MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
if (Atomic::load(&needs_cleanup_requested) &&
diff --git a/src/hotspot/share/gc/shared/referenceProcessor.cpp b/src/hotspot/share/gc/shared/referenceProcessor.cpp
index e2fed87ffb365f637ffdd7546c21487fb4f56c45..4fd331337130c1994458bc32495f6cef50ed4fdf 100644
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp
@@ -26,9 +26,9 @@
#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
+#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/gc_globals.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
@@ -93,7 +93,6 @@ void ReferenceProcessor::enable_discovery(bool check_no_refs) {
}
ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
- bool mt_processing,
uint mt_processing_degree,
bool mt_discovery,
uint mt_discovery_degree,
@@ -103,7 +102,6 @@ ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discover
_is_subject_to_discovery(is_subject_to_discovery),
_discovering_refs(false),
_enqueuing_is_done(false),
- _processing_is_mt(mt_processing),
_next_id(0),
_adjust_no_of_processing_threads(adjust_no_of_processing_threads),
_is_alive_non_header(is_alive_non_header)
@@ -140,6 +138,10 @@ void ReferenceProcessor::verify_no_references_recorded() {
}
#endif
+bool ReferenceProcessor::processing_is_mt() const {
+ return ParallelRefProcEnabled && _num_queues > 1;
+}
+
void ReferenceProcessor::weak_oops_do(OopClosure* f) {
for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
if (UseCompressedOops) {
@@ -245,11 +247,6 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
process_phantom_refs(is_alive, keep_alive, complete_gc, task_executor, phase_times);
}
- if (task_executor != NULL) {
- // Record the work done by the parallel workers.
- task_executor->set_single_threaded_mode();
- }
-
phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);
return stats;
@@ -662,7 +659,7 @@ void ReferenceProcessor::set_active_mt_degree(uint v) {
}
bool ReferenceProcessor::need_balance_queues(DiscoveredList refs_lists[]) {
- assert(_processing_is_mt, "why balance non-mt processing?");
+ assert(processing_is_mt(), "why balance non-mt processing?");
// _num_queues is the processing degree. Only list entries up to
// _num_queues will be processed, so any non-empty lists beyond
// that must be redistributed to lists in that range. Even if not
@@ -684,7 +681,7 @@ bool ReferenceProcessor::need_balance_queues(DiscoveredList refs_lists[]) {
}
void ReferenceProcessor::maybe_balance_queues(DiscoveredList refs_lists[]) {
- assert(_processing_is_mt, "Should not call this otherwise");
+ assert(processing_is_mt(), "Should not call this otherwise");
if (need_balance_queues(refs_lists)) {
balance_queues(refs_lists);
}
@@ -774,21 +771,16 @@ void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
#endif
}
-bool ReferenceProcessor::is_mt_processing_set_up(AbstractRefProcTaskExecutor* task_executor) const {
- return task_executor != NULL && _processing_is_mt;
-}
-
void ReferenceProcessor::process_soft_ref_reconsider(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc,
AbstractRefProcTaskExecutor* task_executor,
ReferenceProcessorPhaseTimes* phase_times) {
- assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");
+ assert(!processing_is_mt() || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");
size_t const num_soft_refs = total_count(_discoveredSoftRefs);
phase_times->set_ref_discovered(REF_SOFT, num_soft_refs);
-
- phase_times->set_processing_is_mt(_processing_is_mt);
+ phase_times->set_processing_is_mt(processing_is_mt());
if (num_soft_refs == 0) {
log_debug(gc, ref)("Skipped phase 1 of Reference Processing: no references");
@@ -802,7 +794,7 @@ void ReferenceProcessor::process_soft_ref_reconsider(BoolObjectClosure* is_alive
RefProcMTDegreeAdjuster a(this, RefPhase1, num_soft_refs);
- if (_processing_is_mt) {
+ if (processing_is_mt()) {
RefProcBalanceQueuesTimeTracker tt(RefPhase1, phase_times);
maybe_balance_queues(_discoveredSoftRefs);
}
@@ -810,7 +802,7 @@ void ReferenceProcessor::process_soft_ref_reconsider(BoolObjectClosure* is_alive
RefProcPhaseTimeTracker tt(RefPhase1, phase_times);
log_reflist("Phase 1 Soft before", _discoveredSoftRefs, _max_num_queues);
- if (_processing_is_mt) {
+ if (processing_is_mt()) {
RefProcPhase1Task phase1(*this, phase_times, _current_soft_ref_policy);
task_executor->execute(phase1, num_queues());
} else {
@@ -832,7 +824,7 @@ void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_aliv
VoidClosure* complete_gc,
AbstractRefProcTaskExecutor* task_executor,
ReferenceProcessorPhaseTimes* phase_times) {
- assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");
+ assert(!processing_is_mt() || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");
size_t const num_soft_refs = total_count(_discoveredSoftRefs);
size_t const num_weak_refs = total_count(_discoveredWeakRefs);
@@ -841,7 +833,7 @@ void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_aliv
phase_times->set_ref_discovered(REF_WEAK, num_weak_refs);
phase_times->set_ref_discovered(REF_FINAL, num_final_refs);
- phase_times->set_processing_is_mt(_processing_is_mt);
+ phase_times->set_processing_is_mt(processing_is_mt());
if (num_total_refs == 0) {
log_debug(gc, ref)("Skipped phase 2 of Reference Processing: no references");
@@ -850,7 +842,7 @@ void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_aliv
RefProcMTDegreeAdjuster a(this, RefPhase2, num_total_refs);
- if (_processing_is_mt) {
+ if (processing_is_mt()) {
RefProcBalanceQueuesTimeTracker tt(RefPhase2, phase_times);
maybe_balance_queues(_discoveredSoftRefs);
maybe_balance_queues(_discoveredWeakRefs);
@@ -862,7 +854,7 @@ void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_aliv
log_reflist("Phase 2 Soft before", _discoveredSoftRefs, _max_num_queues);
log_reflist("Phase 2 Weak before", _discoveredWeakRefs, _max_num_queues);
log_reflist("Phase 2 Final before", _discoveredFinalRefs, _max_num_queues);
- if (_processing_is_mt) {
+ if (processing_is_mt()) {
RefProcPhase2Task phase2(*this, phase_times);
task_executor->execute(phase2, num_queues());
} else {
@@ -908,11 +900,11 @@ void ReferenceProcessor::process_final_keep_alive(OopClosure* keep_alive,
VoidClosure* complete_gc,
AbstractRefProcTaskExecutor* task_executor,
ReferenceProcessorPhaseTimes* phase_times) {
- assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");
+ assert(!processing_is_mt() || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");
size_t const num_final_refs = total_count(_discoveredFinalRefs);
- phase_times->set_processing_is_mt(_processing_is_mt);
+ phase_times->set_processing_is_mt(processing_is_mt());
if (num_final_refs == 0) {
log_debug(gc, ref)("Skipped phase 3 of Reference Processing: no references");
@@ -921,7 +913,7 @@ void ReferenceProcessor::process_final_keep_alive(OopClosure* keep_alive,
RefProcMTDegreeAdjuster a(this, RefPhase3, num_final_refs);
- if (_processing_is_mt) {
+ if (processing_is_mt()) {
RefProcBalanceQueuesTimeTracker tt(RefPhase3, phase_times);
maybe_balance_queues(_discoveredFinalRefs);
}
@@ -930,7 +922,7 @@ void ReferenceProcessor::process_final_keep_alive(OopClosure* keep_alive,
// . Traverse referents of final references and keep them and followers alive.
RefProcPhaseTimeTracker tt(RefPhase3, phase_times);
- if (_processing_is_mt) {
+ if (processing_is_mt()) {
RefProcPhase3Task phase3(*this, phase_times);
task_executor->execute(phase3, num_queues());
} else {
@@ -947,12 +939,12 @@ void ReferenceProcessor::process_phantom_refs(BoolObjectClosure* is_alive,
VoidClosure* complete_gc,
AbstractRefProcTaskExecutor* task_executor,
ReferenceProcessorPhaseTimes* phase_times) {
- assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");
+ assert(!processing_is_mt() || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");
size_t const num_phantom_refs = total_count(_discoveredPhantomRefs);
- phase_times->set_ref_discovered(REF_PHANTOM, num_phantom_refs);
- phase_times->set_processing_is_mt(_processing_is_mt);
+ phase_times->set_ref_discovered(REF_PHANTOM, num_phantom_refs);
+ phase_times->set_processing_is_mt(processing_is_mt());
if (num_phantom_refs == 0) {
log_debug(gc, ref)("Skipped phase 4 of Reference Processing: no references");
@@ -961,7 +953,7 @@ void ReferenceProcessor::process_phantom_refs(BoolObjectClosure* is_alive,
RefProcMTDegreeAdjuster a(this, RefPhase4, num_phantom_refs);
- if (_processing_is_mt) {
+ if (processing_is_mt()) {
RefProcBalanceQueuesTimeTracker tt(RefPhase4, phase_times);
maybe_balance_queues(_discoveredPhantomRefs);
}
@@ -970,7 +962,7 @@ void ReferenceProcessor::process_phantom_refs(BoolObjectClosure* is_alive,
RefProcPhaseTimeTracker tt(RefPhase4, phase_times);
log_reflist("Phase 4 Phantom before", _discoveredPhantomRefs, _max_num_queues);
- if (_processing_is_mt) {
+ if (processing_is_mt()) {
RefProcPhase4Task phase4(*this, phase_times);
task_executor->execute(phase4, num_queues());
} else {
@@ -997,7 +989,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
} else {
// single-threaded discovery, we save in round-robin
// fashion to each of the lists.
- if (_processing_is_mt) {
+ if (processing_is_mt()) {
id = next_id();
}
}
@@ -1165,8 +1157,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
// Check assumption that an object is not potentially
// discovered twice except by concurrent collectors that potentially
// trace the same Reference object twice.
- assert(UseG1GC || UseShenandoahGC,
- "Only possible with a concurrent marking collector");
+ assert(UseG1GC, "Only possible with a concurrent marking collector");
return true;
}
}
@@ -1381,7 +1372,6 @@ RefProcMTDegreeAdjuster::RefProcMTDegreeAdjuster(ReferenceProcessor* rp,
RefProcPhases phase,
size_t ref_count):
_rp(rp),
- _saved_mt_processing(_rp->processing_is_mt()),
_saved_num_queues(_rp->num_queues()) {
if (!_rp->processing_is_mt() || !_rp->adjust_no_of_processing_threads() || (ReferencesPerThread == 0)) {
return;
@@ -1389,12 +1379,10 @@ RefProcMTDegreeAdjuster::RefProcMTDegreeAdjuster(ReferenceProcessor* rp,
uint workers = ergo_proc_thread_count(ref_count, _rp->num_queues(), phase);
- _rp->set_mt_processing(workers > 1);
_rp->set_active_mt_degree(workers);
}
RefProcMTDegreeAdjuster::~RefProcMTDegreeAdjuster() {
// Revert to previous status.
- _rp->set_mt_processing(_saved_mt_processing);
_rp->set_active_mt_degree(_saved_num_queues);
}
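The cached _processing_is_mt flag is replaced by a predicate computed on demand from ParallelRefProcEnabled and the queue count, so RefProcMTDegreeAdjuster no longer has to save and restore it. A tiny sketch of the derived-predicate idea, with stand-in fields:

struct RefProcSketch {
  unsigned num_queues;
  bool     parallel_enabled;  // stand-in for ParallelRefProcEnabled
  // Compute the answer from the single source of truth instead of caching
  // a copy that every adjuster would have to keep in sync.
  bool processing_is_mt() const {
    return parallel_enabled && num_queues > 1;
  }
};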
diff --git a/src/hotspot/share/gc/shared/referenceProcessor.hpp b/src/hotspot/share/gc/shared/referenceProcessor.hpp
index 695bdf49053f208aeff382679214599152ddfcfb..08519712e9deb363ce76839e81845d4b7a775e8c 100644
--- a/src/hotspot/share/gc/shared/referenceProcessor.hpp
+++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -201,8 +201,6 @@ private:
bool _discovery_is_mt; // true if reference discovery is MT.
bool _enqueuing_is_done; // true if all weak references enqueued
- bool _processing_is_mt; // true during phases when
- // reference processing is MT.
uint _next_id; // round-robin mod _num_queues counter in
// support of work distribution
@@ -371,12 +369,10 @@ private:
bool is_subject_to_discovery(oop const obj) const;
- bool is_mt_processing_set_up(AbstractRefProcTaskExecutor* task_executor) const;
-
public:
// Default parameters give you a vanilla reference processor.
ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
- bool mt_processing = false, uint mt_processing_degree = 1,
+ uint mt_processing_degree = 1,
bool mt_discovery = false, uint mt_discovery_degree = 1,
bool atomic_discovery = true,
BoolObjectClosure* is_alive_non_header = NULL,
@@ -417,8 +413,7 @@ public:
void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }
// Whether we are in a phase when _processing_ is MT.
- bool processing_is_mt() const { return _processing_is_mt; }
- void set_mt_processing(bool mt) { _processing_is_mt = mt; }
+ bool processing_is_mt() const;
// whether all enqueueing of weak references is complete
bool enqueuing_is_done() { return _enqueuing_is_done; }
@@ -601,28 +596,6 @@ class ReferenceProcessorAtomicMutator: StackObj {
}
};
-
-// A utility class to temporarily change the MT processing
-// disposition of the given ReferenceProcessor instance
-// in the scope that contains it.
-class ReferenceProcessorMTProcMutator: StackObj {
- private:
- ReferenceProcessor* _rp;
- bool _saved_mt;
-
- public:
- ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
- bool mt):
- _rp(rp) {
- _saved_mt = _rp->processing_is_mt();
- _rp->set_mt_processing(mt);
- }
-
- ~ReferenceProcessorMTProcMutator() {
- _rp->set_mt_processing(_saved_mt);
- }
-};
-
// This class is an interface used to implement task execution for the
// reference processing.
class AbstractRefProcTaskExecutor {
@@ -633,9 +606,6 @@ public:
// Executes a task using worker threads.
virtual void execute(ProcessTask& task, uint ergo_workers) = 0;
-
- // Switch to single threaded mode.
- virtual void set_single_threaded_mode() { };
};
// Abstract reference processing task to execute.
@@ -670,7 +640,6 @@ class RefProcMTDegreeAdjuster : public StackObj {
typedef ReferenceProcessor::RefProcPhases RefProcPhases;
ReferenceProcessor* _rp;
- bool _saved_mt_processing;
uint _saved_num_queues;
// Calculate based on total of references.
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
index 63760bc563fc792dd75529a098ef096275235ea5..84a03402f9440ae6411a39852e97c0ac0f0a4cab 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
@@ -395,32 +395,33 @@ bool StringDedupTable::is_rehashing() {
StringDedupTable* StringDedupTable::prepare_resize() {
size_t size = _table->_size;
- // Check if the hashtable needs to be resized
+ // Decide whether to resize, and compute desired new size if so.
if (_table->_entries > _table->_grow_threshold) {
- // Grow table, double the size
- size *= 2;
- if (size > _max_size) {
- // Too big, don't resize
- return NULL;
+ // Compute new size.
+ size_t needed = _table->_entries / _grow_load_factor;
+ if (needed < _max_size) {
+ size = round_up_power_of_2(needed);
+ } else {
+ size = _max_size;
}
} else if (_table->_entries < _table->_shrink_threshold) {
- // Shrink table, half the size
- size /= 2;
- if (size < _min_size) {
- // Too small, don't resize
- return NULL;
- }
- } else if (StringDeduplicationResizeALot) {
- // Force grow
- size *= 2;
- if (size > _max_size) {
- // Too big, force shrink instead
- size /= 4;
+ // Compute new size. We can't shrink by more than a factor of 2,
+ // because the partitioning for parallelization doesn't support more.
+ if (size > _min_size) size /= 2;
+ }
+  // If no change in size is needed (and a resize is not being forced), we are done.
+ if (size == _table->_size) {
+ if (!StringDeduplicationResizeALot) {
+ return NULL; // Don't resize.
+ } else if (size < _max_size) {
+ size *= 2; // Force grow, but not past _max_size.
+ } else {
+ size /= 2; // Can't force grow, so force shrink instead.
}
- } else {
- // Resize not needed
- return NULL;
}
+ assert(size <= _max_size, "invariant: %zu", size);
+ assert(size >= _min_size, "invariant: %zu", size);
+ assert(is_power_of_2(size), "invariant: %zu", size);
// Update statistics
_resize_count++;
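A worked example of the grow path above: divide the entry count by the grow load factor, then round up to a power of two. The load factor below is an assumed illustrative value, not necessarily what _grow_load_factor holds:

#include <cstddef>

static size_t round_up_pow2(size_t v) {  // analogue of round_up_power_of_2
  size_t p = 1;
  while (p < v) p <<= 1;
  return p;
}

int main() {
  const double grow_load_factor = 0.75;                              // assumed
  size_t entries = 3000;
  size_t needed  = static_cast<size_t>(entries / grow_load_factor);  // 4000
  size_t size    = round_up_pow2(needed);                            // 4096
  return size == 4096 ? 0 : 1;
}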
diff --git a/src/hotspot/share/gc/shared/workgroup.cpp b/src/hotspot/share/gc/shared/workgroup.cpp
index 3a1d7cfd694f746acbf5ea417017d65552a1bbae..f03a0610292f3b0cc1105df9f63ef5cbb15ddb97 100644
--- a/src/hotspot/share/gc/shared/workgroup.cpp
+++ b/src/hotspot/share/gc/shared/workgroup.cpp
@@ -352,26 +352,19 @@ void WorkGangBarrierSync::abort() {
// SubTasksDone functions.
SubTasksDone::SubTasksDone(uint n) :
- _tasks(NULL), _n_tasks(n), _threads_completed(0) {
+ _tasks(NULL), _n_tasks(n) {
_tasks = NEW_C_HEAP_ARRAY(bool, n, mtInternal);
- clear();
-}
-
-bool SubTasksDone::valid() {
- return _tasks != NULL;
-}
-
-void SubTasksDone::clear() {
for (uint i = 0; i < _n_tasks; i++) {
_tasks[i] = false;
}
- _threads_completed = 0;
}
-void SubTasksDone::all_tasks_completed_impl(uint n_threads,
- uint skipped[],
- size_t skipped_size) {
#ifdef ASSERT
+void SubTasksDone::all_tasks_claimed_impl(uint skipped[], size_t skipped_size) {
+ if (Atomic::cmpxchg(&_verification_done, false, true)) {
+ // another thread has done the verification
+ return;
+ }
// all non-skipped tasks are claimed
for (uint i = 0; i < _n_tasks; ++i) {
if (!_tasks[i]) {
@@ -391,19 +384,8 @@ void SubTasksDone::all_tasks_completed_impl(uint n_threads,
assert(task_index < _n_tasks, "Array in range.");
assert(!_tasks[task_index], "%d is both claimed and skipped.", task_index);
}
-#endif
- uint observed = _threads_completed;
- uint old;
- do {
- old = observed;
- observed = Atomic::cmpxchg(&_threads_completed, old, old+1);
- } while (observed != old);
- // If this was the last thread checking in, clear the tasks.
- uint adjusted_thread_count = (n_threads == 0 ? 1 : n_threads);
- if (observed + 1 == adjusted_thread_count) {
- clear();
- }
}
+#endif
bool SubTasksDone::try_claim_task(uint t) {
assert(t < _n_tasks, "bad task id.");
return !_tasks[t] && !Atomic::cmpxchg(&_tasks[t], false, true);
@@ -411,6 +393,7 @@ bool SubTasksDone::try_claim_task(uint t) {
}
SubTasksDone::~SubTasksDone() {
+ assert(_verification_done, "all_tasks_claimed must have been called.");
FREE_C_HEAP_ARRAY(bool, _tasks);
}
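The debug-only verification above uses a compare-and-swap once-flag, so only the first thread entering all_tasks_claimed_impl runs the checks. A standalone analogue of that run-once pattern with std::atomic:

#include <atomic>

std::atomic<bool> verification_done{false};

void verify_once() {
  bool expected = false;
  if (!verification_done.compare_exchange_strong(expected, true)) {
    return;  // another thread has done (or is doing) the verification
  }
  // ... run the idempotent, debug-only checks exactly once ...
}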
diff --git a/src/hotspot/share/gc/shared/workgroup.hpp b/src/hotspot/share/gc/shared/workgroup.hpp
index a499451333e9a02e8a505fa3ac1f71b506c2d64f..e6c7a686765f655ed388c3d7c2b8baaad9457c8d 100644
--- a/src/hotspot/share/gc/shared/workgroup.hpp
+++ b/src/hotspot/share/gc/shared/workgroup.hpp
@@ -305,24 +305,18 @@ public:
class SubTasksDone: public CHeapObj<mtInternal> {
volatile bool* _tasks;
uint _n_tasks;
- volatile uint _threads_completed;
- // Set all tasks to unclaimed.
- void clear();
-
- void all_tasks_completed_impl(uint n_threads, uint skipped[], size_t skipped_size);
+  // Make sure the verification logic runs exactly once, to avoid duplicate assertion failures.
+ DEBUG_ONLY(volatile bool _verification_done = false;)
+ void all_tasks_claimed_impl(uint skipped[], size_t skipped_size) NOT_DEBUG_RETURN;
NONCOPYABLE(SubTasksDone);
public:
// Initializes "this" to a state in which there are "n" tasks to be
- // processed, none of the which are originally claimed. The number of
- // threads doing the tasks is initialized 1.
+  // processed, none of which are originally claimed.
SubTasksDone(uint n);
- // True iff the object is in a valid state.
- bool valid();
-
// Attempt to claim the task "t", returning true if successful,
// false if it has already been claimed. The task "t" is required
// to be within the range of "this".
@@ -331,21 +325,17 @@ public:
// The calling thread asserts that it has attempted to claim all the tasks
// that it will try to claim. Tasks that are meant to be skipped must be
// explicitly passed as extra arguments. Every thread in the parallel task
- // must execute this. (When the last thread does so, the task array is
- // cleared.)
- //
- // n_threads - Number of threads executing the sub-tasks.
- void all_tasks_completed(uint n_threads) {
- all_tasks_completed_impl(n_threads, nullptr, 0);
- }
-
- // Augmented by variadic args, each for a skipped task.
+ // must execute this.
template<typename T0, typename... Ts,
         ENABLE_IF(Conjunction<std::is_same<uint, Ts>...>::value)>
- void all_tasks_completed(uint n_threads, T0 first_skipped, Ts... more_skipped) {
+ void all_tasks_claimed(T0 first_skipped, Ts... more_skipped) {
static_assert(std::is_convertible<T0, uint>::value, "not convertible");
uint skipped[] = { static_cast<uint>(first_skipped), static_cast<uint>(more_skipped)... };
- all_tasks_completed_impl(n_threads, skipped, ARRAY_SIZE(skipped));
+ all_tasks_claimed_impl(skipped, ARRAY_SIZE(skipped));
+ }
+  // Convenience overload for the case when there are no skipped tasks.
+ void all_tasks_claimed() {
+ all_tasks_claimed_impl(nullptr, 0);
}
// Destructor.
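A hypothetical usage sketch of the renamed API (task ids and the skipped set are illustrative): every worker attempts the same claims and then makes the same all_tasks_claimed call, naming any task that no worker attempts.

enum { Task_A = 0, Task_B, Task_C, NumTasks };
SubTasksDone subtasks(NumTasks);

void worker_body(uint worker_id) {
  if (subtasks.try_claim_task(Task_A)) { /* done by exactly one worker */ }
  if (subtasks.try_claim_task(Task_B)) { /* done by exactly one worker */ }
  // Task_C is not attempted in this configuration, so declare it skipped;
  // the destructor asserts that this call happened.
  subtasks.all_tasks_claimed(Task_C);
}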
diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
index 083951ca14003781e5844928f00a4d8ad09c186f..1478d64e5efc88f6359e8c51435a4589c3734616 100644
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
@@ -220,27 +220,22 @@ void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result)
BarrierSetC1::load_at_resolved(access, result);
}
- // 3: apply keep-alive barrier if ShenandoahSATBBarrier is set
- if (ShenandoahSATBBarrier) {
- bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
- bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
+ // 3: apply keep-alive barrier for java.lang.ref.Reference if needed
+ if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
- bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
-
- if ((is_weak || is_phantom || is_anonymous) && keep_alive) {
- // Register the value in the referent field with the pre-barrier
- LabelObj *Lcont_anonymous;
- if (is_anonymous) {
- Lcont_anonymous = new LabelObj();
- generate_referent_check(access, Lcont_anonymous);
- }
- pre_barrier(gen, access.access_emit_info(), decorators, LIR_OprFact::illegalOpr /* addr_opr */,
- result /* pre_val */);
- if (is_anonymous) {
- __ branch_destination(Lcont_anonymous->label());
- }
+
+ // Register the value in the referent field with the pre-barrier
+ LabelObj *Lcont_anonymous;
+ if (is_anonymous) {
+ Lcont_anonymous = new LabelObj();
+ generate_referent_check(access, Lcont_anonymous);
+ }
+ pre_barrier(gen, access.access_emit_info(), decorators, LIR_OprFact::illegalOpr /* addr_opr */,
+ result /* pre_val */);
+ if (is_anonymous) {
+ __ branch_destination(Lcont_anonymous->label());
}
- }
+ }
}
class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
index f561b760596ed035b0e3d80af5b5fbeffbde3a70..1bf51fbf3b83638f8adb0fc1b3cfe773281baa7c 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp
@@ -551,7 +551,7 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val
}
}
- // 3: apply keep-alive barrier if needed
+ // 3: apply keep-alive barrier for java.lang.ref.Reference if needed
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
Node* top = Compile::current()->top();
Node* adr = access.addr().node();
diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp
index abc4a7cfdc9db1dc76c0f0d33688305426901775..00e71f4afc491f6da0be68dd4c03fdb7c017fe91 100644
--- a/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp
+++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp
@@ -34,6 +34,11 @@
#include "runtime/java.hpp"
void ShenandoahIUMode::initialize_flags() const {
+ if (FLAG_IS_CMDLINE(ClassUnloadingWithConcurrentMark) && ClassUnloading) {
+ log_warning(gc)("Shenandoah I-U mode sets -XX:-ClassUnloadingWithConcurrentMark; see JDK-8261341 for details");
+ }
+ FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
+
if (ClassUnloading) {
FLAG_SET_DEFAULT(ShenandoahSuspendibleWorkers, true);
FLAG_SET_DEFAULT(VerifyBeforeExit, false);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fb0de2bc741066cd3a798031d94b31b09dc985f2
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/concurrentGCBreakpoints.hpp"
+#include "gc/shenandoah/shenandoahBreakpoint.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/debug.hpp"
+
+bool ShenandoahBreakpoint::_start_gc = false;
+
+void ShenandoahBreakpoint::start_gc() {
+ MonitorLocker ml(ConcurrentGCBreakpoints::monitor());
+ assert(ConcurrentGCBreakpoints::is_controlled(), "Invalid state");
+ assert(!_start_gc, "Invalid state");
+ _start_gc = true;
+ ml.notify_all();
+}
+
+void ShenandoahBreakpoint::at_before_gc() {
+ MonitorLocker ml(ConcurrentGCBreakpoints::monitor(), Mutex::_no_safepoint_check_flag);
+ while (ConcurrentGCBreakpoints::is_controlled() && !_start_gc) {
+ ml.wait();
+ }
+ _start_gc = false;
+ ConcurrentGCBreakpoints::notify_idle_to_active();
+}
+
+void ShenandoahBreakpoint::at_after_gc() {
+ ConcurrentGCBreakpoints::notify_active_to_idle();
+}
+
+void ShenandoahBreakpoint::at_after_marking_started() {
+ ConcurrentGCBreakpoints::at("AFTER MARKING STARTED");
+}
+
+void ShenandoahBreakpoint::at_before_marking_completed() {
+ ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED");
+}
+
+void ShenandoahBreakpoint::at_after_reference_processing_started() {
+ ConcurrentGCBreakpoints::at("AFTER CONCURRENT REFERENCE PROCESSING STARTED");
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f8b7489a3bbd0e7aedc4d5b1fe599f531629aedc
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.hpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHBREAKPOINT_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHBREAKPOINT_HPP
+
+#include "memory/allocation.hpp"
+
+class ShenandoahBreakpoint : public AllStatic {
+private:
+ static bool _start_gc;
+
+public:
+ static void start_gc();
+
+ static void at_before_gc();
+ static void at_after_gc();
+ static void at_after_marking_started();
+ static void at_before_marking_completed();
+ static void at_after_reference_processing_started();
+};
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBREAKPOINT_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
index 385408d7fd56236b8925129a28d773f6698b2e1b..5f269a7f64a2372d24cdb75d53d2d9e335e26d8d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
@@ -110,27 +110,25 @@ void ShenandoahCollectionSet::clear() {
}
ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
- size_t num_regions = _heap->num_regions();
- if (_current_index >= (jint)num_regions) {
- return NULL;
- }
+  // This code is optimized for the case when the collection set contains
+  // only a few regions. In that case, it is more efficient to check is_in
+  // first, before hitting the (potentially contended) atomic index.
- jint saved_current = _current_index;
- size_t index = (size_t)saved_current;
+ size_t max = _heap->num_regions();
+ size_t old = Atomic::load(&_current_index);
- while(index < num_regions) {
+ for (size_t index = old; index < max; index++) {
if (is_in(index)) {
- jint cur = Atomic::cmpxchg(&_current_index, saved_current, (jint)(index + 1));
- assert(cur >= (jint)saved_current, "Must move forward");
- if (cur == saved_current) {
- assert(is_in(index), "Invariant");
+ size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
+ assert(cur >= old, "Always move forward");
+ if (cur == old) {
+        // Successfully moved the claim index; this is our region.
return _heap->get_region(index);
} else {
- index = (size_t)cur;
- saved_current = cur;
+        // Somebody else moved the claim index; restart from there.
+ index = cur - 1; // adjust for loop post-increment
+ old = cur;
}
- } else {
- index ++;
}
}
return NULL;
@@ -139,10 +137,11 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
assert(Thread::current()->is_VM_thread(), "Must be VMThread");
- size_t num_regions = _heap->num_regions();
- for (size_t index = (size_t)_current_index; index < num_regions; index ++) {
+
+ size_t max = _heap->num_regions();
+ for (size_t index = _current_index; index < max; index++) {
if (is_in(index)) {
- _current_index = (jint)(index + 1);
+ _current_index = index + 1;
return _heap->get_region(index);
}
}
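A standalone analogue of the claim loop: test the cheap membership predicate first, and only touch the shared cursor with a relaxed CAS for regions actually in the set. The predicate is a stand-in for is_in:

#include <atomic>
#include <cstddef>

std::atomic<size_t> current_index{0};

template <typename Pred>
size_t claim_next_sketch(size_t max, Pred is_in) {
  size_t old = current_index.load(std::memory_order_relaxed);
  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      if (current_index.compare_exchange_strong(old, index + 1,
                                                std::memory_order_relaxed)) {
        return index;   // claim succeeded; this one is ours
      }
      // A failed CAS stores the winner's cursor into 'old'; rescan from it.
      index = old - 1;  // adjust for the loop post-increment
    }
  }
  return max;           // nothing left to claim
}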
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
index 56e96522094fdca10b2de5b0a65b4755be8aa9b6..8ac2d9fb2eacb4cc95c094fb64958c4b1cd4b7bf 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
@@ -26,6 +26,7 @@
#define SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP
#include "memory/allocation.hpp"
+#include "memory/virtualspace.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
@@ -47,7 +48,7 @@ private:
size_t _region_count;
shenandoah_padding(0);
- volatile jint _current_index;
+ volatile size_t _current_index;
shenandoah_padding(1);
public:
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index 24bea946506977a35ccfe67378230b9b18ea29f5..8982a4e23096cf5a41b13733f5c6db38d58cc924 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -26,6 +26,7 @@
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectorCounters.hpp"
+#include "gc/shenandoah/shenandoahBreakpoint.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
@@ -42,9 +43,34 @@
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
+#include "memory/allocation.hpp"
#include "prims/jvmtiTagMap.hpp"
+#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"
+// Breakpoint support
+class ShenandoahBreakpointGCScope : public StackObj {
+public:
+ ShenandoahBreakpointGCScope() {
+ ShenandoahBreakpoint::at_before_gc();
+ }
+
+ ~ShenandoahBreakpointGCScope() {
+ ShenandoahBreakpoint::at_after_gc();
+ }
+};
+
+class ShenandoahBreakpointMarkScope : public StackObj {
+public:
+ ShenandoahBreakpointMarkScope() {
+ ShenandoahBreakpoint::at_after_marking_started();
+ }
+
+ ~ShenandoahBreakpointMarkScope() {
+ ShenandoahBreakpoint::at_before_marking_completed();
+ }
+};
+
ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
_mark(),
_degen_point(ShenandoahDegenPoint::_degenerated_unset) {
@@ -60,6 +86,10 @@ void ShenandoahConcurrentGC::cancel() {
bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
+ if (cause == GCCause::_wb_breakpoint) {
+ ShenandoahBreakpoint::start_gc();
+ }
+ ShenandoahBreakpointGCScope breakpoint_gc_scope;
// Reset for upcoming marking
entry_reset();
@@ -67,13 +97,16 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
// Start initial mark under STW
vmop_entry_init_mark();
+ {
+ ShenandoahBreakpointMarkScope breakpoint_mark_scope;
// Concurrent mark roots
- entry_mark_roots();
- if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
+ entry_mark_roots();
+ if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
- // Continue concurrent mark
- entry_mark();
- if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
+ // Continue concurrent mark
+ entry_mark();
+ if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
+ }
// Complete marking under STW, and start evacuation
vmop_entry_final_mark();
@@ -621,6 +654,7 @@ void ShenandoahConcurrentGC::op_weak_refs() {
assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
// Concurrent weak refs processing
ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs);
+ ShenandoahBreakpoint::at_after_reference_processing_started();
heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */);
}
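
The two breakpoint scopes introduced in this file follow the usual HotSpot RAII idiom: the constructor fires the "entry" notification and the destructor fires the "exit" notification, so every early return inside `collect()` still reports a balanced pair. A minimal standalone sketch of the same idiom (names are illustrative stand-ins, not the HotSpot API):

```cpp
#include <cstdio>

// Illustrative stand-ins for ShenandoahBreakpoint::at_before_gc()/at_after_gc().
void notify_before() { std::puts("before-gc breakpoint"); }
void notify_after()  { std::puts("after-gc breakpoint"); }

// Stack-allocated guard: notifications stay balanced on every exit path.
class BreakpointScope {
public:
  BreakpointScope()  { notify_before(); }
  ~BreakpointScope() { notify_after(); }
};

bool collect(bool cancelled) {
  BreakpointScope scope;        // fires "before" here
  if (cancelled) return false;  // destructor still fires "after"
  // ... rest of the cycle ...
  return true;
}                               // destructor fires "after" here too

int main() {
  collect(true);
  collect(false);
}
```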
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
index 1a3030e1d24fc4cd730a2f1e8a27335fca0dddcb..fdc6943a9d2b89398c5efbba1c40d6d8fcca1e1b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
@@ -41,6 +40,7 @@
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
@@ -100,7 +100,7 @@ void ShenandoahControlThread::run_service() {
bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);
// This control loop iteration has seen this many allocations.
- size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0);
+ size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed);
// Check if we have seen a new target for soft max heap size.
bool soft_max_changed = check_soft_max_changed();
@@ -478,6 +478,7 @@ void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
cause == GCCause::_metadata_GC_clear_soft_refs ||
cause == GCCause::_full_gc_alot ||
cause == GCCause::_wb_full_gc ||
+ cause == GCCause::_wb_breakpoint ||
cause == GCCause::_scavenge_alot,
"only requested GCs here");
@@ -506,7 +507,10 @@ void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
while (current_gc_id < required_gc_id) {
_gc_requested.set();
_requested_gc_cause = cause;
- ml.wait();
+
+ if (cause != GCCause::_wb_breakpoint) {
+ ml.wait();
+ }
current_gc_id = get_gc_id();
}
}
@@ -595,7 +599,7 @@ void ShenandoahControlThread::notify_heap_changed() {
void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
assert(ShenandoahPacing, "should only call when pacing is enabled");
- Atomic::add(&_allocs_seen, words);
+ Atomic::add(&_allocs_seen, words, memory_order_relaxed);
}
void ShenandoahControlThread::set_forced_counters_update(bool value) {
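
Several hunks in this file add an explicit `memory_order_relaxed` argument to `Atomic::add`/`Atomic::xchg` calls that only maintain statistics counters such as `_allocs_seen`. Relaxed ordering suffices because no other data is published through these counters; only the count itself must stay consistent. A sketch of the same pattern with standard C++ atomics (illustrative, not HotSpot's `Atomic` API):

```cpp
#include <atomic>
#include <cstddef>

// Statistics counter analogous to _allocs_seen: many mutator threads add,
// the control thread periodically drains it with an exchange.
std::atomic<size_t> allocs_seen{0};

void on_alloc(size_t words) {
  // Relaxed is enough: nothing else is published via this counter.
  allocs_seen.fetch_add(words, std::memory_order_relaxed);
}

size_t drain() {
  // Atomically read-and-reset, again with relaxed ordering.
  return allocs_seen.exchange(0, std::memory_order_relaxed);
}

int main() {
  on_alloc(16);
  on_alloc(8);
  return drain() == 24 ? 0 : 1;
}
```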
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index e7b87f7dcbf32068ad01c5176e4d06abaee09469..e54feb68298655525e2ab22fe50176480e26b41b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -48,7 +48,7 @@
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
-#include "memory/metaspace.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
@@ -179,15 +179,14 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
ShenandoahReferenceProcessor* rp = heap->ref_processor();
rp->abandon_partial_discovery();
- // f. Set back forwarded objects bit back, in case some steps above dropped it.
- heap->set_has_forwarded_objects(has_forwarded_objects);
-
- // g. Sync pinned region status from the CP marks
+ // f. Sync pinned region status from the CP marks
heap->sync_pinned_region_status();
// The rest of prologue:
BiasedLocking::preserve_marks();
_preserved_marks->init(heap->workers()->active_workers());
+
+ assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
}
if (UseTLAB) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 375bbd441b68ddd5eda9fd9af6387944b4af341a..43af68534c71bef6a8ccdc5027a3a3fc568ce0a2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -73,6 +73,7 @@
#include "classfile/systemDictionary.hpp"
#include "memory/classLoaderMetaspace.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
@@ -620,12 +621,11 @@ void ShenandoahHeap::post_initialize() {
}
size_t ShenandoahHeap::used() const {
- return Atomic::load_acquire(&_used);
+ return Atomic::load(&_used);
}
size_t ShenandoahHeap::committed() const {
- OrderAccess::acquire();
- return _committed;
+ return Atomic::load(&_committed);
}
void ShenandoahHeap::increase_committed(size_t bytes) {
@@ -639,20 +639,20 @@ void ShenandoahHeap::decrease_committed(size_t bytes) {
}
void ShenandoahHeap::increase_used(size_t bytes) {
- Atomic::add(&_used, bytes);
+ Atomic::add(&_used, bytes, memory_order_relaxed);
}
void ShenandoahHeap::set_used(size_t bytes) {
- Atomic::release_store_fence(&_used, bytes);
+ Atomic::store(&_used, bytes);
}
void ShenandoahHeap::decrease_used(size_t bytes) {
assert(used() >= bytes, "never decrease heap size by more than we've left");
- Atomic::sub(&_used, bytes);
+ Atomic::sub(&_used, bytes, memory_order_relaxed);
}
void ShenandoahHeap::increase_allocated(size_t bytes) {
- Atomic::add(&_bytes_allocated_since_gc_start, bytes);
+ Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}
void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
@@ -1498,8 +1498,8 @@ public:
size_t stride = ShenandoahParallelRegionStride;
size_t max = _heap->num_regions();
- while (_index < max) {
- size_t cur = Atomic::fetch_and_add(&_index, stride);
+ while (Atomic::load(&_index) < max) {
+ size_t cur = Atomic::fetch_and_add(&_index, stride, memory_order_relaxed);
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
@@ -1883,11 +1883,11 @@ address ShenandoahHeap::gc_state_addr() {
}
size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
- return Atomic::load_acquire(&_bytes_allocated_since_gc_start);
+ return Atomic::load(&_bytes_allocated_since_gc_start);
}
void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
- Atomic::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
+ Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}
void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
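
The parallel-iteration hunk above also reloads `_index` atomically in the loop condition instead of reading the field directly. The claim loop itself is the usual stride-claiming pattern: each worker grabs a chunk of indices with one fetch-and-add and re-checks the bound afterwards. A simplified single-threaded sketch (illustrative names, not the HotSpot API):

```cpp
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdio>

std::atomic<size_t> next_index{0};

// Each worker claims [cur, cur + stride) until the range is exhausted.
void worker(size_t max, size_t stride) {
  while (next_index.load(std::memory_order_relaxed) < max) {
    size_t cur = next_index.fetch_add(stride, std::memory_order_relaxed);
    if (cur >= max) break;  // another worker raced us past the end
    size_t end = std::min(cur + stride, max);
    for (size_t i = cur; i < end; i++) {
      std::printf("process region %zu\n", i);
    }
  }
}

int main() {
  worker(10, 4);  // single-threaded demo; real use runs many workers
}
```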
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index d721d4c5ff5d82a004cbf21cdd1e3e7c7c475065..d0dda89a9e9fba81a4fc6a7435873e7e4342c220 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -162,6 +162,11 @@ public:
void prepare_for_verify();
void verify(VerifyOption vo);
+ // WhiteBox testing support.
+ bool supports_concurrent_gc_breakpoints() const {
+ return true;
+ }
+
// ---------- Heap counters and metrics
//
private:
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
index cea6c322cbadb4db4099c2505a05e3d096670c4c..c39737c15b84398164205183f6ab11baba3bbd6e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
@@ -54,7 +54,7 @@ inline ShenandoahHeap* ShenandoahHeap::heap() {
}
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
- size_t new_index = Atomic::add(&_index, (size_t) 1);
+ size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
// get_region() provides the bounds-check and returns NULL on OOB.
return _heap->get_region(new_index - 1);
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
index c02c00b7d4f28c2c674d464ec3daadd3dc4ef25f..461aa30fcbee1d2a84c7149cf04467b355e9b86c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
@@ -80,7 +80,7 @@ inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) {
}
inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
- size_t new_live_data = Atomic::add(&_live_data, s);
+ size_t new_live_data = Atomic::add(&_live_data, s, memory_order_relaxed);
#ifdef ASSERT
size_t live_bytes = new_live_data * HeapWordSize;
size_t used_bytes = used();
@@ -90,11 +90,11 @@ inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
}
inline void ShenandoahHeapRegion::clear_live_data() {
- Atomic::release_store_fence(&_live_data, (size_t)0);
+ Atomic::store(&_live_data, (size_t)0);
}
inline size_t ShenandoahHeapRegion::get_live_data_words() const {
- return Atomic::load_acquire(&_live_data);
+ return Atomic::load(&_live_data);
}
inline size_t ShenandoahHeapRegion::get_live_data_bytes() const {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp
index 438441a400f7af6703a5279dbf18dbcee263ad32..fe34b629637d549b33a36ff116ddfa7d4eeeb433 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp
@@ -33,17 +33,10 @@
ShenandoahHeapRegionSetIterator::ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set) :
_set(set), _heap(ShenandoahHeap::heap()), _current_index(0) {}
-void ShenandoahHeapRegionSetIterator::reset(const ShenandoahHeapRegionSet* const set) {
- _set = set;
- _current_index = 0;
-}
-
ShenandoahHeapRegionSet::ShenandoahHeapRegionSet() :
_heap(ShenandoahHeap::heap()),
_map_size(_heap->num_regions()),
- _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
_set_map(NEW_C_HEAP_ARRAY(jbyte, _map_size, mtGC)),
- _biased_set_map(_set_map - ((uintx)_heap->base() >> _region_size_bytes_shift)),
_region_count(0)
{
// Use 1-byte data type
@@ -58,83 +51,40 @@ ShenandoahHeapRegionSet::~ShenandoahHeapRegionSet() {
}
void ShenandoahHeapRegionSet::add_region(ShenandoahHeapRegion* r) {
- assert(!is_in(r), "Already in collection set");
+ assert(!is_in(r), "Already in region set");
_set_map[r->index()] = 1;
_region_count++;
}
-bool ShenandoahHeapRegionSet::add_region_check_for_duplicates(ShenandoahHeapRegion* r) {
- if (!is_in(r)) {
- add_region(r);
- return true;
- } else {
- return false;
- }
-}
-
void ShenandoahHeapRegionSet::remove_region(ShenandoahHeapRegion* r) {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
assert(Thread::current()->is_VM_thread(), "Must be VMThread");
assert(is_in(r), "Not in region set");
_set_map[r->index()] = 0;
- _region_count --;
+ _region_count--;
}
void ShenandoahHeapRegionSet::clear() {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
Copy::zero_to_bytes(_set_map, _map_size);
-
_region_count = 0;
}
-ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::claim_next() {
- size_t num_regions = _heap->num_regions();
- if (_current_index >= (jint)num_regions) {
- return NULL;
- }
-
- jint saved_current = _current_index;
- size_t index = (size_t)saved_current;
-
- while(index < num_regions) {
- if (_set->is_in(index)) {
- jint cur = Atomic::cmpxchg(&_current_index, saved_current, (jint)(index + 1));
- assert(cur >= (jint)saved_current, "Must move forward");
- if (cur == saved_current) {
- assert(_set->is_in(index), "Invariant");
- return _heap->get_region(index);
- } else {
- index = (size_t)cur;
- saved_current = cur;
- }
- } else {
- index ++;
- }
- }
- return NULL;
-}
-
ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::next() {
- size_t num_regions = _heap->num_regions();
- for (size_t index = (size_t)_current_index; index < num_regions; index ++) {
+ for (size_t index = _current_index; index < _heap->num_regions(); index++) {
if (_set->is_in(index)) {
- _current_index = (jint)(index + 1);
+ _current_index = index + 1;
return _heap->get_region(index);
}
}
-
return NULL;
}
void ShenandoahHeapRegionSet::print_on(outputStream* out) const {
out->print_cr("Region Set : " SIZE_FORMAT "", count());
-
- debug_only(size_t regions = 0;)
- for (size_t index = 0; index < _heap->num_regions(); index ++) {
+ for (size_t index = 0; index < _heap->num_regions(); index++) {
if (is_in(index)) {
_heap->get_region(index)->print_on(out);
- debug_only(regions ++;)
}
}
- assert(regions == count(), "Must match");
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp
index e910c6d21f771cd1a9fb4bbf69232baa993237f3..d933fda60b1779af328f071590511893c9b1d8e8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp
@@ -37,10 +37,7 @@ class ShenandoahHeapRegionSetIterator : public StackObj {
private:
const ShenandoahHeapRegionSet* _set;
ShenandoahHeap* const _heap;
-
- shenandoah_padding(0);
- volatile jint _current_index;
- shenandoah_padding(1);
+ size_t _current_index;
// No implicit copying: iterators should be passed by reference to capture the state
NONCOPYABLE(ShenandoahHeapRegionSetIterator);
@@ -48,12 +45,6 @@ private:
public:
ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set);
- // Reset existing iterator to new set
- void reset(const ShenandoahHeapRegionSet* const set);
-
- // MT version
- ShenandoahHeapRegion* claim_next();
-
// Single-thread version
ShenandoahHeapRegion* next();
};
@@ -63,21 +54,14 @@ class ShenandoahHeapRegionSet : public CHeapObj<mtGC> {
private:
ShenandoahHeap* const _heap;
size_t const _map_size;
- size_t const _region_size_bytes_shift;
jbyte* const _set_map;
- // Bias set map's base address for fast test if an oop is in set
- jbyte* const _biased_set_map;
size_t _region_count;
public:
ShenandoahHeapRegionSet();
~ShenandoahHeapRegionSet();
- // Add region to set
void add_region(ShenandoahHeapRegion* r);
- bool add_region_check_for_duplicates(ShenandoahHeapRegion* r);
-
- // Remove region from set
void remove_region(ShenandoahHeapRegion* r);
size_t count() const { return _region_count; }
@@ -85,16 +69,10 @@ public:
inline bool is_in(ShenandoahHeapRegion* r) const;
inline bool is_in(size_t region_idx) const;
- inline bool is_in(oop p) const;
void print_on(outputStream* out) const;
void clear();
-
-private:
- jbyte* biased_map_address() const {
- return _biased_set_map;
- }
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp
index d6781c4ad78a3e77babb70bce29d5711e61e2306..84f58d3018949d1a08effb9f7e22ab08f441ebe7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp
@@ -25,10 +25,8 @@
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP
-#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
-#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
bool ShenandoahHeapRegionSet::is_in(size_t region_idx) const {
@@ -40,12 +38,4 @@ bool ShenandoahHeapRegionSet::is_in(ShenandoahHeapRegion* r) const {
return is_in(r->index());
}
-bool ShenandoahHeapRegionSet::is_in(oop p) const {
- shenandoah_assert_in_heap(NULL, p);
- uintx index = (cast_from_oop<uintx>(p)) >> _region_size_bytes_shift;
- // no need to subtract the bottom of the heap from p,
- // _biased_set_map is biased
- return _biased_set_map[index] == 1;
-}
-
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp
index a37523f8197f675634cc211829de8c6555c7a476..a71cad75baedd87d72fb4c9480c049940ef5205c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp
@@ -82,8 +82,6 @@ private:
return map() + to_words_align_down(bit);
}
- static inline const bm_word_t load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order);
-
bool at(idx_t index) const {
verify_index(index);
return (*word_addr(index) & bit_mask(index)) != 0;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp
index b9e0bb61f54c57bfa8a83efa003f63a5063f5d89..9dd3f7298993f7a40ccb042e8703588f854e2000 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp
@@ -46,7 +46,7 @@ inline bool ShenandoahMarkBitMap::mark_strong(HeapWord* heap_addr, bool& was_upg
volatile bm_word_t* const addr = word_addr(bit);
const bm_word_t mask = bit_mask(bit);
const bm_word_t mask_weak = (bm_word_t)1 << (bit_in_word(bit) + 1);
- bm_word_t old_val = load_word_ordered(addr, memory_order_conservative);
+ bm_word_t old_val = Atomic::load(addr);
do {
const bm_word_t new_val = old_val | mask;
@@ -54,7 +54,7 @@ inline bool ShenandoahMarkBitMap::mark_strong(HeapWord* heap_addr, bool& was_upg
assert(!was_upgraded, "Should be false already");
return false; // Someone else beat us to it.
}
- const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_conservative);
+ const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_relaxed);
if (cur_val == old_val) {
was_upgraded = (cur_val & mask_weak) != 0;
return true; // Success.
@@ -71,7 +71,7 @@ inline bool ShenandoahMarkBitMap::mark_weak(HeapWord* heap_addr) {
volatile bm_word_t* const addr = word_addr(bit);
const bm_word_t mask_weak = (bm_word_t)1 << (bit_in_word(bit) + 1);
const bm_word_t mask_strong = (bm_word_t)1 << bit_in_word(bit);
- bm_word_t old_val = load_word_ordered(addr, memory_order_conservative);
+ bm_word_t old_val = Atomic::load(addr);
do {
if ((old_val & mask_strong) != 0) {
@@ -81,7 +81,7 @@ inline bool ShenandoahMarkBitMap::mark_weak(HeapWord* heap_addr) {
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
- const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_conservative);
+ const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_relaxed);
if (cur_val == old_val) {
return true; // Success.
}
@@ -107,18 +107,6 @@ inline bool ShenandoahMarkBitMap::is_marked(HeapWord* addr) const {
return (*word_addr(index) & mask) != 0;
}
-inline const ShenandoahMarkBitMap::bm_word_t ShenandoahMarkBitMap::load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order) {
- if (memory_order == memory_order_relaxed || memory_order == memory_order_release) {
- return Atomic::load(addr);
- } else {
- assert(memory_order == memory_order_acq_rel ||
- memory_order == memory_order_acquire ||
- memory_order == memory_order_conservative,
- "unexpected memory ordering");
- return Atomic::load_acquire(addr);
- }
-}
-
template <ShenandoahMarkBitMap::bm_word_t flip, bool aligned_right>
inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_next_bit_impl(idx_t l_index, idx_t r_index) const {
STATIC_ASSERT(flip == find_ones_flip || flip == find_zeros_flip);
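
With `load_word_ordered` gone, `mark_strong`/`mark_weak` read the bitmap word with a plain atomic load and retry through a relaxed compare-and-swap. A compact sketch of that set-bit-by-CAS loop using standard atomics (illustrative types, not the HotSpot bitmap):

```cpp
#include <atomic>
#include <cstdint>

using bm_word_t = uintptr_t;

// Returns true if this caller set the bit, false if it was already set.
bool set_bit(std::atomic<bm_word_t>& word, unsigned bit_in_word) {
  const bm_word_t mask = bm_word_t(1) << bit_in_word;
  bm_word_t old_val = word.load(std::memory_order_relaxed);
  do {
    if ((old_val & mask) != 0) {
      return false;  // someone else beat us to it
    }
    // compare_exchange_weak refreshes old_val with the current value on
    // failure, so the loop re-examines the freshly observed word.
  } while (!word.compare_exchange_weak(old_val, old_val | mask,
                                       std::memory_order_relaxed));
  return true;
}

int main() {
  std::atomic<bm_word_t> word{0};
  bool first  = set_bit(word, 3);  // true: we set the bit
  bool second = set_bit(word, 3);  // false: already set
  return (first && !second) ? 0 : 1;
}
```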
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
index d8866ebd6ddc9e19090dbffee75af750ccdd3757..b7553b7125ca753df2439afee8979e5210f56415 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp
@@ -30,6 +30,7 @@
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
+#include "runtime/threadSMR.hpp"
/*
* In normal concurrent cycle, we have to pace the application to let GC finish.
@@ -179,7 +180,7 @@ size_t ShenandoahPacer::update_and_get_progress_history() {
void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
- Atomic::xchg(&_budget, (intptr_t)initial);
+ Atomic::xchg(&_budget, (intptr_t)initial, memory_order_relaxed);
Atomic::store(&_tax_rate, tax_rate);
Atomic::inc(&_epoch);
@@ -201,14 +202,14 @@ bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) {
return false;
}
new_val = cur - tax;
- } while (Atomic::cmpxchg(&_budget, cur, new_val) != cur);
+ } while (Atomic::cmpxchg(&_budget, cur, new_val, memory_order_relaxed) != cur);
return true;
}
void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
- if (_epoch != epoch) {
+ if (Atomic::load(&_epoch) != epoch) {
// Stale ticket, no need to unpace.
return;
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp
index c416d85234fe9149c17d03d35d4a4c41bcf21ccc..783755a432026067f95e6c22ff564ac135e5c56a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp
@@ -53,13 +53,13 @@ inline void ShenandoahPacer::report_internal(size_t words) {
inline void ShenandoahPacer::report_progress_internal(size_t words) {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
- Atomic::add(&_progress, (intptr_t)words);
+ Atomic::add(&_progress, (intptr_t)words, memory_order_relaxed);
}
inline void ShenandoahPacer::add_budget(size_t words) {
STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
intptr_t inc = (intptr_t) words;
- intptr_t new_budget = Atomic::add(&_budget, inc);
+ intptr_t new_budget = Atomic::add(&_budget, inc, memory_order_relaxed);
// Was the budget replenished beyond zero? Then all pacing claims
// are satisfied, notify the waiters. Avoid taking any locks here,
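
The pacer hunks keep one signed atomic budget: mutators claim words out of it through the CAS loop shown in `claim_for_alloc` above, and the GC replenishes it with a relaxed add. A minimal sketch of that claim/replenish pair, under the simplifying assumption that an insufficient budget always fails the claim (the real code also has a `force` path):

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>

std::atomic<intptr_t> budget{0};

// Mutator side: try to take `tax` words out of the budget.
bool claim_for_alloc(intptr_t tax) {
  intptr_t cur = budget.load(std::memory_order_relaxed);
  intptr_t new_val;
  do {
    if (cur < tax) {
      return false;  // not enough budget, caller must pace itself
    }
    new_val = cur - tax;
  } while (!budget.compare_exchange_weak(cur, new_val,
                                         std::memory_order_relaxed));
  return true;
}

// GC side: report progress, replenishing the budget.
void add_budget(size_t words) {
  budget.fetch_add(static_cast<intptr_t>(words), std::memory_order_relaxed);
}

int main() {
  add_budget(100);
  bool ok   = claim_for_alloc(60);   // succeeds, budget drops to 40
  bool fail = !claim_for_alloc(60);  // fails, 40 < 60
  return (ok && fail) ? 0 : 1;
}
```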
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
index 9aa10d17d32ec6bb0bb23b56441f1a71cfe2fd36..91d5a278e1e0b617b27dae7b665848ed18ba7005 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
@@ -46,7 +46,7 @@ ShenandoahJavaThreadsIterator::ShenandoahJavaThreadsIterator(ShenandoahPhaseTimi
}
uint ShenandoahJavaThreadsIterator::claim() {
- return Atomic::fetch_and_add(&_claimed, _stride);
+ return Atomic::fetch_and_add(&_claimed, _stride, memory_order_relaxed);
}
void ShenandoahJavaThreadsIterator::threads_do(ThreadClosure* cl, uint worker_id) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
index 6992279c41e0993203245e70b0175566c7b314d6..ac835d55f6a1ede5999054777d764749f4916fa7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
@@ -55,69 +55,6 @@ ShenandoahGCStateResetter::~ShenandoahGCStateResetter() {
_heap->set_concurrent_weak_root_in_progress(_concurrent_weak_root_in_progress);
}
-// Check for overflow of number of root types.
-STATIC_ASSERT((static_cast<uint>(ShenandoahRootVerifier::AllRoots) + 1) > static_cast<uint>(ShenandoahRootVerifier::AllRoots));
-
-ShenandoahRootVerifier::ShenandoahRootVerifier(RootTypes types) : _types(types) {
- Threads::change_thread_claim_token();
-}
-
-void ShenandoahRootVerifier::excludes(RootTypes types) {
- _types = static_cast<RootTypes>(static_cast<uint>(_types) & (~static_cast<uint>(types)));
-}
-
-bool ShenandoahRootVerifier::verify(RootTypes type) const {
- return (_types & type) == type;
-}
-
-ShenandoahRootVerifier::RootTypes ShenandoahRootVerifier::combine(RootTypes t1, RootTypes t2) {
- return static_cast<RootTypes>(static_cast<uint>(t1) | static_cast<uint>(t2));
-}
-
-void ShenandoahRootVerifier::oops_do(OopClosure* oops) {
- ShenandoahGCStateResetter resetter;
-
- CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
- if (verify(CodeRoots)) {
- shenandoah_assert_locked_or_safepoint(CodeCache_lock);
- CodeCache::blobs_do(&blobs);
- }
-
- if (verify(CLDGRoots)) {
- shenandoah_assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
- CLDToOopClosure clds(oops, ClassLoaderData::_claim_none);
- ClassLoaderDataGraph::cld_do(&clds);
- }
-
- if (verify(SerialRoots)) {
- shenandoah_assert_safepoint();
- }
-
- if (verify(JNIHandleRoots)) {
- shenandoah_assert_safepoint();
- JNIHandles::oops_do(oops);
- Universe::vm_global()->oops_do(oops);
- }
-
- if (verify(WeakRoots)) {
- shenandoah_assert_safepoint();
- weak_roots_do(oops);
- }
-
- if (ShenandoahStringDedup::is_enabled() && verify(StringDedupRoots)) {
- shenandoah_assert_safepoint();
- ShenandoahStringDedup::oops_do_slow(oops);
- }
-
- if (verify(ThreadRoots)) {
- shenandoah_assert_safepoint();
- // Do thread roots the last. This allows verification code to find
- // any broken objects from those special roots first, not the accidental
- // dangling reference from the thread root.
- Threads::possibly_parallel_oops_do(false, oops, &blobs);
- }
-}
-
void ShenandoahRootVerifier::roots_do(OopClosure* oops) {
ShenandoahGCStateResetter resetter;
shenandoah_assert_safepoint();
@@ -128,35 +65,37 @@ void ShenandoahRootVerifier::roots_do(OopClosure* oops) {
CLDToOopClosure clds(oops, ClassLoaderData::_claim_none);
ClassLoaderDataGraph::cld_do(&clds);
- JNIHandles::oops_do(oops);
- Universe::vm_global()->oops_do(oops);
+ if (ShenandoahStringDedup::is_enabled()) {
+ ShenandoahStringDedup::oops_do_slow(oops);
+ }
+
+ for (auto id : EnumRange<OopStorageSet::Id>()) {
+ OopStorageSet::storage(id)->oops_do(oops);
+ }
// Do thread roots the last. This allows verification code to find
// any broken objects from those special roots first, not the accidental
// dangling reference from the thread root.
- Threads::possibly_parallel_oops_do(true, oops, &blobs);
+ Threads::possibly_parallel_oops_do(true, oops, NULL);
}
void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) {
ShenandoahGCStateResetter resetter;
shenandoah_assert_safepoint();
- CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
-
CLDToOopClosure clds(oops, ClassLoaderData::_claim_none);
- ClassLoaderDataGraph::roots_cld_do(&clds, NULL);
+ ClassLoaderDataGraph::always_strong_cld_do(&clds);
- JNIHandles::oops_do(oops);
- Universe::vm_global()->oops_do(oops);
+ if (ShenandoahStringDedup::is_enabled()) {
+ ShenandoahStringDedup::oops_do_slow(oops);
+ }
+ for (auto id : EnumRange<OopStorageSet::StrongId>()) {
+ OopStorageSet::storage(id)->oops_do(oops);
+ }
// Do thread roots the last. This allows verification code to find
// any broken objects from those special roots first, not the accidental
// dangling reference from the thread root.
+ CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
Threads::possibly_parallel_oops_do(true, oops, &blobs);
}
-
-void ShenandoahRootVerifier::weak_roots_do(OopClosure* cl) {
- for (auto id : EnumRange<OopStorageSet::WeakId>()) {
- OopStorageSet::storage(id)->oops_do(cl);
- }
-}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp
index d79624f6f37c51edc40d872b5ff1a6187dd860dd..d7ec54e5873f50414a519f6fb5d6fc536bd49bd5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp
@@ -39,38 +39,11 @@ public:
~ShenandoahGCStateResetter();
};
-class ShenandoahRootVerifier : public StackObj {
+class ShenandoahRootVerifier : public AllStatic {
public:
- enum RootTypes {
- None = 0,
- SerialRoots = 1 << 0,
- ThreadRoots = 1 << 1,
- CodeRoots = 1 << 2,
- CLDGRoots = 1 << 3,
- WeakRoots = 1 << 4,
- StringDedupRoots = 1 << 5,
- JNIHandleRoots = 1 << 6,
- AllRoots = (SerialRoots | ThreadRoots | CodeRoots | CLDGRoots | WeakRoots | StringDedupRoots | JNIHandleRoots)
- };
-
-private:
- RootTypes _types;
-
-public:
- ShenandoahRootVerifier(RootTypes types = AllRoots);
-
- void excludes(RootTypes types);
- void oops_do(OopClosure* cl);
-
// Used to seed ShenandoahVerifier, do not honor root type filter
- void roots_do(OopClosure* cl);
- void strong_roots_do(OopClosure* cl);
-
- static RootTypes combine(RootTypes t1, RootTypes t2);
-private:
- bool verify(RootTypes type) const;
-
- void weak_roots_do(OopClosure* cl);
+ static void roots_do(OopClosure* cl);
+ static void strong_roots_do(OopClosure* cl);
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTVERIFIER_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp
index e916690ec647d2e3a141b252b1f173b6ac1100ff..dc7af9cbeba7643901de0f6aec1dfbdc231fb6ad 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp
@@ -40,6 +40,7 @@
#include "gc/shenandoah/shenandoahUnload.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "memory/iterator.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
index 3f989df6f3b0b23fb3b6b062963c291f00d72917..fd58c92ee9bc4b1965280c563d06244dd14948c3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
@@ -35,11 +35,12 @@ class ShenandoahFullGC;
//
// VM_ShenandoahOperation
// - VM_ShenandoahInitMark: initiate concurrent marking
+// - VM_ShenandoahFinalMarkStartEvac: finish up concurrent marking, and start evacuation
+// - VM_ShenandoahInitUpdateRefs: initiate update references
+// - VM_ShenandoahFinalUpdateRefs: finish up update references
// - VM_ShenandoahReferenceOperation:
-// - VM_ShenandoahFinalMarkStartEvac: finish up concurrent marking, and start evacuation
-// - VM_ShenandoahInitUpdateRefs: initiate update references
-// - VM_ShenandoahFinalUpdateRefs: finish up update references
// - VM_ShenandoahFullGC: do full GC
+// - VM_ShenandoahDegeneratedGC: do STW degenerated GC
class VM_ShenandoahOperation : public VM_Operation {
protected:
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
index 3eb2bbc78c9da923f59c19c55d98f64c5f66ca59..0fd315ce8ddc08fef96948eff94dcc66f6ce85e7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -160,7 +160,7 @@ private:
// skip
break;
case ShenandoahVerifier::_verify_liveness_complete:
- Atomic::add(&_ld[obj_reg->index()], (uint) obj->size());
+ Atomic::add(&_ld[obj_reg->index()], (uint) obj->size(), memory_order_relaxed);
// fallthrough for fast failure for un-live regions:
case ShenandoahVerifier::_verify_liveness_conservative:
check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(),
@@ -424,7 +424,6 @@ public:
class ShenandoahVerifierReachableTask : public AbstractGangTask {
private:
const char* _label;
- ShenandoahRootVerifier* _verifier;
ShenandoahVerifier::VerifyOptions _options;
ShenandoahHeap* _heap;
ShenandoahLivenessData* _ld;
@@ -434,12 +433,10 @@ private:
public:
ShenandoahVerifierReachableTask(MarkBitMap* bitmap,
ShenandoahLivenessData* ld,
- ShenandoahRootVerifier* verifier,
const char* label,
ShenandoahVerifier::VerifyOptions options) :
AbstractGangTask("Shenandoah Verifier Reachable Objects"),
_label(label),
- _verifier(verifier),
_options(options),
_heap(ShenandoahHeap::heap()),
_ld(ld),
@@ -464,9 +461,9 @@ public:
ShenandoahMessageBuffer("%s, Roots", _label),
_options);
if (_heap->unload_classes()) {
- _verifier->strong_roots_do(&cl);
+ ShenandoahRootVerifier::strong_roots_do(&cl);
} else {
- _verifier->roots_do(&cl);
+ ShenandoahRootVerifier::roots_do(&cl);
}
}
@@ -483,7 +480,7 @@ public:
}
}
- Atomic::add(&_processed, processed);
+ Atomic::add(&_processed, processed, memory_order_relaxed);
}
};
@@ -512,7 +509,7 @@ public:
_processed(0) {};
size_t processed() {
- return _processed;
+ return Atomic::load(&_processed);
}
virtual void work(uint worker_id) {
@@ -522,7 +519,7 @@ public:
_options);
while (true) {
- size_t v = Atomic::fetch_and_add(&_claimed, 1u);
+ size_t v = Atomic::fetch_and_add(&_claimed, 1u, memory_order_relaxed);
if (v < _heap->num_regions()) {
ShenandoahHeapRegion* r = _heap->get_region(v);
if (!r->is_humongous() && !r->is_trash()) {
@@ -542,7 +539,7 @@ public:
if (_heap->complete_marking_context()->is_marked((oop)obj)) {
verify_and_follow(obj, stack, cl, &processed);
}
- Atomic::add(&_processed, processed);
+ Atomic::add(&_processed, processed, memory_order_relaxed);
}
virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) {
@@ -575,7 +572,7 @@ public:
}
}
- Atomic::add(&_processed, processed);
+ Atomic::add(&_processed, processed, memory_order_relaxed);
}
void verify_and_follow(HeapWord *addr, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl, size_t *processed) {
@@ -618,8 +615,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label,
VerifyForwarded forwarded, VerifyMarked marked,
VerifyCollectionSet cset,
VerifyLiveness liveness, VerifyRegions regions,
- VerifyGCState gcstate,
- VerifyWeakRoots weak_roots) {
+ VerifyGCState gcstate) {
guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens");
guarantee(ShenandoahVerify, "only when enabled, and bitmap is initialized in ShenandoahHeap::initialize");
@@ -713,8 +709,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label,
// This verifies what application can see, since it only cares about reachable objects.
size_t count_reachable = 0;
if (ShenandoahVerifyLevel >= 2) {
- ShenandoahRootVerifier verifier;
- ShenandoahVerifierReachableTask task(_verification_bit_map, ld, &verifier, label, options);
+ ShenandoahVerifierReachableTask task(_verification_bit_map, ld, label, options);
_heap->workers()->run_task(&task);
count_reachable = task.processed();
}
@@ -747,12 +742,12 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label,
if (r->is_humongous()) {
// For humongous objects, test if start region is marked live, and if so,
// all humongous regions in that chain have live data equal to their "used".
- juint start_live = Atomic::load_acquire(&ld[r->humongous_start_region()->index()]);
+ juint start_live = Atomic::load(&ld[r->humongous_start_region()->index()]);
if (start_live > 0) {
verf_live = (juint)(r->used() / HeapWordSize);
}
} else {
- verf_live = Atomic::load_acquire(&ld[r->index()]);
+ verf_live = Atomic::load(&ld[r->index()]);
}
size_t reg_live = r->get_live_data_words();
@@ -780,8 +775,7 @@ void ShenandoahVerifier::verify_generic(VerifyOption vo) {
_verify_cset_disable, // cset may be inconsistent
_verify_liveness_disable, // no reliable liveness data
_verify_regions_disable, // no reliable region data
- _verify_gcstate_disable, // no data about gcstate
- _verify_all_weak_roots
+ _verify_gcstate_disable // no data about gcstate
);
}
@@ -793,8 +787,7 @@ void ShenandoahVerifier::verify_before_concmark() {
_verify_cset_none, // UR should have fixed this
_verify_liveness_disable, // no reliable liveness data
_verify_regions_notrash, // no trash regions
- _verify_gcstate_stable, // there are no forwarded objects
- _verify_all_weak_roots
+ _verify_gcstate_stable // there are no forwarded objects
);
}
@@ -806,17 +799,11 @@ void ShenandoahVerifier::verify_after_concmark() {
_verify_cset_none, // no references to cset anymore
_verify_liveness_complete, // liveness data must be complete here
_verify_regions_disable, // trash regions not yet recycled
- _verify_gcstate_stable, // mark should have stabilized the heap
- _verify_all_weak_roots
+ _verify_gcstate_stable // mark should have stabilized the heap
);
}
void ShenandoahVerifier::verify_before_evacuation() {
- // Concurrent weak roots are evacuated during concurrent phase
- VerifyWeakRoots verify_weak_roots = _heap->unload_classes() ?
- _verify_serial_weak_roots :
- _verify_all_weak_roots;
-
verify_at_safepoint(
"Before Evacuation",
_verify_forwarded_none, // no forwarded references
@@ -824,17 +811,11 @@ void ShenandoahVerifier::verify_before_evacuation() {
_verify_cset_disable, // non-forwarded references to cset expected
_verify_liveness_complete, // liveness data must be complete here
_verify_regions_disable, // trash regions not yet recycled
- _verify_gcstate_stable, // mark should have stabilized the heap
- verify_weak_roots
+ _verify_gcstate_stable // mark should have stabilized the heap
);
}
void ShenandoahVerifier::verify_during_evacuation() {
- // Concurrent weak roots are evacuated during concurrent phase
- VerifyWeakRoots verify_weak_roots = _heap->unload_classes() ?
- _verify_serial_weak_roots :
- _verify_all_weak_roots;
-
verify_at_safepoint(
"During Evacuation",
_verify_forwarded_allow, // some forwarded references are allowed
@@ -842,8 +823,7 @@ void ShenandoahVerifier::verify_during_evacuation() {
_verify_cset_disable, // some cset references are not forwarded yet
_verify_liveness_disable, // liveness data might be already stale after pre-evacs
_verify_regions_disable, // trash regions not yet recycled
- _verify_gcstate_evacuation, // evacuation is in progress
- verify_weak_roots
+ _verify_gcstate_evacuation // evacuation is in progress
);
}
@@ -855,8 +835,7 @@ void ShenandoahVerifier::verify_after_evacuation() {
_verify_cset_forwarded, // all cset refs are fully forwarded
_verify_liveness_disable, // no reliable liveness data anymore
_verify_regions_notrash, // trash regions have been recycled already
- _verify_gcstate_forwarded, // evacuation produced some forwarded objects
- _verify_all_weak_roots
+ _verify_gcstate_forwarded // evacuation produced some forwarded objects
);
}
@@ -868,8 +847,7 @@ void ShenandoahVerifier::verify_before_updaterefs() {
_verify_cset_forwarded, // all cset refs are fully forwarded
_verify_liveness_disable, // no reliable liveness data anymore
_verify_regions_notrash, // trash regions have been recycled already
- _verify_gcstate_forwarded, // evacuation should have produced some forwarded objects
- _verify_all_weak_roots
+ _verify_gcstate_forwarded // evacuation should have produced some forwarded objects
);
}
@@ -881,8 +859,7 @@ void ShenandoahVerifier::verify_after_updaterefs() {
_verify_cset_none, // no cset references, all updated
_verify_liveness_disable, // no reliable liveness data anymore
_verify_regions_nocset, // no cset regions, trash regions have appeared
- _verify_gcstate_stable, // update refs had cleaned up forwarded objects
- _verify_all_weak_roots
+ _verify_gcstate_stable // update refs had cleaned up forwarded objects
);
}
@@ -894,8 +871,7 @@ void ShenandoahVerifier::verify_after_degenerated() {
_verify_cset_none, // no cset references
_verify_liveness_disable, // no reliable liveness data anymore
_verify_regions_notrash_nocset, // no trash, no cset
- _verify_gcstate_stable, // degenerated refs had cleaned up forwarded objects
- _verify_all_weak_roots
+ _verify_gcstate_stable // degenerated refs had cleaned up forwarded objects
);
}
@@ -907,8 +883,7 @@ void ShenandoahVerifier::verify_before_fullgc() {
_verify_cset_disable, // cset might be foobared
_verify_liveness_disable, // no reliable liveness data anymore
_verify_regions_disable, // no reliable region data here
- _verify_gcstate_disable, // no reliable gcstate data
- _verify_all_weak_roots
+ _verify_gcstate_disable // no reliable gcstate data
);
}
@@ -920,8 +895,7 @@ void ShenandoahVerifier::verify_after_fullgc() {
_verify_cset_none, // no cset references
_verify_liveness_disable, // no reliable liveness data anymore
_verify_regions_notrash_nocset, // no trash, no cset
- _verify_gcstate_stable, // full gc cleaned up everything
- _verify_all_weak_roots
+ _verify_gcstate_stable // full gc cleaned up everything
);
}
@@ -978,33 +952,11 @@ public:
};
void ShenandoahVerifier::verify_roots_in_to_space() {
- ShenandoahRootVerifier verifier;
- ShenandoahVerifyInToSpaceClosure cl;
- verifier.oops_do(&cl);
-}
-
-void ShenandoahVerifier::verify_roots_in_to_space_except(ShenandoahRootVerifier::RootTypes types) {
- ShenandoahRootVerifier verifier;
- verifier.excludes(types);
ShenandoahVerifyInToSpaceClosure cl;
- verifier.oops_do(&cl);
+ ShenandoahRootVerifier::roots_do(&cl);
}
void ShenandoahVerifier::verify_roots_no_forwarded() {
- ShenandoahRootVerifier verifier;
- ShenandoahVerifyNoForwared cl;
- verifier.oops_do(&cl);
-}
-
-void ShenandoahVerifier::verify_roots_no_forwarded(ShenandoahRootVerifier::RootTypes types) {
- ShenandoahRootVerifier verifier(types);
- ShenandoahVerifyNoForwared cl;
- verifier.oops_do(&cl);
-}
-
-void ShenandoahVerifier::verify_roots_no_forwarded_except(ShenandoahRootVerifier::RootTypes types) {
- ShenandoahRootVerifier verifier;
- verifier.excludes(types);
ShenandoahVerifyNoForwared cl;
- verifier.oops_do(&cl);
+ ShenandoahRootVerifier::roots_do(&cl);
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
index 838daf955b953a7f1f439ccdfb6139b3a32a8d84..9c9cd6117d5a73cffe7588e62e07b84023a47882 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
@@ -136,12 +136,6 @@ public:
_verify_gcstate_evacuation
} VerifyGCState;
- typedef enum {
- _verify_all_weak_roots,
- _verify_serial_weak_roots,
- _verify_concurrent_weak_roots
- } VerifyWeakRoots;
-
struct VerifyOptions {
VerifyForwarded _verify_forwarded;
VerifyMarked _verify_marked;
@@ -149,20 +143,17 @@ public:
VerifyLiveness _verify_liveness;
VerifyRegions _verify_regions;
VerifyGCState _verify_gcstate;
- VerifyWeakRoots _verify_weak_roots;
VerifyOptions(VerifyForwarded verify_forwarded,
VerifyMarked verify_marked,
VerifyCollectionSet verify_collection_set,
VerifyLiveness verify_liveness,
VerifyRegions verify_regions,
- VerifyGCState verify_gcstate,
- VerifyWeakRoots verify_weak_roots = _verify_all_weak_roots) :
+ VerifyGCState verify_gcstate) :
_verify_forwarded(verify_forwarded), _verify_marked(verify_marked),
_verify_cset(verify_collection_set),
_verify_liveness(verify_liveness), _verify_regions(verify_regions),
- _verify_gcstate(verify_gcstate),
- _verify_weak_roots(verify_weak_roots) {}
+ _verify_gcstate(verify_gcstate) {}
};
private:
@@ -172,8 +163,7 @@ private:
VerifyCollectionSet cset,
VerifyLiveness liveness,
VerifyRegions regions,
- VerifyGCState gcstate,
- VerifyWeakRoots weakRoots);
+ VerifyGCState gcstate);
public:
ShenandoahVerifier(ShenandoahHeap* heap, MarkBitMap* verification_bitmap) :
@@ -193,11 +183,8 @@ public:
// Roots should only contain to-space oops
void verify_roots_in_to_space();
- void verify_roots_in_to_space_except(ShenandoahRootVerifier::RootTypes types);
void verify_roots_no_forwarded();
- void verify_roots_no_forwarded(ShenandoahRootVerifier::RootTypes types);
- void verify_roots_no_forwarded_except(ShenandoahRootVerifier::RootTypes types);
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp
index 295a25b4a3ee4ca1cd35567564fa52f45604b8f1..538d739bf815c41a6a244e4a9c2aa3744757f4c1 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -71,16 +71,6 @@ ShenandoahPushWorkerScope::~ShenandoahPushWorkerScope() {
assert(nworkers == _old_workers, "Must be able to restore");
}
-ShenandoahPushWorkerQueuesScope::ShenandoahPushWorkerQueuesScope(WorkGang* workers, ShenandoahObjToScanQueueSet* queues, uint nworkers, bool check) :
- ShenandoahPushWorkerScope(workers, nworkers, check), _queues(queues) {
- _queues->reserve(_n_workers);
-}
-
-ShenandoahPushWorkerQueuesScope::~ShenandoahPushWorkerQueuesScope() {
- // Restore old worker value
- _queues->reserve(_old_workers);
-}
-
AbstractGangWorker* ShenandoahWorkGang::install_worker(uint which) {
AbstractGangWorker* worker = WorkGang::install_worker(which);
ShenandoahThreadLocalData::create(worker);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp
index 090676bc534969adab61086abc17129a1d87aad7..ab1ea8f5da2c7d565dd36eb33cb98ae62e3a8292 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,15 +51,6 @@ public:
~ShenandoahPushWorkerScope();
};
-class ShenandoahPushWorkerQueuesScope : public ShenandoahPushWorkerScope {
-private:
- ShenandoahObjToScanQueueSet* _queues;
-
-public:
- ShenandoahPushWorkerQueuesScope(WorkGang* workers, ShenandoahObjToScanQueueSet* queues, uint nworkers, bool do_check = true);
- ~ShenandoahPushWorkerQueuesScope();
-};
-
class ShenandoahWorkGang : public WorkGang {
private:
bool _initialize_gclab;
diff --git a/src/hotspot/share/gc/z/zCollectedHeap.cpp b/src/hotspot/share/gc/z/zCollectedHeap.cpp
index f07d4b4c7aaabab493b82d72f5ce1d1311fe4db2..24bd1e318166bde11e113569377d47ea2c3127e2 100644
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
diff --git a/src/hotspot/share/gc/z/zHeap.cpp b/src/hotspot/share/gc/z/zHeap.cpp
index 89b99181b0f0a3b3a56c6522f2c039cd2b5b097c..ef53d4725d08a476c838d1f49d0c12f8b63db75a 100644
--- a/src/hotspot/share/gc/z/zHeap.cpp
+++ b/src/hotspot/share/gc/z/zHeap.cpp
@@ -42,6 +42,7 @@
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/handshake.hpp"
diff --git a/src/hotspot/share/gc/z/zStat.cpp b/src/hotspot/share/gc/z/zStat.cpp
index afba6a9db309509722f5a870f0b57305c59fd2d9..7b1faab50008effcee7278fe3a96c6d325d93a5e 100644
--- a/src/hotspot/share/gc/z/zStat.cpp
+++ b/src/hotspot/share/gc/z/zStat.cpp
@@ -32,6 +32,7 @@
#include "gc/z/zStat.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zUtils.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
diff --git a/src/hotspot/share/gc/z/zUnload.cpp b/src/hotspot/share/gc/z/zUnload.cpp
index 072530a6c23439c8429feef1c8b597de7622ec6f..335b01721d97771723f2a8a75fee916ae410593d 100644
--- a/src/hotspot/share/gc/z/zUnload.cpp
+++ b/src/hotspot/share/gc/z/zUnload.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
#include "gc/z/zNMethod.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUnload.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "oops/access.inline.hpp"
static const ZStatSubPhase ZSubPhaseConcurrentClassesUnlink("Concurrent Classes Unlink");
diff --git a/src/hotspot/share/include/cds.h b/src/hotspot/share/include/cds.h
index 9187445329da76f86647f1ecdf0ed7c969686fc2..ee821eb73ac6678c180281ee44709a407c83ba06 100644
--- a/src/hotspot/share/include/cds.h
+++ b/src/hotspot/share/include/cds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,10 +33,10 @@
//
// Also, this is a C header file. Do not use C++ here.
-#define NUM_CDS_REGIONS 8 // this must be the same as MetaspaceShared::n_regions
+#define NUM_CDS_REGIONS 7 // this must be the same as MetaspaceShared::n_regions
#define CDS_ARCHIVE_MAGIC 0xf00baba2
#define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8
-#define CURRENT_CDS_ARCHIVE_VERSION 10
+#define CURRENT_CDS_ARCHIVE_VERSION 11
#define INVALID_CDS_ARCHIVE_VERSION -1
struct CDSFileMapRegion {
@@ -44,7 +44,7 @@ struct CDSFileMapRegion {
int _read_only; // read only region?
int _allow_exec; // executable code in this region?
int _is_heap_region; // Used by SA and debug build.
- int _is_bitmap_region; // Relocation bitmap for RO/RW/MC/MD regions (used by SA and debug build).
+ int _is_bitmap_region; // Relocation bitmap for RO/RW regions (used by SA and debug build).
int _mapped_from_file; // Is this region mapped from a file?
// If false, this region was initialized using os::read().
size_t _file_offset; // Data for this region starts at this offset in the archive file.
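
The region count and archive version move in lockstep here: dropping regions takes `NUM_CDS_REGIONS` from 8 to 7, and the layout change requires bumping `CURRENT_CDS_ARCHIVE_VERSION` so stale archives are rejected at load time. The comment's "must be the same as MetaspaceShared::n_regions" invariant is the kind of thing a compile-time check keeps honest; a hypothetical sketch, not the actual HotSpot check, with stand-ins for both sides:

```cpp
// Hypothetical consistency check in a C++ translation unit that can see both
// the C header constant and the MetaspaceShared declaration.
#define NUM_CDS_REGIONS 7           // stand-in for the value in cds.h

struct MetaspaceSharedStub {        // stand-in for MetaspaceShared
  static const int n_regions = 7;
};

static_assert(NUM_CDS_REGIONS == MetaspaceSharedStub::n_regions,
              "cds.h region count must match MetaspaceShared::n_regions");

int main() { return 0; }
```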
diff --git a/src/hotspot/share/interpreter/abstractInterpreter.cpp b/src/hotspot/share/interpreter/abstractInterpreter.cpp
index 3d47c60f02fb6265b345fe26f694fdd9b5402d33..9db41dc10ed8738fd038b070bd9af5b4840251a3 100644
--- a/src/hotspot/share/interpreter/abstractInterpreter.cpp
+++ b/src/hotspot/share/interpreter/abstractInterpreter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -92,7 +92,6 @@ address AbstractInterpreter::_native_entry_begin = NU
address AbstractInterpreter::_native_entry_end = NULL;
address AbstractInterpreter::_slow_signature_handler;
address AbstractInterpreter::_entry_table [AbstractInterpreter::number_of_method_entries];
-address AbstractInterpreter::_cds_entry_table [AbstractInterpreter::number_of_method_entries];
address AbstractInterpreter::_native_abi_to_tosca [AbstractInterpreter::number_of_result_handlers];
//------------------------------------------------------------------------------------------------------------------------
@@ -200,49 +199,11 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(const methodHan
return zerolocals;
}
-#if INCLUDE_CDS
-
-// For a shared Method m, to improve sharing across processes, we avoid writing to m->_i2i_entry
-// at runtime. Instead, m->_i2i_entry points to a fixed location inside the CDS archive.
-// This location contains a trampoline (generated by generate_entry_for_cds_method)
-// which jumps to _entry_table[kind].
-address AbstractInterpreter::entry_for_cds_method(const methodHandle& m) {
- MethodKind kind = method_kind(m);
- assert(0 <= kind && kind < number_of_method_entries, "illegal kind");
- return entry_for_cds_method(kind);
-}
-
-address AbstractInterpreter::entry_for_cds_method(AbstractInterpreter::MethodKind kind) {
- const size_t trampoline_size = SharedRuntime::trampoline_size();
- address addr = MetaspaceShared::i2i_entry_code_buffers();
- addr += (size_t)(kind) * trampoline_size;
-
- return addr;
-}
-
-void AbstractInterpreter::generate_entry_for_cds_method(AbstractInterpreter::MethodKind kind) {
- if (UseSharedSpaces) {
- address trampoline = entry_for_cds_method(kind);
- CodeBuffer buffer(trampoline, (int)(SharedRuntime::trampoline_size()));
- MacroAssembler _masm(&buffer);
- SharedRuntime::generate_trampoline(&_masm, _entry_table[kind]);
- _masm.flush();
-
- if (PrintInterpreter) {
- Disassembler::decode(buffer.insts_begin(), buffer.insts_end());
- }
- }
-}
-
-#endif
-
void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) {
assert(kind >= method_handle_invoke_FIRST &&
kind <= method_handle_invoke_LAST, "late initialization only for MH entry points");
assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry");
_entry_table[kind] = entry;
-
- generate_entry_for_cds_method(kind);
}
// Return true if the interpreter can prove that the given bytecode has
@@ -479,6 +440,5 @@ void AbstractInterpreter::initialize_method_handle_entries() {
for (int i = method_handle_invoke_FIRST; i <= method_handle_invoke_LAST; i++) {
MethodKind kind = (MethodKind) i;
_entry_table[kind] = _entry_table[Interpreter::abstract];
- Interpreter::generate_entry_for_cds_method(kind);
}
}
diff --git a/src/hotspot/share/interpreter/abstractInterpreter.hpp b/src/hotspot/share/interpreter/abstractInterpreter.hpp
index b565ade3b34a69b50d949cac59ae649046e2a219..2fb5b97e8086257e974a680296c957502c08ba57 100644
--- a/src/hotspot/share/interpreter/abstractInterpreter.hpp
+++ b/src/hotspot/share/interpreter/abstractInterpreter.hpp
@@ -116,7 +116,6 @@ class AbstractInterpreter: AllStatic {
// method entry points
static address _entry_table[number_of_method_entries]; // entry points for a given method
- static address _cds_entry_table[number_of_method_entries]; // entry points for methods in the CDS archive
static address _native_abi_to_tosca[number_of_result_handlers]; // for native method result handlers
static address _slow_signature_handler; // the native method generic (slow) signature handler
@@ -136,11 +135,6 @@ class AbstractInterpreter: AllStatic {
static address entry_for_kind(MethodKind k) { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; }
static address entry_for_method(const methodHandle& m) { return entry_for_kind(method_kind(m)); }
- // used by class data sharing
- static address entry_for_cds_method(const methodHandle& m) NOT_CDS_RETURN_(NULL);
- static address entry_for_cds_method(AbstractInterpreter::MethodKind kind) NOT_CDS_RETURN_(NULL);
- static void generate_entry_for_cds_method(MethodKind kind) NOT_CDS_RETURN;
-
// used for bootstrapping method handles:
static void set_entry_for_kind(MethodKind k, address e);
diff --git a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp
index 5228fb9eefbac5292448b277231abd438aaa478c..af4771e7a78307cf349dc5d9e638799bb3561daa 100644
--- a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp
+++ b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -181,7 +181,6 @@ void TemplateInterpreterGenerator::generate_all() {
#define method_entry(kind) \
{ CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
- Interpreter::generate_entry_for_cds_method(Interpreter::kind); \
}
// all non-native method kinds
diff --git a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp
index 774dee95830be80a10a8d79179f02678751b8bc9..c28efdbef53439f665a803ff94cc04f755245664 100644
--- a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp
+++ b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp
@@ -1636,46 +1636,74 @@ run:
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
OrderAccess::fence();
}
- if (tos_type == atos) {
- VERIFY_OOP(obj->obj_field_acquire(field_offset));
- SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
- } else if (tos_type == itos) {
- SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
- } else if (tos_type == ltos) {
- SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
- MORE_STACK(1);
- } else if (tos_type == btos || tos_type == ztos) {
- SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
- } else if (tos_type == ctos) {
- SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
- } else if (tos_type == stos) {
- SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
- } else if (tos_type == ftos) {
- SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
- } else {
- SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
- MORE_STACK(1);
+ switch (tos_type) {
+ case btos:
+ case ztos:
+ SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
+ break;
+ case ctos:
+ SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
+ break;
+ case stos:
+ SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
+ break;
+ case itos:
+ SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
+ break;
+ case ftos:
+ SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
+ break;
+ case ltos:
+ SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
+ MORE_STACK(1);
+ break;
+ case dtos:
+ SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
+ MORE_STACK(1);
+ break;
+ case atos: {
+ oop val = obj->obj_field_acquire(field_offset);
+ VERIFY_OOP(val);
+ SET_STACK_OBJECT(val, -1);
+ break;
+ }
+ default:
+ ShouldNotReachHere();
}
} else {
- if (tos_type == atos) {
- VERIFY_OOP(obj->obj_field(field_offset));
- SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
- } else if (tos_type == itos) {
- SET_STACK_INT(obj->int_field(field_offset), -1);
- } else if (tos_type == ltos) {
- SET_STACK_LONG(obj->long_field(field_offset), 0);
- MORE_STACK(1);
- } else if (tos_type == btos || tos_type == ztos) {
- SET_STACK_INT(obj->byte_field(field_offset), -1);
- } else if (tos_type == ctos) {
- SET_STACK_INT(obj->char_field(field_offset), -1);
- } else if (tos_type == stos) {
- SET_STACK_INT(obj->short_field(field_offset), -1);
- } else if (tos_type == ftos) {
- SET_STACK_FLOAT(obj->float_field(field_offset), -1);
- } else {
- SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
- MORE_STACK(1);
+ switch (tos_type) {
+ case btos:
+ case ztos:
+ SET_STACK_INT(obj->byte_field(field_offset), -1);
+ break;
+ case ctos:
+ SET_STACK_INT(obj->char_field(field_offset), -1);
+ break;
+ case stos:
+ SET_STACK_INT(obj->short_field(field_offset), -1);
+ break;
+ case itos:
+ SET_STACK_INT(obj->int_field(field_offset), -1);
+ break;
+ case ftos:
+ SET_STACK_FLOAT(obj->float_field(field_offset), -1);
+ break;
+ case ltos:
+ SET_STACK_LONG(obj->long_field(field_offset), 0);
+ MORE_STACK(1);
+ break;
+ case dtos:
+ SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
+ MORE_STACK(1);
+ break;
+ case atos: {
+ oop val = obj->obj_field(field_offset);
+ VERIFY_OOP(val);
+ SET_STACK_OBJECT(val, -1);
+ break;
+ }
+ default:
+ ShouldNotReachHere();
}
}
@@ -1745,49 +1773,75 @@ run:
//
int field_offset = cache->f2_as_index();
if (cache->is_volatile()) {
- if (tos_type == itos) {
- obj->release_int_field_put(field_offset, STACK_INT(-1));
- } else if (tos_type == atos) {
- VERIFY_OOP(STACK_OBJECT(-1));
- obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
- } else if (tos_type == btos) {
- obj->release_byte_field_put(field_offset, STACK_INT(-1));
- } else if (tos_type == ztos) {
- int bool_field = STACK_INT(-1); // only store LSB
- obj->release_byte_field_put(field_offset, (bool_field & 1));
- } else if (tos_type == ltos) {
- obj->release_long_field_put(field_offset, STACK_LONG(-1));
- } else if (tos_type == ctos) {
- obj->release_char_field_put(field_offset, STACK_INT(-1));
- } else if (tos_type == stos) {
- obj->release_short_field_put(field_offset, STACK_INT(-1));
- } else if (tos_type == ftos) {
- obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
- } else {
- obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
+ switch (tos_type) {
+ case ztos:
+ obj->release_byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB
+ break;
+ case btos:
+ obj->release_byte_field_put(field_offset, STACK_INT(-1));
+ break;
+ case ctos:
+ obj->release_char_field_put(field_offset, STACK_INT(-1));
+ break;
+ case stos:
+ obj->release_short_field_put(field_offset, STACK_INT(-1));
+ break;
+ case itos:
+ obj->release_int_field_put(field_offset, STACK_INT(-1));
+ break;
+ case ftos:
+ obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
+ break;
+ case ltos:
+ obj->release_long_field_put(field_offset, STACK_LONG(-1));
+ break;
+ case dtos:
+ obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
+ break;
+ case atos: {
+ oop val = STACK_OBJECT(-1);
+ VERIFY_OOP(val);
+ obj->release_obj_field_put(field_offset, val);
+ break;
+ }
+ default:
+ ShouldNotReachHere();
}
OrderAccess::storeload();
} else {
- if (tos_type == itos) {
- obj->int_field_put(field_offset, STACK_INT(-1));
- } else if (tos_type == atos) {
- VERIFY_OOP(STACK_OBJECT(-1));
- obj->obj_field_put(field_offset, STACK_OBJECT(-1));
- } else if (tos_type == btos) {
- obj->byte_field_put(field_offset, STACK_INT(-1));
- } else if (tos_type == ztos) {
- int bool_field = STACK_INT(-1); // only store LSB
- obj->byte_field_put(field_offset, (bool_field & 1));
- } else if (tos_type == ltos) {
- obj->long_field_put(field_offset, STACK_LONG(-1));
- } else if (tos_type == ctos) {
- obj->char_field_put(field_offset, STACK_INT(-1));
- } else if (tos_type == stos) {
- obj->short_field_put(field_offset, STACK_INT(-1));
- } else if (tos_type == ftos) {
- obj->float_field_put(field_offset, STACK_FLOAT(-1));
- } else {
- obj->double_field_put(field_offset, STACK_DOUBLE(-1));
+ switch (tos_type) {
+ case ztos:
+ obj->byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB
+ break;
+ case btos:
+ obj->byte_field_put(field_offset, STACK_INT(-1));
+ break;
+ case ctos:
+ obj->char_field_put(field_offset, STACK_INT(-1));
+ break;
+ case stos:
+ obj->short_field_put(field_offset, STACK_INT(-1));
+ break;
+ case itos:
+ obj->int_field_put(field_offset, STACK_INT(-1));
+ break;
+ case ftos:
+ obj->float_field_put(field_offset, STACK_FLOAT(-1));
+ break;
+ case ltos:
+ obj->long_field_put(field_offset, STACK_LONG(-1));
+ break;
+ case dtos:
+ obj->double_field_put(field_offset, STACK_DOUBLE(-1));
+ break;
+ case atos: {
+ oop val = STACK_OBJECT(-1);
+ VERIFY_OOP(val);
+ obj->obj_field_put(field_offset, val);
+ break;
+ }
+ default:
+ ShouldNotReachHere();
}
}
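
The getfield/putfield hunks above replace long if/else chains with switch statements over the top-of-stack (tos) tag, adding an explicit dtos case and a ShouldNotReachHere() default so an unexpected tag fails loudly instead of falling into the double case. A minimal standalone sketch of the pattern, with illustrative names rather than HotSpot's real TosState machinery:

#include <cstdio>
#include <cstdlib>

// Illustrative stand-ins for HotSpot's tos tags; not the real enum.
enum TosType { btos, ztos, ctos, stos, itos, ftos, ltos, dtos, atos };

// Dispatch a field store on the tos tag, as the rewritten hunks do.
static void store_int_like(TosType tos_type, int value) {
  switch (tos_type) {
    case ztos:
      std::printf("byte store (LSB only): %d\n", value & 1); // booleans keep only the low bit
      break;
    case btos:
    case ctos:
    case stos:
    case itos:
      std::printf("int-sized store: %d\n", value);
      break;
    default:
      std::abort(); // plays the role of ShouldNotReachHere()
  }
}

int main() {
  store_int_like(ztos, 3); // prints 1: only the LSB survives
  store_int_like(itos, 3); // prints 3
}
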
diff --git a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp
index 3bf90ccacbf84c79f04f389af2c7717e11c98d59..7f968bab58ab4d9f6944482776530c9e9cf36a6f 100644
--- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp
+++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp
@@ -1513,7 +1513,7 @@ static bool is_retransforming(const InstanceKlass* ik, TRAPS) {
assert(name != NULL, "invariant");
Handle class_loader(THREAD, ik->class_loader());
Handle protection_domain(THREAD, ik->protection_domain());
- return SystemDictionary::find(name, class_loader, protection_domain, THREAD) != NULL;
+ return SystemDictionary::find_instance_klass(name, class_loader, protection_domain) != NULL;
}
// target for JFR_ON_KLASS_CREATION hook
diff --git a/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp b/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp
index 950dfd40c1e4a72ea43cf40837ce00d3c9680ca7..85f6614ff5e9d5ebfac29cb60f73a2497420757f 100644
--- a/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp
+++ b/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp
@@ -34,14 +34,6 @@
#include "utilities/ostream.hpp"
#include <stdlib.h> // for environment variables
-#ifdef __APPLE__
-#include <crt_externs.h>
-#define environ (*_NSGetEnviron())
-#endif
-
-#ifndef environ
-extern char** environ;
-#endif
static JfrOSInterface* _instance = NULL;
@@ -281,14 +273,14 @@ const char* JfrOSInterface::virtualization_name() {
}
int JfrOSInterface::generate_initial_environment_variable_events() {
- if (environ == NULL) {
+ if (os::get_environ() == NULL) {
return OS_ERR;
}
if (EventInitialEnvironmentVariable::is_enabled()) {
// One time stamp for all events, so they can be grouped together
JfrTicks time_stamp = JfrTicks::now();
- for (char** p = environ; *p != NULL; p++) {
+ for (char** p = os::get_environ(); *p != NULL; p++) {
char* variable = *p;
char* equal_sign = strchr(variable, '=');
if (equal_sign != NULL) {
diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
index 38b028ac6247e0fd433c32f61388b66e43aceb7a..680460b88b28d3a5ea445aeaa9fead7505010d82 100644
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,7 @@
#include "jfr/utilities/jfrThreadIterator.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspace.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/referenceType.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
diff --git a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp
index 45a3db363387345ea2d4b9ada532e2f1eda5d73e..55c1b7ece4c5965f24f430063d0746e4ed111f15 100644
--- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp
+++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp
@@ -369,9 +369,10 @@ static void write_emergency_dump_file(const RepositoryIterator& iterator) {
if (copy_block == NULL) {
log_error(jfr, system)("Unable to malloc memory during jfr emergency dump");
log_error(jfr, system)("Unable to write jfr emergency dump file");
+ } else {
+ write_repository_files(iterator, copy_block, block_size);
+ os::free(copy_block);
}
- write_repository_files(iterator, copy_block, block_size);
- os::free(copy_block);
}
void JfrEmergencyDump::on_vm_error(const char* repository_path) {
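
The hunk above fixes a latent bug: on malloc failure the old code logged the error but still fell through and called write_repository_files() with a NULL block. A hedged sketch of the corrected shape (names are illustrative):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

static void dump_with_copy_block(std::size_t block_size) {
  char* copy_block = static_cast<char*>(std::malloc(block_size));
  if (copy_block == nullptr) {
    std::fprintf(stderr, "Unable to malloc memory during emergency dump\n");
    // Previously execution continued here and wrote with a NULL buffer.
  } else {
    // write_repository_files(iterator, copy_block, block_size);  // real work
    std::free(copy_block);
  }
}
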
diff --git a/src/hotspot/share/jvmci/jvmci.cpp b/src/hotspot/share/jvmci/jvmci.cpp
index 6c8a4ed16ed838c3f88eb684bc1dd5c343f6184b..e8fa7dbed262d58bf6a63a75a9e5c13b495ab02d 100644
--- a/src/hotspot/share/jvmci/jvmci.cpp
+++ b/src/hotspot/share/jvmci/jvmci.cpp
@@ -216,7 +216,9 @@ void JVMCI::vlog(int level, const char* format, va_list ap) {
StringEventLog* events = level == 1 ? _events : _verbose_events;
guarantee(events != NULL, "JVMCI event log not yet initialized");
Thread* thread = Thread::current_or_null_safe();
- events->logv(thread, format, ap);
+ if (thread != NULL) {
+ events->logv(thread, format, ap);
+ }
}
}
diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
index 90e7df47ecc9aebc00a11c170c3e8c5815226aee..1941160ed945d44df465318e1dfddab5ebd78282 100644
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
@@ -547,16 +547,17 @@ C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, jcla
// This is a name from a signature. Strip off the trimmings.
// Call recursive to keep scope of strippedsym.
TempNewSymbol strippedsym = Signature::strip_envelope(class_name);
- resolved_klass = SystemDictionary::find(strippedsym, class_loader, protection_domain, CHECK_NULL);
+ resolved_klass = SystemDictionary::find_instance_klass(strippedsym,
+ class_loader,
+ protection_domain);
} else if (Signature::is_array(class_name)) {
SignatureStream ss(class_name, false);
int ndim = ss.skip_array_prefix();
if (ss.type() == T_OBJECT) {
Symbol* strippedsym = ss.as_symbol();
- resolved_klass = SystemDictionary::find(strippedsym,
- class_loader,
- protection_domain,
- CHECK_NULL);
+ resolved_klass = SystemDictionary::find_instance_klass(strippedsym,
+ class_loader,
+ protection_domain);
if (!resolved_klass.is_null()) {
resolved_klass = resolved_klass->array_klass(ndim, CHECK_NULL);
}
@@ -564,7 +565,9 @@ C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, jcla
resolved_klass = TypeArrayKlass::cast(Universe::typeArrayKlassObj(ss.type()))->array_klass(ndim, CHECK_NULL);
}
} else {
- resolved_klass = SystemDictionary::find(class_name, class_loader, protection_domain, CHECK_NULL);
+ resolved_klass = SystemDictionary::find_instance_klass(class_name,
+ class_loader,
+ protection_domain);
}
}
JVMCIObject result = JVMCIENV->get_jvmci_type(resolved_klass, JVMCI_CHECK_NULL);
@@ -1230,7 +1233,7 @@ C2V_VMENTRY_NULL(jobject, iterateFrames, (JNIEnv* env, jobject compilerToVM, job
jobjectArray methods = initial_methods;
int frame_number = 0;
- vframe* vf = vframe::new_vframe(fst.current(), fst.register_map(), thread);
+ vframe* vf = vframe::new_vframe(fst, thread);
while (true) {
// look for the given method
@@ -1340,7 +1343,7 @@ C2V_VMENTRY_NULL(jobject, iterateFrames, (JNIEnv* env, jobject compilerToVM, job
if (fst.current()->sp() != stack_pointer) {
THROW_MSG_NULL(vmSymbols::java_lang_IllegalStateException(), "stack frame not found after deopt")
}
- vf = vframe::new_vframe(fst.current(), fst.register_map(), thread);
+ vf = vframe::new_vframe(fst, thread);
if (!vf->is_compiled_frame()) {
THROW_MSG_NULL(vmSymbols::java_lang_IllegalStateException(), "compiled stack frame expected")
}
@@ -1367,7 +1370,7 @@ C2V_VMENTRY_NULL(jobject, iterateFrames, (JNIEnv* env, jobject compilerToVM, job
break;
}
fst.next();
- vf = vframe::new_vframe(fst.current(), fst.register_map(), thread);
+ vf = vframe::new_vframe(fst, thread);
frame_number = 0;
} // end of frame loop
@@ -1575,91 +1578,22 @@ C2V_VMENTRY(void, materializeVirtualObjects, (JNIEnv* env, jobject, jobject _hs_
HotSpotJVMCI::HotSpotStackFrameReference::set_objectsMaterialized(JVMCIENV, hs_frame, JNI_TRUE);
C2V_END
-// Creates a scope where the current thread is attached and detached
-// from HotSpot if it wasn't already attached when entering the scope.
-extern "C" int jio_printf(const char *fmt, ...);
-class AttachDetach : public StackObj {
- public:
- bool _attached;
- AttachDetach(JNIEnv* env, JavaThread* current_thread) {
- if (current_thread == NULL) {
- extern struct JavaVM_ main_vm;
- JNIEnv* hotspotEnv;
- jint res = main_vm.AttachCurrentThread((void**)&hotspotEnv, NULL);
- _attached = res == JNI_OK;
- static volatile int report_attach_error = 0;
- if (res != JNI_OK && report_attach_error == 0 && Atomic::cmpxchg(&report_attach_error, 0, 1) == 0) {
- // Only report an attach error once
- jio_printf("Warning: attaching current thread to VM failed with %d (future attach errors are suppressed)\n", res);
- }
- } else {
- _attached = false;
- }
- }
- ~AttachDetach() {
- if (_attached && get_current_thread() != NULL) {
- extern struct JavaVM_ main_vm;
- jint res = main_vm.DetachCurrentThread();
- static volatile int report_detach_error = 0;
- if (res != JNI_OK && report_detach_error == 0 && Atomic::cmpxchg(&report_detach_error, 0, 1) == 0) {
- // Only report an attach error once
- jio_printf("Warning: detaching current thread from VM failed with %d (future attach errors are suppressed)\n", res);
- }
- }
- }
-};
-
-C2V_VMENTRY_PREFIX(jint, writeDebugOutput, (JNIEnv* env, jobject, jbyteArray bytes, jint offset, jint length, bool flush, bool can_throw))
- AttachDetach ad(env, thread);
- bool use_tty = true;
- if (thread == NULL) {
- if (!ad._attached) {
- // Can only use tty if the current thread is attached
- JVMCI_event_1("Cannot write to tty on unattached thread");
- return 0;
- }
- thread = get_current_thread();
- }
- JVMCITraceMark jtm("writeDebugOutput");
- C2V_BLOCK(void, writeDebugOutput, (JNIEnv* env, jobject, jbyteArray bytes, jint offset, jint length))
- if (bytes == NULL) {
- if (can_throw) {
- JVMCI_THROW_0(NullPointerException);
- }
- return -1;
- }
- JVMCIPrimitiveArray array = JVMCIENV->wrap(bytes);
-
- // Check if offset and length are non negative.
- if (offset < 0 || length < 0) {
- if (can_throw) {
- JVMCI_THROW_0(ArrayIndexOutOfBoundsException);
- }
- return -2;
- }
- // Check if the range is valid.
- int array_length = JVMCIENV->get_length(array);
- if ((((unsigned int) length + (unsigned int) offset) > (unsigned int) array_length)) {
- if (can_throw) {
- JVMCI_THROW_0(ArrayIndexOutOfBoundsException);
- }
- return -2;
- }
- jbyte buffer[O_BUFLEN];
- while (length > 0) {
- int copy_len = MIN2(length, (jint)O_BUFLEN);
- JVMCIENV->copy_bytes_to(array, buffer, offset, copy_len);
- tty->write((char*) buffer, copy_len);
- length -= O_BUFLEN;
- offset += O_BUFLEN;
+// Use of tty does not require the current thread to be attached to the VM
+// so no need for a full C2V_VMENTRY transition.
+C2V_VMENTRY_PREFIX(void, writeDebugOutput, (JNIEnv* env, jobject, jlong buffer, jint length, bool flush))
+ if (length <= 8) {
+ tty->write((char*) &buffer, length);
+ } else {
+ tty->write((char*) buffer, length);
}
if (flush) {
tty->flush();
}
- return 0;
C2V_END
-C2V_VMENTRY(void, flushDebugOutput, (JNIEnv* env, jobject))
+// Use of tty does not require the current thread to be attached to the VM
+// so no need for a full C2V_VMENTRY transition.
+C2V_VMENTRY_PREFIX(void, flushDebugOutput, (JNIEnv* env, jobject))
tty->flush();
C2V_END
@@ -2793,7 +2727,7 @@ JNINativeMethod CompilerToVM::methods[] = {
{CC "iterateFrames", CC "([" RESOLVED_METHOD "[" RESOLVED_METHOD "I" INSPECTED_FRAME_VISITOR ")" OBJECT, FN_PTR(iterateFrames)},
{CC "materializeVirtualObjects", CC "(" HS_STACK_FRAME_REF "Z)V", FN_PTR(materializeVirtualObjects)},
{CC "shouldDebugNonSafepoints", CC "()Z", FN_PTR(shouldDebugNonSafepoints)},
- {CC "writeDebugOutput", CC "([BIIZZ)I", FN_PTR(writeDebugOutput)},
+ {CC "writeDebugOutput", CC "(JIZ)V", FN_PTR(writeDebugOutput)},
{CC "flushDebugOutput", CC "()V", FN_PTR(flushDebugOutput)},
{CC "methodDataProfileDataSize", CC "(JI)I", FN_PTR(methodDataProfileDataSize)},
{CC "getFingerprint", CC "(J)J", FN_PTR(getFingerprint)},
diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp
index c4278a9f231a69d7b27cabe83aa9d504eb72a843..3d817a5d129ecb7932358e924b66204cfd17432d 100644
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp
@@ -1160,9 +1160,9 @@ Klass* JVMCIRuntime::get_klass_by_name_impl(Klass*& accessing_klass,
ttyUnlocker ttyul; // release tty lock to avoid ordering problems
MutexLocker ml(Compile_lock);
if (!require_local) {
- found_klass = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, CHECK_NULL);
+ found_klass = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, THREAD);
} else {
- found_klass = SystemDictionary::find_instance_or_array_klass(sym, loader, domain, CHECK_NULL);
+ found_klass = SystemDictionary::find_instance_or_array_klass(sym, loader, domain);
}
}
@@ -1627,7 +1627,7 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
debug_info, dependencies, code_buffer,
frame_words, oop_map_set,
handler_table, implicit_exception_table,
- compiler, comp_level, GrowableArrayView::EMPTY,
+ compiler, comp_level, GrowableArrayView::EMPTY,
speculations, speculations_len,
nmethod_mirror_index, nmethod_mirror_name, failed_speculations);
diff --git a/src/hotspot/share/jvmci/metadataHandles.cpp b/src/hotspot/share/jvmci/metadataHandles.cpp
index da1ad7b61a08894b7f5c7640d7a16c9307ed918e..bcf66721f52ce9edbaa91a651b1c3cad3ecc67e8 100644
--- a/src/hotspot/share/jvmci/metadataHandles.cpp
+++ b/src/hotspot/share/jvmci/metadataHandles.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
#include "jvmci/metadataHandles.hpp"
#include "runtime/atomic.hpp"
diff --git a/src/hotspot/share/logging/log.hpp b/src/hotspot/share/logging/log.hpp
index 3d9a268218286862a17a3b3b316781eafcc4eae3..b0d6d83434ecbc9a8bb306ddae6c0cf8a349a352 100644
--- a/src/hotspot/share/logging/log.hpp
+++ b/src/hotspot/share/logging/log.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -72,16 +72,16 @@ class LogMessageBuffer;
// Log class for more advanced logging scenarios.
// Has printf-style member functions for each log level (trace(), debug(), etc).
//
-// Also has outputStream compatible API for the different log-levels.
-// The streams are resource allocated when requested and are accessed through
-// calls to _stream() functions (trace_stream(), debug_stream(), etc).
+// The (trace(), debug(), etc) functions can also be used along with the LogStream
+// class to obtain an outputStream object, to be passed to various printing
+// functions that accept an outputStream:
//
// Example usage:
-// Log(logging) log;
+// Log(codecache, sweep) log;
// if (log.is_debug()) {
-// ...
// log.debug("result = %d", result).trace(" tracing info");
-// obj->print_on(log.debug_stream());
+// LogStream ls(log.debug());
+// CodeCache::print_summary(&ls, false);
// }
//
#define Log(...) LogImpl<LOG_TAGS(__VA_ARGS__)>
@@ -93,13 +93,11 @@ class LogMessageBuffer;
// so that redundant specification of tags or levels can be avoided.
//
// Example usage:
-// LogTarget(Debug, gc) out;
+// LogTarget(Debug, codecache, sweep) out;
// if (out.is_enabled()) {
-// ...
-// out.print("Worker: %u", i);
-// out.print(" data: %d", x);
-// ...
-// print_stats(out.stream());
+// out.print("result = %d", result);
+// LogStream ls(out);
+// CodeCache::print_summary(&ls, false);
// }
//
#define LogTarget(level, ...) LogTargetImpl<LogLevel::level, LOG_TAGS(__VA_ARGS__)>
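
The updated comments document the replacement for the removed per-level *_stream() accessors: wrap a Log or LogTarget in a LogStream to obtain an outputStream. A usage sketch against HotSpot's own API (only compilable inside the VM source tree; `result` is an illustrative variable):

#include "code/codeCache.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"

void log_sweep_result(int result) {
  LogTarget(Debug, codecache, sweep) lt;
  if (lt.is_enabled()) {
    lt.print("result = %d", result);  // printf-style, one line per call
    LogStream ls(lt);                 // adapts the target to an outputStream
    CodeCache::print_summary(&ls, false);
  }
}
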
diff --git a/src/hotspot/share/memory/allocation.cpp b/src/hotspot/share/memory/allocation.cpp
index a4c55edac87aa672b986e98fbd72744e2d92c4f8..c3024bcb3725b6cbaab174312cd40deb11a1ee6e 100644
--- a/src/hotspot/share/memory/allocation.cpp
+++ b/src/hotspot/share/memory/allocation.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
-#include "memory/metaspaceShared.hpp"
+#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
diff --git a/src/hotspot/share/memory/archiveBuilder.cpp b/src/hotspot/share/memory/archiveBuilder.cpp
index a4361c4ddeafcbea8aca043b646a601aaebd7c02..c84b860450a950c560ca635d2ba748e1ad16ccf1 100644
--- a/src/hotspot/share/memory/archiveBuilder.cpp
+++ b/src/hotspot/share/memory/archiveBuilder.cpp
@@ -27,6 +27,7 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
+#include "interpreter/abstractInterpreter.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allStatic.hpp"
@@ -44,32 +45,13 @@
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
+#include "utilities/formatBuffer.hpp"
#include "utilities/hashtable.inline.hpp"
ArchiveBuilder* ArchiveBuilder::_current = NULL;
-class AdapterHandlerEntry;
-
-class MethodTrampolineInfo {
- address _c2i_entry_trampoline;
- AdapterHandlerEntry** _adapter_trampoline;
-public:
- address c2i_entry_trampoline() { return _c2i_entry_trampoline; }
- AdapterHandlerEntry** adapter_trampoline() { return _adapter_trampoline; }
- void set_c2i_entry_trampoline(address addr) { _c2i_entry_trampoline = addr; }
- void set_adapter_trampoline(AdapterHandlerEntry** entry) { _adapter_trampoline = entry; }
-};
-
-class AdapterToTrampoline : public ResourceHashtable<
- AdapterHandlerEntry*, MethodTrampolineInfo,
- primitive_hash<AdapterHandlerEntry*>,
- primitive_equals<AdapterHandlerEntry*>,
- 941, // prime number
- ResourceObj::C_HEAP> {};
-
-static AdapterToTrampoline* _adapter_to_trampoline = NULL;
ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
- char* newtop = ArchiveBuilder::current()->_ro_region->top();
+ char* newtop = ArchiveBuilder::current()->_ro_region.top();
ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
}
@@ -159,37 +141,38 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
_ptrmap.iterate(&relocator, start, end);
}
-ArchiveBuilder::ArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region)
- : _rw_src_objs(), _ro_src_objs(), _src_obj_table(INITIAL_TABLE_SIZE) {
- assert(_current == NULL, "must be");
- _current = this;
-
+ArchiveBuilder::ArchiveBuilder() :
+ _current_dump_space(NULL),
+ _buffer_bottom(NULL),
+ _last_verified_top(NULL),
+ _num_dump_regions_used(0),
+ _other_region_used_bytes(0),
+ _requested_static_archive_bottom(NULL),
+ _requested_static_archive_top(NULL),
+ _requested_dynamic_archive_bottom(NULL),
+ _requested_dynamic_archive_top(NULL),
+ _mapped_static_archive_bottom(NULL),
+ _mapped_static_archive_top(NULL),
+ _buffer_to_requested_delta(0),
+ _rw_region("rw", MAX_SHARED_DELTA),
+ _ro_region("ro", MAX_SHARED_DELTA),
+ _rw_src_objs(),
+ _ro_src_objs(),
+ _src_obj_table(INITIAL_TABLE_SIZE),
+ _num_instance_klasses(0),
+ _num_obj_array_klasses(0),
+ _num_type_array_klasses(0),
+ _total_closed_heap_region_size(0),
+ _total_open_heap_region_size(0),
+ _estimated_metaspaceobj_bytes(0),
+ _estimated_hashtable_bytes(0)
+{
_klasses = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
_symbols = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
_special_refs = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<SpecialRefInfo>(24 * K, mtClassShared);
- _num_instance_klasses = 0;
- _num_obj_array_klasses = 0;
- _num_type_array_klasses = 0;
- _alloc_stats = new (ResourceObj::C_HEAP, mtClassShared) DumpAllocStats;
-
- _mc_region = mc_region;
- _rw_region = rw_region;
- _ro_region = ro_region;
-
- _num_dump_regions_used = 0;
-
- _estimated_metaspaceobj_bytes = 0;
- _estimated_hashtable_bytes = 0;
- _estimated_trampoline_bytes = 0;
-
- _requested_static_archive_bottom = NULL;
- _requested_static_archive_top = NULL;
- _mapped_static_archive_bottom = NULL;
- _mapped_static_archive_top = NULL;
- _requested_dynamic_archive_bottom = NULL;
- _requested_dynamic_archive_top = NULL;
- _buffer_to_requested_delta = 0;
+ assert(_current == NULL, "must be");
+ _current = this;
}
ArchiveBuilder::~ArchiveBuilder() {
@@ -205,7 +188,10 @@ ArchiveBuilder::~ArchiveBuilder() {
delete _klasses;
delete _symbols;
delete _special_refs;
- delete _alloc_stats;
+}
+
+bool ArchiveBuilder::is_dumping_full_module_graph() {
+ return DumpSharedSpaces && MetaspaceShared::use_full_module_graph();
}
class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
@@ -261,7 +247,7 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
GatherKlassesAndSymbols doit(this);
iterate_roots(&doit, /*is_relocating_pointers=*/false);
#if INCLUDE_CDS_JAVA_HEAP
- if (DumpSharedSpaces && MetaspaceShared::use_full_module_graph()) {
+ if (is_dumping_full_module_graph()) {
ClassLoaderDataShared::iterate_symbols(&doit);
}
#endif
@@ -335,13 +321,10 @@ size_t ArchiveBuilder::estimate_archive_size() {
size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
_estimated_hashtable_bytes = symbol_table_est + dictionary_est;
- _estimated_trampoline_bytes = allocate_method_trampoline_info();
-
size_t total = 0;
total += _estimated_metaspaceobj_bytes;
total += _estimated_hashtable_bytes;
- total += _estimated_trampoline_bytes;
// allow fragmentation at the end of each dump region
total += _total_dump_regions * reserve_alignment();
@@ -349,7 +332,6 @@ size_t ArchiveBuilder::estimate_archive_size() {
log_info(cds)("_estimated_hashtable_bytes = " SIZE_FORMAT " + " SIZE_FORMAT " = " SIZE_FORMAT,
symbol_table_est, dictionary_est, _estimated_hashtable_bytes);
log_info(cds)("_estimated_metaspaceobj_bytes = " SIZE_FORMAT, _estimated_metaspaceobj_bytes);
- log_info(cds)("_estimated_trampoline_bytes = " SIZE_FORMAT, _estimated_trampoline_bytes);
log_info(cds)("total estimate bytes = " SIZE_FORMAT, total);
return align_up(total, reserve_alignment());
@@ -366,18 +348,18 @@ address ArchiveBuilder::reserve_buffer() {
// buffer_bottom is the lowest address of the 3 core regions (mc, rw, ro) when
// we are copying the class metadata into the buffer.
address buffer_bottom = (address)rs.base();
- log_info(cds)("Reserved output buffer space at : " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
+ log_info(cds)("Reserved output buffer space at " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
p2i(buffer_bottom), buffer_size);
- MetaspaceShared::set_shared_rs(rs);
+ _shared_rs = rs;
- MetaspaceShared::init_shared_dump_space(_mc_region);
_buffer_bottom = buffer_bottom;
_last_verified_top = buffer_bottom;
- _current_dump_space = _mc_region;
+ _current_dump_space = &_rw_region;
_num_dump_regions_used = 1;
_other_region_used_bytes = 0;
+ _current_dump_space->init(&_shared_rs, &_shared_vs);
- ArchivePtrMarker::initialize(&_ptrmap, (address*)_mc_region->base(), (address*)_mc_region->top());
+ ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);
// The bottom of the static archive should be mapped at this address by default.
_requested_static_archive_bottom = (address)MetaspaceShared::requested_base_address();
@@ -413,6 +395,12 @@ address ArchiveBuilder::reserve_buffer() {
vm_direct_exit(0);
}
+ if (DumpSharedSpaces) {
+ // We don't want any valid object to be at the very bottom of the archive.
+ // See ArchivePtrMarker::mark_pointer().
+ rw_region()->allocate(16);
+ }
+
return buffer_bottom;
}
@@ -520,6 +508,7 @@ void ArchiveBuilder::remember_embedded_pointer_in_copied_obj(MetaspaceClosure::R
void ArchiveBuilder::gather_source_objs() {
ResourceMark rm;
log_info(cds)("Gathering all archivable objects ... ");
+ gather_klasses_and_symbols();
GatherSortedSourceObjs doit(this);
iterate_sorted_roots(&doit, /*is_relocating_pointers=*/false);
doit.finish();
@@ -565,16 +554,60 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
}
-void ArchiveBuilder::dump_rw_region() {
+void ArchiveBuilder::start_dump_space(DumpRegion* next) {
+ address bottom = _last_verified_top;
+ address top = (address)(current_dump_space()->top());
+ _other_region_used_bytes += size_t(top - bottom);
+
+ current_dump_space()->pack(next);
+ _current_dump_space = next;
+ _num_dump_regions_used ++;
+
+ _last_verified_top = (address)(current_dump_space()->top());
+}
+
+void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) {
+ address bottom = _last_verified_top;
+ address top = (address)(current_dump_space()->top());
+ size_t used = size_t(top - bottom) + _other_region_used_bytes;
+ int diff = int(estimate) - int(used);
+
+ log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
+ assert(diff >= 0, "Estimate is too small");
+
+ _last_verified_top = top;
+ _other_region_used_bytes = 0;
+}
+
+void ArchiveBuilder::dump_rw_metadata() {
ResourceMark rm;
log_info(cds)("Allocating RW objects ... ");
- make_shallow_copies(_rw_region, &_rw_src_objs);
+ make_shallow_copies(&_rw_region, &_rw_src_objs);
+
+#if INCLUDE_CDS_JAVA_HEAP
+ if (is_dumping_full_module_graph()) {
+ // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
+ char* start = rw_region()->top();
+ ClassLoaderDataShared::allocate_archived_tables();
+ alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
+ }
+#endif
}
-void ArchiveBuilder::dump_ro_region() {
+void ArchiveBuilder::dump_ro_metadata() {
ResourceMark rm;
log_info(cds)("Allocating RO objects ... ");
- make_shallow_copies(_ro_region, &_ro_src_objs);
+
+ start_dump_space(&_ro_region);
+ make_shallow_copies(&_ro_region, &_ro_src_objs);
+
+#if INCLUDE_CDS_JAVA_HEAP
+ if (is_dumping_full_module_graph()) {
+ char* start = ro_region()->top();
+ ClassLoaderDataShared::init_archived_tables();
+ alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true);
+ }
+#endif
}
void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
@@ -619,7 +652,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
src_info->set_dumped_addr((address)dest);
- _alloc_stats->record(ref->msotype(), int(newtop - oldtop), src_info->read_only());
+ _alloc_stats.record(ref->msotype(), int(newtop - oldtop), src_info->read_only());
}
address ArchiveBuilder::get_dumped_addr(address src_obj) const {
@@ -821,6 +854,8 @@ class RelocateBufferToRequested : public BitMapClosure {
void ArchiveBuilder::relocate_to_requested() {
+ ro_region()->pack();
+
size_t my_archive_size = buffer_top() - buffer_bottom();
if (DumpSharedSpaces) {
@@ -989,14 +1024,8 @@ public:
write_header(mapinfo);
write_data(header, header_end, 0);
- DumpRegion* mc_region = builder->_mc_region;
- DumpRegion* rw_region = builder->_rw_region;
- DumpRegion* ro_region = builder->_ro_region;
-
- address mc = address(mc_region->base());
- address mc_end = address(mc_region->end());
- write_dump_region("mc region", mc_region);
- write_data(mc, mc_end, mc + buffer_to_runtime_delta());
+ DumpRegion* rw_region = &builder->_rw_region;
+ DumpRegion* ro_region = &builder->_ro_region;
write_dump_region("rw region", rw_region);
write_objects(rw_region, &builder->_rw_src_objs);
@@ -1019,18 +1048,8 @@ public:
}
};
-void ArchiveBuilder::write_cds_map_to_log(FileMapInfo* mapinfo,
- GrowableArray<MemRegion> *closed_heap_regions,
- GrowableArray<MemRegion> *open_heap_regions,
- char* bitmap, size_t bitmap_size_in_bytes) {
- if (log_is_enabled(Info, cds, map)) {
- CDSMapLogger::write(this, mapinfo, closed_heap_regions, open_heap_regions,
- bitmap, bitmap_size_in_bytes);
- }
-}
-
-void ArchiveBuilder::print_stats(int ro_all, int rw_all, int mc_all) {
- _alloc_stats->print_stats(ro_all, rw_all, mc_all);
+void ArchiveBuilder::print_stats() {
+ _alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
}
void ArchiveBuilder::clean_up_src_obj_table() {
@@ -1038,92 +1057,116 @@ void ArchiveBuilder::clean_up_src_obj_table() {
_src_obj_table.iterate(&cleaner);
}
-void ArchiveBuilder::allocate_method_trampolines_for(InstanceKlass* ik) {
- if (ik->methods() != NULL) {
- for (int j = 0; j < ik->methods()->length(); j++) {
- // Walk the methods in a deterministic order so that the trampolines are
- // created in a deterministic order.
- Method* m = ik->methods()->at(j);
- AdapterHandlerEntry* ent = m->adapter(); // different methods can share the same AdapterHandlerEntry
- MethodTrampolineInfo* info = _adapter_to_trampoline->get(ent);
- if (info->c2i_entry_trampoline() == NULL) {
- info->set_c2i_entry_trampoline(
- (address)MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size()));
- info->set_adapter_trampoline(
- (AdapterHandlerEntry**)MetaspaceShared::misc_code_space_alloc(sizeof(AdapterHandlerEntry*)));
- }
- }
+void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
+ GrowableArray<MemRegion>* closed_heap_regions,
+ GrowableArray<MemRegion>* open_heap_regions,
+ GrowableArray<ArchiveHeapOopmapInfo>* closed_heap_oopmaps,
+ GrowableArray<ArchiveHeapOopmapInfo>* open_heap_oopmaps) {
+ // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
+ // MetaspaceShared::n_regions (internal to hotspot).
+ assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");
+
+ write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
+ write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
+
+ size_t bitmap_size_in_bytes;
+ char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_heap_oopmaps, open_heap_oopmaps,
+ bitmap_size_in_bytes);
+
+ if (closed_heap_regions != NULL) {
+ _total_closed_heap_region_size = mapinfo->write_archive_heap_regions(
+ closed_heap_regions,
+ closed_heap_oopmaps,
+ MetaspaceShared::first_closed_archive_heap_region,
+ MetaspaceShared::max_closed_archive_heap_region);
+ _total_open_heap_region_size = mapinfo->write_archive_heap_regions(
+ open_heap_regions,
+ open_heap_oopmaps,
+ MetaspaceShared::first_open_archive_heap_region,
+ MetaspaceShared::max_open_archive_heap_region);
}
-}
-void ArchiveBuilder::allocate_method_trampolines() {
- for (int i = 0; i < _klasses->length(); i++) {
- Klass* k = _klasses->at(i);
- if (k->is_instance_klass()) {
- InstanceKlass* ik = InstanceKlass::cast(k);
- allocate_method_trampolines_for(ik);
- }
- }
-}
+ print_region_stats(mapinfo, closed_heap_regions, open_heap_regions);
-// Allocate MethodTrampolineInfo for all Methods that will be archived. Also
-// return the total number of bytes needed by the method trampolines in the MC
-// region.
-size_t ArchiveBuilder::allocate_method_trampoline_info() {
- size_t total = 0;
- size_t each_method_bytes =
- align_up(SharedRuntime::trampoline_size(), BytesPerWord) +
- align_up(sizeof(AdapterHandlerEntry*), BytesPerWord);
+ mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
+ mapinfo->set_header_crc(mapinfo->compute_header_crc());
+ mapinfo->write_header();
+ mapinfo->close();
- if (_adapter_to_trampoline == NULL) {
- _adapter_to_trampoline = new (ResourceObj::C_HEAP, mtClass)AdapterToTrampoline();
+ if (log_is_enabled(Info, cds)) {
+ print_stats();
}
- int count = 0;
- for (int i = 0; i < _klasses->length(); i++) {
- Klass* k = _klasses->at(i);
- if (k->is_instance_klass()) {
- InstanceKlass* ik = InstanceKlass::cast(k);
- if (ik->methods() != NULL) {
- for (int j = 0; j < ik->methods()->length(); j++) {
- Method* m = ik->methods()->at(j);
- AdapterHandlerEntry* ent = m->adapter(); // different methods can share the same AdapterHandlerEntry
- bool is_created = false;
- MethodTrampolineInfo* info = _adapter_to_trampoline->put_if_absent(ent, &is_created);
- if (is_created) {
- count++;
- }
- }
- }
- }
+
+ if (log_is_enabled(Info, cds, map)) {
+ CDSMapLogger::write(this, mapinfo, closed_heap_regions, open_heap_regions,
+ bitmap, bitmap_size_in_bytes);
}
- if (count == 0) {
- // We have nothing to archive, but let's avoid having an empty region.
- total = SharedRuntime::trampoline_size();
- } else {
- total = count * each_method_bytes;
+ FREE_C_HEAP_ARRAY(char, bitmap);
+}
+
+void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
+ mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
+}
+
+void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
+ GrowableArray<MemRegion>* closed_heap_regions,
+ GrowableArray<MemRegion>* open_heap_regions) {
+ // Print statistics of all the regions
+ const size_t bitmap_used = mapinfo->space_at(MetaspaceShared::bm)->used();
+ const size_t bitmap_reserved = mapinfo->space_at(MetaspaceShared::bm)->used_aligned();
+ const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
+ bitmap_reserved +
+ _total_closed_heap_region_size +
+ _total_open_heap_region_size;
+ const size_t total_bytes = _ro_region.used() + _rw_region.used() +
+ bitmap_used +
+ _total_closed_heap_region_size +
+ _total_open_heap_region_size;
+ const double total_u_perc = percent_of(total_bytes, total_reserved);
+
+ _rw_region.print(total_reserved);
+ _ro_region.print(total_reserved);
+
+ print_bitmap_region_stats(bitmap_used, total_reserved);
+
+ if (closed_heap_regions != NULL) {
+ print_heap_region_stats(closed_heap_regions, "ca", total_reserved);
+ print_heap_region_stats(open_heap_regions, "oa", total_reserved);
}
- return align_up(total, SharedSpaceObjectAlignment);
+
+ log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
+ total_bytes, total_reserved, total_u_perc);
}
-void ArchiveBuilder::update_method_trampolines() {
- for (int i = 0; i < klasses()->length(); i++) {
- Klass* k = klasses()->at(i);
- if (k->is_instance_klass()) {
- InstanceKlass* ik = InstanceKlass::cast(k);
- Array<Method*>* methods = ik->methods();
- for (int j = 0; j < methods->length(); j++) {
- Method* m = methods->at(j);
- AdapterHandlerEntry* ent = m->adapter();
- MethodTrampolineInfo* info = _adapter_to_trampoline->get(ent);
- // m is the "copy" of the original Method, but its adapter() field is still valid because
- // we haven't called make_klasses_shareable() yet.
- m->set_from_compiled_entry(info->c2i_entry_trampoline());
- m->set_adapter_trampoline(info->adapter_trampoline());
- }
- }
+void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
+ log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
+ size, size/double(total_size)*100.0, size);
+}
+
+void ArchiveBuilder::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
+ const char *name, size_t total_size) {
+ int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
+ for (int i = 0; i < arr_len; i++) {
+ char* start = (char*)heap_mem->at(i).start();
+ size_t size = heap_mem->at(i).byte_size();
+ char* top = start + size;
+ log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
+ name, i, size, size/double(total_size)*100.0, size, p2i(start));
}
}
+void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
+ // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
+ // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
+ // or so.
+ _rw_region.print_out_of_space_msg(name, needed_bytes);
+ _ro_region.print_out_of_space_msg(name, needed_bytes);
+
+ vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
+ "Please reduce the number of shared classes.");
+}
+
+
#ifndef PRODUCT
void ArchiveBuilder::assert_is_vm_thread() {
assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
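
The start_dump_space()/verify_estimate_size() pair added above keeps a running account across dump phases: everything allocated in the current dump space since the last checkpoint, plus bytes used outside any region, must fit within that phase's estimate, and the checkpoint then advances. The bookkeeping in isolation (a sketch, not the HotSpot class):

#include <cassert>
#include <cstddef>

struct EstimateChecker {
  const char* _last_verified_top;            // _last_verified_top analogue
  std::size_t _other_region_used_bytes = 0;  // bytes used outside the region

  void verify_estimate_size(const char* top, std::size_t estimate) {
    std::size_t used = std::size_t(top - _last_verified_top) + _other_region_used_bytes;
    assert(used <= estimate && "estimate is too small");
    _last_verified_top = top;                // start the next phase from here
    _other_region_used_bytes = 0;
  }
};
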
diff --git a/src/hotspot/share/memory/archiveBuilder.hpp b/src/hotspot/share/memory/archiveBuilder.hpp
index 0859b6a14b1763da812290f93f608d383c926476..2fbc1b34b15096e6f58756e1d7f766dec8cf2809 100644
--- a/src/hotspot/share/memory/archiveBuilder.hpp
+++ b/src/hotspot/share/memory/archiveBuilder.hpp
@@ -26,7 +26,9 @@
#define SHARE_MEMORY_ARCHIVEBUILDER_HPP
#include "memory/archiveUtils.hpp"
+#include "memory/dumpAllocStats.hpp"
#include "memory/metaspaceClosure.hpp"
+#include "oops/array.hpp"
#include "oops/klass.hpp"
#include "runtime/os.hpp"
#include "utilities/bitMap.hpp"
@@ -34,33 +36,35 @@
#include "utilities/hashtable.hpp"
#include "utilities/resourceHash.hpp"
+struct ArchiveHeapOopmapInfo;
class CHeapBitMap;
-class DumpAllocStats;
class FileMapInfo;
class Klass;
class MemRegion;
class Symbol;
+// Metaspace::allocate() requires that all blocks be aligned with KlassAlignmentInBytes.
+// We enforce the same alignment rule in blocks allocated from the shared space.
+const int SharedSpaceObjectAlignment = KlassAlignmentInBytes;
+
// Overview of CDS archive creation (for both static and dynamic dump):
//
// [1] Load all classes (static dump: from the classlist, dynamic dump: as part of app execution)
// [2] Allocate "output buffer"
-// [3] Copy contents of the 3 "core" regions (mc/rw/ro) into the output buffer.
-// - mc region:
-// allocate_method_trampolines();
-// allocate the cpp vtables (static dump only)
+// [3] Copy contents of the 2 "core" regions (rw/ro) into the output buffer.
+// - allocate the cpp vtables in rw (static dump only)
// - memcpy the MetaspaceObjs into rw/ro:
// dump_rw_region();
// dump_ro_region();
// - fix all the pointers in the MetaspaceObjs to point to the copies
// relocate_metaspaceobj_embedded_pointers()
// [4] Copy symbol table, dictionary, etc, into the ro region
-// [5] Relocate all the pointers in mc/rw/ro, so that the archive can be mapped to
+// [5] Relocate all the pointers in rw/ro, so that the archive can be mapped to
// the "requested" location without runtime relocation. See relocate_to_requested()
class ArchiveBuilder : public StackObj {
protected:
DumpRegion* _current_dump_space;
- address _buffer_bottom; // for writing the contents of mc/rw/ro regions
+ address _buffer_bottom; // for writing the contents of rw/ro regions
address _last_verified_top;
int _num_dump_regions_used;
size_t _other_region_used_bytes;
@@ -186,9 +190,11 @@ private:
static const int INITIAL_TABLE_SIZE = 15889;
static const int MAX_TABLE_SIZE = 1000000;
- DumpRegion* _mc_region;
- DumpRegion* _rw_region;
- DumpRegion* _ro_region;
+ ReservedSpace _shared_rs;
+ VirtualSpace _shared_vs;
+
+ DumpRegion _rw_region;
+ DumpRegion _ro_region;
CHeapBitMap _ptrmap; // bitmap used by ArchivePtrMarker
SourceObjList _rw_src_objs; // objs to put in rw region
@@ -202,7 +208,16 @@ private:
int _num_instance_klasses;
int _num_obj_array_klasses;
int _num_type_array_klasses;
- DumpAllocStats* _alloc_stats;
+ DumpAllocStats _alloc_stats;
+ size_t _total_closed_heap_region_size;
+ size_t _total_open_heap_region_size;
+
+ void print_region_stats(FileMapInfo *map_info,
+ GrowableArray<MemRegion>* closed_heap_regions,
+ GrowableArray<MemRegion>* open_heap_regions);
+ void print_bitmap_region_stats(size_t size, size_t total_size);
+ void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
+ const char *name, size_t total_size);
// For global access.
static ArchiveBuilder* _current;
@@ -215,12 +230,13 @@ public:
char* _oldtop;
public:
OtherROAllocMark() {
- _oldtop = _current->_ro_region->top();
+ _oldtop = _current->_ro_region.top();
}
~OtherROAllocMark();
};
private:
+ bool is_dumping_full_module_graph();
FollowMode get_follow_mode(MetaspaceClosure::Ref *ref);
void iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers);
@@ -244,9 +260,8 @@ protected:
// Conservative estimate for number of bytes needed for:
size_t _estimated_metaspaceobj_bytes; // all archived MetaspaceObj's.
size_t _estimated_hashtable_bytes; // symbol table and dictionaries
- size_t _estimated_trampoline_bytes; // method entry trampolines
- static const int _total_dump_regions = 3;
+ static const int _total_dump_regions = 2;
size_t estimate_archive_size();
@@ -254,8 +269,10 @@ protected:
return os::vm_allocation_granularity();
}
+ void start_dump_space(DumpRegion* next);
+ void verify_estimate_size(size_t estimate, const char* which);
+
public:
- void set_current_dump_space(DumpRegion* r) { _current_dump_space = r; }
address reserve_buffer();
address buffer_bottom() const { return _buffer_bottom; }
@@ -317,7 +334,7 @@ public:
static void assert_is_vm_thread() PRODUCT_RETURN;
public:
- ArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region);
+ ArchiveBuilder();
~ArchiveBuilder();
void gather_klasses_and_symbols();
@@ -327,17 +344,52 @@ public:
void add_special_ref(MetaspaceClosure::SpecialRef type, address src_obj, size_t field_offset);
void remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref, MetaspaceClosure::Ref* ref);
- void dump_rw_region();
- void dump_ro_region();
+ DumpRegion* rw_region() { return &_rw_region; }
+ DumpRegion* ro_region() { return &_ro_region; }
+
+ static char* rw_region_alloc(size_t num_bytes) {
+ return current()->rw_region()->allocate(num_bytes);
+ }
+ static char* ro_region_alloc(size_t num_bytes) {
+ return current()->ro_region()->allocate(num_bytes);
+ }
+
+ template <typename T>
+ static Array<T>* new_ro_array(int length) {
+ size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
+ Array<T>* array = (Array<T>*)ro_region_alloc(byte_size);
+ array->initialize(length);
+ return array;
+ }
+
+ template <typename T>
+ static Array<T>* new_rw_array(int length) {
+ size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
+ Array<T>* array = (Array<T>*)rw_region_alloc(byte_size);
+ array->initialize(length);
+ return array;
+ }
+
+ template <typename T>
+ static size_t ro_array_bytesize(int length) {
+ size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
+ return align_up(byte_size, SharedSpaceObjectAlignment);
+ }
+
+ void dump_rw_metadata();
+ void dump_ro_metadata();
void relocate_metaspaceobj_embedded_pointers();
void relocate_roots();
void relocate_vm_classes();
void make_klasses_shareable();
void relocate_to_requested();
- void write_cds_map_to_log(FileMapInfo* mapinfo,
- GrowableArray<MemRegion> *closed_heap_regions,
- GrowableArray<MemRegion> *open_heap_regions,
- char* bitmap, size_t bitmap_size_in_bytes);
+ void write_archive(FileMapInfo* mapinfo,
+ GrowableArray<MemRegion>* closed_heap_regions,
+ GrowableArray<MemRegion>* open_heap_regions,
+ GrowableArray<ArchiveHeapOopmapInfo>* closed_heap_oopmaps,
+ GrowableArray<ArchiveHeapOopmapInfo>* open_heap_oopmaps);
+ void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
+ bool read_only, bool allow_exec);
address get_dumped_addr(address src_obj) const;
@@ -356,7 +408,15 @@ public:
}
static DumpAllocStats* alloc_stats() {
- return current()->_alloc_stats;
+ return &(current()->_alloc_stats);
+ }
+
+ static CompactHashtableStats* symbol_stats() {
+ return alloc_stats()->symbol_stats();
+ }
+
+ static CompactHashtableStats* string_stats() {
+ return alloc_stats()->string_stats();
}
void relocate_klass_ptr(oop o);
@@ -371,13 +431,8 @@ public:
return (Symbol*)current()->get_dumped_addr((address)orig_symbol);
}
- void print_stats(int ro_all, int rw_all, int mc_all);
-
- // Method trampolines related functions
- void allocate_method_trampolines();
- void allocate_method_trampolines_for(InstanceKlass* ik);
- size_t allocate_method_trampoline_info();
- void update_method_trampolines();
+ void print_stats();
+ void report_out_of_space(const char* name, size_t needed_bytes);
};
#endif // SHARE_MEMORY_ARCHIVEBUILDER_HPP
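
The new_ro_array/new_rw_array helpers added above carve a variable-length Array<T> out of a dump region with one aligned bump allocation: compute the byte size from the element count, allocate, then initialize the length header. A self-contained sketch of the same flexible-array pattern (simplified; HotSpot's Array<T> adds SharedSpaceObjectAlignment and archive bookkeeping):

#include <cstddef>
#include <cstdlib>

template <typename T>
struct SimpleArray {
  int _length;
  T   _data[1];  // really '_length' elements follow the header

  static std::size_t byte_sizeof(int length) {
    return sizeof(SimpleArray<T>) + sizeof(T) * std::size_t(length - 1);
  }
  static SimpleArray<T>* allocate(int length) {
    // std::malloc stands in for ro_region_alloc()/rw_region_alloc().
    void* p = std::malloc(byte_sizeof(length));
    SimpleArray<T>* a = static_cast<SimpleArray<T>*>(p);
    a->_length = length;  // Array<T>::initialize() analogue
    return a;
  }
};
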
diff --git a/src/hotspot/share/memory/archiveUtils.cpp b/src/hotspot/share/memory/archiveUtils.cpp
index 5a3bb5cf461cfac08f19bdf29631baf9a25c9408..7f701e1e87fcdc47a4fcc20620d5bd525c40d11e 100644
--- a/src/hotspot/share/memory/archiveUtils.cpp
+++ b/src/hotspot/share/memory/archiveUtils.cpp
@@ -33,21 +33,20 @@
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
-#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "utilities/bitMap.inline.hpp"
CHeapBitMap* ArchivePtrMarker::_ptrmap = NULL;
-address* ArchivePtrMarker::_ptr_base;
-address* ArchivePtrMarker::_ptr_end;
+VirtualSpace* ArchivePtrMarker::_vs;
+
bool ArchivePtrMarker::_compacted;
-void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end) {
+void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
assert(_ptrmap == NULL, "initialize only once");
- _ptr_base = ptr_base;
- _ptr_end = ptr_end;
+ _vs = vs;
_compacted = false;
_ptrmap = ptrmap;
@@ -66,17 +65,17 @@ void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
assert(_ptrmap != NULL, "not initialized");
assert(!_compacted, "cannot mark anymore");
- if (_ptr_base <= ptr_loc && ptr_loc < _ptr_end) {
+ if (ptr_base() <= ptr_loc && ptr_loc < ptr_end()) {
address value = *ptr_loc;
// We don't want any pointer that points to the very bottom of the archive, otherwise when
// MetaspaceShared::default_base_address()==0, we can't distinguish between a pointer
// to nothing (NULL) vs a pointer to an object that happens to be at the very bottom
// of the archive.
- assert(value != (address)_ptr_base, "don't point to the bottom of the archive");
+ assert(value != (address)ptr_base(), "don't point to the bottom of the archive");
if (value != NULL) {
assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
- size_t idx = ptr_loc - _ptr_base;
+ size_t idx = ptr_loc - ptr_base();
if (_ptrmap->size() <= idx) {
_ptrmap->resize((idx + 1) * 2);
}
@@ -91,9 +90,9 @@ void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
assert(_ptrmap != NULL, "not initialized");
assert(!_compacted, "cannot clear anymore");
- assert(_ptr_base <= ptr_loc && ptr_loc < _ptr_end, "must be");
+ assert(ptr_base() <= ptr_loc && ptr_loc < ptr_end(), "must be");
assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
- size_t idx = ptr_loc - _ptr_base;
+ size_t idx = ptr_loc - ptr_base();
assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked");
_ptrmap->clear_bit(idx);
//tty->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
@@ -132,7 +131,7 @@ public:
void ArchivePtrMarker::compact(address relocatable_base, address relocatable_end) {
assert(!_compacted, "cannot compact again");
- ArchivePtrBitmapCleaner cleaner(_ptrmap, _ptr_base, relocatable_base, relocatable_end);
+ ArchivePtrBitmapCleaner cleaner(_ptrmap, ptr_base(), relocatable_base, relocatable_end);
_ptrmap->iterate(&cleaner);
compact(cleaner.max_non_null_offset());
}
@@ -147,16 +146,16 @@ char* DumpRegion::expand_top_to(char* newtop) {
assert(is_allocatable(), "must be initialized and not packed");
assert(newtop >= _top, "must not grow backwards");
if (newtop > _end) {
- MetaspaceShared::report_out_of_space(_name, newtop - _top);
+ ArchiveBuilder::current()->report_out_of_space(_name, newtop - _top);
ShouldNotReachHere();
}
- MetaspaceShared::commit_to(_rs, _vs, newtop);
+ commit_to(newtop);
_top = newtop;
- if (_rs == MetaspaceShared::shared_rs()) {
+ if (_max_delta > 0) {
uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop-1));
- if (delta > ArchiveBuilder::MAX_SHARED_DELTA) {
+ if (delta > _max_delta) {
// This is just a sanity check and should not appear in any real world usage. This
// happens only if you allocate more than 2GB of shared objects and would require
// millions of shared classes.
@@ -168,6 +167,39 @@ char* DumpRegion::expand_top_to(char* newtop) {
return _top;
}
+void DumpRegion::commit_to(char* newtop) {
+ Arguments::assert_is_dumping_archive();
+ char* base = _rs->base();
+ size_t need_committed_size = newtop - base;
+ size_t has_committed_size = _vs->committed_size();
+ if (need_committed_size < has_committed_size) {
+ return;
+ }
+
+ size_t min_bytes = need_committed_size - has_committed_size;
+ size_t preferred_bytes = 1 * M;
+ size_t uncommitted = _vs->reserved_size() - has_committed_size;
+
+ size_t commit = MAX2(min_bytes, preferred_bytes);
+ commit = MIN2(commit, uncommitted);
+ assert(commit <= uncommitted, "sanity");
+
+ if (!_vs->expand_by(commit, false)) {
+ vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
+ need_committed_size));
+ }
+
+ const char* which;
+ if (_rs->base() == (char*)MetaspaceShared::symbol_rs_base()) {
+ which = "symbol";
+ } else {
+ which = "shared";
+ }
+ log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
+ which, commit, _vs->actual_committed_size(), _vs->high());
+}
+
+
char* DumpRegion::allocate(size_t num_bytes) {
char* p = (char*)align_up(_top, (size_t)SharedSpaceObjectAlignment);
char* newtop = p + align_up(num_bytes, (size_t)SharedSpaceObjectAlignment);
@@ -204,8 +236,7 @@ void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t neede
void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
_rs = rs;
_vs = vs;
- // Start with 0 committed bytes. The memory will be committed as needed by
- // MetaspaceShared::commit_to().
+ // Start with 0 committed bytes. The memory will be committed as needed.
if (!_vs->initialize(*_rs, 0)) {
fatal("Unable to allocate memory for shared space");
}
diff --git a/src/hotspot/share/memory/archiveUtils.hpp b/src/hotspot/share/memory/archiveUtils.hpp
index d61f974135df8786e7167f1aaa4a7e6d4e94ca95..4121b955e4300d527b0e53c7abeb64083938d7cb 100644
--- a/src/hotspot/share/memory/archiveUtils.hpp
+++ b/src/hotspot/share/memory/archiveUtils.hpp
@@ -27,6 +27,7 @@
#include "logging/log.hpp"
#include "memory/iterator.hpp"
+#include "memory/virtualspace.hpp"
#include "runtime/arguments.hpp"
#include "utilities/bitMap.hpp"
@@ -39,15 +40,18 @@ class VirtualSpace;
// mark_pointer(/*ptr_loc=*/&k->_name). It's required that (_ptr_base <= ptr_loc < _ptr_end). _ptr_base is
// fixed, but _ptr_end can be expanded as more objects are dumped.
class ArchivePtrMarker : AllStatic {
- static CHeapBitMap* _ptrmap;
- static address* _ptr_base;
- static address* _ptr_end;
+ static CHeapBitMap* _ptrmap;
+ static VirtualSpace* _vs;
// Once _ptrmap is compacted, we don't allow bit marking anymore. This is to
// avoid unintentional copy operations after the bitmap has been finalized and written.
static bool _compacted;
+
+ static address* ptr_base() { return (address*)_vs->low(); } // committed lower bound (inclusive)
+ static address* ptr_end() { return (address*)_vs->high(); } // committed upper bound (exclusive)
+
public:
- static void initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end);
+ static void initialize(CHeapBitMap* ptrmap, VirtualSpace* vs);
static void mark_pointer(address* ptr_loc);
static void clear_pointer(address* ptr_loc);
static void compact(address relocatable_base, address relocatable_end);
@@ -64,11 +68,6 @@ public:
mark_pointer(ptr_loc);
}
- static void expand_ptr_end(address *new_ptr_end) {
- assert(_ptr_end <= new_ptr_end, "must be");
- _ptr_end = new_ptr_end;
- }
-
static CHeapBitMap* ptrmap() {
return _ptrmap;
}
@@ -128,12 +127,17 @@ private:
char* _base;
char* _top;
char* _end;
+ uintx _max_delta;
bool _is_packed;
ReservedSpace* _rs;
VirtualSpace* _vs;
+ void commit_to(char* newtop);
+
public:
- DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
+ DumpRegion(const char* name, uintx max_delta = 0)
+ : _name(name), _base(NULL), _top(NULL), _end(NULL),
+ _max_delta(max_delta), _is_packed(false) {}
char* expand_top_to(char* newtop);
char* allocate(size_t num_bytes);
diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp
index e7939d6f29e3af7de84dd4f06a61e4103f5014ed..8388f68c3592e3d1dc026e781a122763a1f9a1ab 100644
--- a/src/hotspot/share/memory/arena.cpp
+++ b/src/hotspot/share/memory/arena.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
diff --git a/src/hotspot/share/memory/classLoaderMetaspace.cpp b/src/hotspot/share/memory/classLoaderMetaspace.cpp
index 6ec474e6d9765da610ecd4c00d904e7b13a41824..a876f9e7adf49989c792058ac5f8050ed82e2270 100644
--- a/src/hotspot/share/memory/classLoaderMetaspace.cpp
+++ b/src/hotspot/share/memory/classLoaderMetaspace.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,6 +27,7 @@
#include "logging/log.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspace.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/internalStats.hpp"
#include "memory/metaspace/metaspaceArena.hpp"
diff --git a/src/hotspot/share/memory/cppVtables.cpp b/src/hotspot/share/memory/cppVtables.cpp
index 617c4f4c8ef0a6412f9c272c2cb665735dfa8561..0b791184d9e26f766040a4b215118d313b8d7411 100644
--- a/src/hotspot/share/memory/cppVtables.cpp
+++ b/src/hotspot/share/memory/cppVtables.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/archiveUtils.hpp"
+#include "memory/archiveBuilder.hpp"
#include "memory/cppVtables.hpp"
#include "memory/metaspaceShared.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
@@ -100,7 +101,7 @@ template <class T>
CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {
int n = get_vtable_length(name);
CppVtableInfo* info =
- (CppVtableInfo*)MetaspaceShared::misc_code_dump_space()->allocate(CppVtableInfo::byte_size(n));
+ (CppVtableInfo*)ArchiveBuilder::current()->rw_region()->allocate(CppVtableInfo::byte_size(n));
info->set_vtable_size(n);
initialize(name, info);
return info;
@@ -211,13 +212,16 @@ void CppVtableCloner<T>::init_orig_cpp_vtptr(int kind) {
// _index[InstanceKlass_Kind]->cloned_vtable() == ((intptr_t**)ik)[0]
CppVtableInfo** CppVtables::_index = NULL;
-char* CppVtables::dumptime_init() {
+char* CppVtables::dumptime_init(ArchiveBuilder* builder) {
assert(DumpSharedSpaces, "must");
size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(CppVtableInfo*);
- _index = (CppVtableInfo**)MetaspaceShared::misc_code_dump_space()->allocate(vtptrs_bytes);
+ _index = (CppVtableInfo**)builder->rw_region()->allocate(vtptrs_bytes);
CPP_VTABLE_TYPES_DO(ALLOCATE_AND_INITIALIZE_VTABLE);
+ size_t cpp_tables_size = builder->rw_region()->top() - builder->rw_region()->base();
+ builder->alloc_stats()->record_cpp_vtables((int)cpp_tables_size);
+
return (char*)_index;
}
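A note on the size accounting introduced in dumptime_init() above: rw_region()->top() - rw_region()->base() equals the vtable footprint only because the cloned vtables are the first allocation made in the rw region. It is a special case of the usual before/after measurement idiom, sketched here with the names from the patch:

    char* before = builder->rw_region()->top();          // fill pointer before allocating
    // ... allocate the CppVtableInfo clones into the rw region ...
    size_t used = builder->rw_region()->top() - before;  // bytes consumed by the clones
    builder->alloc_stats()->record_cpp_vtables((int)used);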
diff --git a/src/hotspot/share/memory/cppVtables.hpp b/src/hotspot/share/memory/cppVtables.hpp
index c476d67575519ebee67a9b388f5bab9b80a73dee..dbfe639cd6dfd197515f765a7ef5c3257b82d1ed 100644
--- a/src/hotspot/share/memory/cppVtables.hpp
+++ b/src/hotspot/share/memory/cppVtables.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "memory/allStatic.hpp"
#include "utilities/globalDefinitions.hpp"
+class ArchiveBuilder;
class Method;
class SerializeClosure;
class CppVtableInfo;
@@ -37,7 +38,7 @@ class CppVtableInfo;
class CppVtables : AllStatic {
static CppVtableInfo** _index;
public:
- static char* dumptime_init();
+ static char* dumptime_init(ArchiveBuilder* builder);
static void zero_archived_vtables();
static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
static void serialize(SerializeClosure* sc);
diff --git a/src/hotspot/share/memory/dumpAllocStats.cpp b/src/hotspot/share/memory/dumpAllocStats.cpp
index 23264ca11bf017c913f55a80ab41b25ec0511794..ffec46cd7f57c5e2fe2c8a5ac4b4aec1ddd2b102 100644
--- a/src/hotspot/share/memory/dumpAllocStats.cpp
+++ b/src/hotspot/share/memory/dumpAllocStats.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,29 +26,21 @@
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/dumpAllocStats.hpp"
-#include "memory/metaspaceShared.hpp"
-
-void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
- // Calculate size of data that was not allocated by Metaspace::allocate()
- MetaspaceSharedStats *stats = MetaspaceShared::stats();
+void DumpAllocStats::print_stats(int ro_all, int rw_all) {
// symbols
- _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
- _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;
+ _counts[RO][SymbolHashentryType] = _symbol_stats.hashentry_count;
+ _bytes [RO][SymbolHashentryType] = _symbol_stats.hashentry_bytes;
- _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
- _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;
+ _counts[RO][SymbolBucketType] = _symbol_stats.bucket_count;
+ _bytes [RO][SymbolBucketType] = _symbol_stats.bucket_bytes;
// strings
- _counts[RO][StringHashentryType] = stats->string.hashentry_count;
- _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;
-
- _counts[RO][StringBucketType] = stats->string.bucket_count;
- _bytes [RO][StringBucketType] = stats->string.bucket_bytes;
+ _counts[RO][StringHashentryType] = _string_stats.hashentry_count;
+ _bytes [RO][StringHashentryType] = _string_stats.hashentry_bytes;
- // TODO: count things like dictionary, vtable, etc
- _bytes[RW][OtherType] += mc_all;
- rw_all += mc_all; // mc is mapped Read/Write
+ _counts[RO][StringBucketType] = _string_stats.bucket_count;
+ _bytes [RO][StringBucketType] = _string_stats.bucket_bytes;
// prevent divide-by-zero
if (ro_all < 1) {
@@ -70,7 +62,7 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
LogMessage(cds) msg;
- msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):");
+ msg.debug("Detailed metadata info (excluding heap regions):");
msg.debug("%s", hdr);
msg.debug("%s", sep);
for (int type = 0; type < int(_number_of_types); type ++) {
@@ -115,4 +107,3 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
#undef fmt_stats
}
-
diff --git a/src/hotspot/share/memory/dumpAllocStats.hpp b/src/hotspot/share/memory/dumpAllocStats.hpp
index bb1e20b1191399aa26a49e5658e067945e5711d4..5bf7b9371a920042cf2ef420d5f1e69597fd1e1e 100644
--- a/src/hotspot/share/memory/dumpAllocStats.hpp
+++ b/src/hotspot/share/memory/dumpAllocStats.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#define SHARE_MEMORY_DUMPALLOCSTATS_HPP
#include "memory/allocation.hpp"
+#include "classfile/compactHashtable.hpp"
// This is for dumping detailed statistics for the allocations
// in the shared spaces.
@@ -40,6 +41,7 @@ public:
f(StringHashentry) \
f(StringBucket) \
f(ModulesNatives) \
+ f(CppVTables) \
f(Other)
enum Type {
@@ -57,17 +59,23 @@ public:
}
}
-public:
- enum { RO = 0, RW = 1 };
+ CompactHashtableStats _symbol_stats;
+ CompactHashtableStats _string_stats;
int _counts[2][_number_of_types];
int _bytes [2][_number_of_types];
+public:
+ enum { RO = 0, RW = 1 };
+
DumpAllocStats() {
memset(_counts, 0, sizeof(_counts));
memset(_bytes, 0, sizeof(_bytes));
};
+ CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
+ CompactHashtableStats* string_stats() { return &_string_stats; }
+
void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
int which = (read_only) ? RO : RW;
@@ -84,7 +92,12 @@ public:
int which = (read_only) ? RO : RW;
_bytes [which][OtherType] += byte_size;
}
- void print_stats(int ro_all, int rw_all, int mc_all);
+
+ void record_cpp_vtables(int byte_size) {
+ _bytes[RW][CppVTablesType] += byte_size;
+ }
+
+ void print_stats(int ro_all, int rw_all);
};
#endif // SHARE_MEMORY_DUMPALLOCSTATS_HPP
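With the symbol/string table statistics now owned by each DumpAllocStats instance instead of the old global MetaspaceSharedStats, a dump wires the writers up roughly as follows (a sketch only; the real call sites live in ArchiveBuilder and the compact-table writers, and num_symbols, vtables_bytes, ro_all, rw_all are placeholder values):

    DumpAllocStats stats;
    // CompactHashtableWriter records entry/bucket counts into the stats it is handed.
    CompactHashtableWriter writer(num_symbols, stats.symbol_stats());
    // ... add entries and dump the table ...
    stats.record_cpp_vtables(vtables_bytes);  // rw-region bytes used by cloned C++ vtables
    stats.print_stats(ro_all, rw_all);        // the old mc column is gone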
diff --git a/src/hotspot/share/memory/dynamicArchive.cpp b/src/hotspot/share/memory/dynamicArchive.cpp
index dc23c869de5839561d2fb447aaaae5f2759541b4..54a457561eb415d383a526db6dc1507b6e7942d8 100644
--- a/src/hotspot/share/memory/dynamicArchive.cpp
+++ b/src/hotspot/share/memory/dynamicArchive.cpp
@@ -92,35 +92,7 @@ public:
void write_archive(char* serialized_data);
public:
- DynamicArchiveBuilder() : ArchiveBuilder(MetaspaceShared::misc_code_dump_space(),
- MetaspaceShared::read_write_dump_space(),
- MetaspaceShared::read_only_dump_space()) {
- }
-
- void start_dump_space(DumpRegion* next) {
- address bottom = _last_verified_top;
- address top = (address)(current_dump_space()->top());
- _other_region_used_bytes += size_t(top - bottom);
-
- MetaspaceShared::pack_dump_space(current_dump_space(), next, MetaspaceShared::shared_rs());
- _current_dump_space = next;
- _num_dump_regions_used ++;
-
- _last_verified_top = (address)(current_dump_space()->top());
- }
-
- void verify_estimate_size(size_t estimate, const char* which) {
- address bottom = _last_verified_top;
- address top = (address)(current_dump_space()->top());
- size_t used = size_t(top - bottom) + _other_region_used_bytes;
- int diff = int(estimate) - int(used);
-
- log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
- assert(diff >= 0, "Estimate is too small");
-
- _last_verified_top = top;
- _other_region_used_bytes = 0;
- }
+ DynamicArchiveBuilder() : ArchiveBuilder() { }
// Do this before and after the archive dump to see if any corruption
// is caused by dynamic dumping.
@@ -140,28 +112,14 @@ public:
DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
SystemDictionaryShared::check_excluded_classes();
- gather_klasses_and_symbols();
-
- // mc space starts ...
- reserve_buffer();
init_header();
-
- allocate_method_trampolines();
- verify_estimate_size(_estimated_trampoline_bytes, "Trampolines");
-
gather_source_objs();
- // rw space starts ...
- start_dump_space(MetaspaceShared::read_write_dump_space());
+ reserve_buffer();
log_info(cds, dynamic)("Copying %d klasses and %d symbols",
klasses()->length(), symbols()->length());
-
- dump_rw_region();
-
- // ro space starts ...
- DumpRegion* ro_space = MetaspaceShared::read_only_dump_space();
- start_dump_space(ro_space);
- dump_ro_region();
+ dump_rw_metadata();
+ dump_ro_metadata();
relocate_metaspaceobj_embedded_pointers();
relocate_roots();
@@ -173,19 +131,20 @@ public:
// Note that these tables still point to the *original* objects, so
// they would need to call DynamicArchive::original_to_target() to
// get the correct addresses.
- assert(current_dump_space() == ro_space, "Must be RO space");
+ assert(current_dump_space() == ro_region(), "Must be RO space");
SymbolTable::write_to_archive(symbols());
+
+ ArchiveBuilder::OtherROAllocMark mark;
SystemDictionaryShared::write_to_archive(false);
- serialized_data = ro_space->top();
- WriteClosure wc(ro_space);
+ serialized_data = ro_region()->top();
+ WriteClosure wc(ro_region());
SymbolTable::serialize_shared_table_header(&wc, false);
SystemDictionaryShared::serialize_dictionary_headers(&wc, false);
}
verify_estimate_size(_estimated_hashtable_bytes, "Hashtables");
- update_method_trampolines();
sort_methods();
log_info(cds)("Make classes shareable");
@@ -333,9 +292,6 @@ void DynamicArchiveBuilder::remark_pointers_for_instance_klass(InstanceKlass* k,
}
void DynamicArchiveBuilder::write_archive(char* serialized_data) {
- int num_klasses = klasses()->length();
- int num_symbols = symbols()->length();
-
Array<u8>* table = FileMapInfo::saved_shared_path_table().table();
SharedPathTable runtime_table(table, FileMapInfo::shared_path_table().size());
_header->set_shared_path_table(runtime_table);
@@ -344,19 +300,8 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
assert(dynamic_info != NULL, "Sanity");
- // Now write the archived data including the file offsets.
- const char* archive_name = Arguments::GetSharedDynamicArchivePath();
- dynamic_info->open_for_write(archive_name);
- size_t bitmap_size_in_bytes;
- char* bitmap = MetaspaceShared::write_core_archive_regions(dynamic_info, NULL, NULL, bitmap_size_in_bytes);
- dynamic_info->set_requested_base((char*)MetaspaceShared::requested_base_address());
- dynamic_info->set_header_crc(dynamic_info->compute_header_crc());
- dynamic_info->write_header();
- dynamic_info->close();
-
- write_cds_map_to_log(dynamic_info, NULL, NULL,
- bitmap, bitmap_size_in_bytes);
- FREE_C_HEAP_ARRAY(char, bitmap);
+ dynamic_info->open_for_write(Arguments::GetSharedDynamicArchivePath());
+ ArchiveBuilder::write_archive(dynamic_info, NULL, NULL, NULL, NULL);
address base = _requested_dynamic_archive_bottom;
address top = _requested_dynamic_archive_top;
@@ -366,13 +311,13 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
" [" SIZE_FORMAT " bytes header, " SIZE_FORMAT " bytes total]",
p2i(base), p2i(top), _header->header_size(), file_size);
- log_info(cds, dynamic)("%d klasses; %d symbols", num_klasses, num_symbols);
+ log_info(cds, dynamic)("%d klasses; %d symbols", klasses()->length(), symbols()->length());
}
class VM_PopulateDynamicDumpSharedSpace: public VM_GC_Sync_Operation {
- DynamicArchiveBuilder* _builder;
+ DynamicArchiveBuilder builder;
public:
- VM_PopulateDynamicDumpSharedSpace(DynamicArchiveBuilder* builder) : VM_GC_Sync_Operation(), _builder(builder) {}
+ VM_PopulateDynamicDumpSharedSpace() : VM_GC_Sync_Operation() {}
VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
void doit() {
ResourceMark rm;
@@ -386,7 +331,7 @@ public:
}
FileMapInfo::check_nonempty_dir_in_shared_path_table();
- _builder->doit();
+ builder.doit();
}
};
@@ -397,8 +342,7 @@ void DynamicArchive::dump() {
return;
}
- DynamicArchiveBuilder builder;
- VM_PopulateDynamicDumpSharedSpace op(&builder);
+ VM_PopulateDynamicDumpSharedSpace op;
VMThread::execute(&op);
}
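A design note on the VM-operation change above: the builder is now embedded in the operation by value, so its lifetime is exactly that of the safepoint operation and no caller has to construct it separately and pass a pointer in. Reduced to its essentials, the shape is:

    class VM_PopulateDynamicDumpSharedSpace : public VM_GC_Sync_Operation {
      DynamicArchiveBuilder builder;            // constructed and destroyed with the op
    public:
      VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
      void doit()           { builder.doit(); } // runs entirely inside the safepoint
    };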
diff --git a/src/hotspot/share/memory/filemap.cpp b/src/hotspot/share/memory/filemap.cpp
index 551a93b89c7f828307d249b1e39770f3a14e860b..3fea397fc60c818cab742d0a02a850ee99d00d82 100644
--- a/src/hotspot/share/memory/filemap.cpp
+++ b/src/hotspot/share/memory/filemap.cpp
@@ -280,7 +280,6 @@ void FileMapHeader::print(outputStream* st) {
st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs);
st->print_cr("- cloned_vtables_offset: " SIZE_FORMAT_HEX, _cloned_vtables_offset);
st->print_cr("- serialized_data_offset: " SIZE_FORMAT_HEX, _serialized_data_offset);
- st->print_cr("- i2i_entry_code_buffers_offset: " SIZE_FORMAT_HEX, _i2i_entry_code_buffers_offset);
st->print_cr("- heap_end: " INTPTR_FORMAT, p2i(_heap_end));
st->print_cr("- base_archive_is_default: %d", _base_archive_is_default);
st->print_cr("- jvm_ident: %s", _jvm_ident);
@@ -304,7 +303,7 @@ void FileMapHeader::print(outputStream* st) {
void SharedClassPathEntry::init_as_non_existent(const char* path, TRAPS) {
_type = non_existent_entry;
- set_name(path, THREAD);
+ set_name(path, CHECK);
}
void SharedClassPathEntry::init(bool is_modules_image,
@@ -343,12 +342,12 @@ void SharedClassPathEntry::init(bool is_modules_image,
// No need to save the name of the module file, as it will be computed at run time
// to allow relocation of the JDK directory.
const char* name = is_modules_image ? "" : cpe->name();
- set_name(name, THREAD);
+ set_name(name, CHECK);
}
void SharedClassPathEntry::set_name(const char* name, TRAPS) {
size_t len = strlen(name) + 1;
- _name = MetadataFactory::new_array<char>(ClassLoaderData::the_null_class_loader_data(), (int)len, THREAD);
+ _name = MetadataFactory::new_array<char>(ClassLoaderData::the_null_class_loader_data(), (int)len, CHECK);
strcpy(_name->data(), name);
}
@@ -358,12 +357,12 @@ void SharedClassPathEntry::copy_from(SharedClassPathEntry* ent, ClassLoaderData*
_timestamp = ent->_timestamp;
_filesize = ent->_filesize;
_from_class_path_attr = ent->_from_class_path_attr;
- set_name(ent->name(), THREAD);
+ set_name(ent->name(), CHECK);
if (ent->is_jar() && !ent->is_signed() && ent->manifest() != NULL) {
Array<u8>* buf = MetadataFactory::new_array<u8>(loader_data,
ent->manifest_size(),
- THREAD);
+ CHECK);
char* p = (char*)(buf->data());
memcpy(p, ent->manifest(), ent->manifest_size());
set_manifest(buf);
@@ -449,7 +448,7 @@ void SharedPathTable::metaspace_pointers_do(MetaspaceClosure* it) {
}
}
-void SharedPathTable::dumptime_init(ClassLoaderData* loader_data, Thread* THREAD) {
+void SharedPathTable::dumptime_init(ClassLoaderData* loader_data, TRAPS) {
size_t entry_size = sizeof(SharedClassPathEntry);
int num_entries = 0;
num_entries += ClassLoader::num_boot_classpath_entries();
@@ -458,7 +457,7 @@ void SharedPathTable::dumptime_init(ClassLoaderData* loader_data, Thread* THREAD
num_entries += FileMapInfo::num_non_existent_class_paths();
size_t bytes = entry_size * num_entries;
- _table = MetadataFactory::new_array<u8>(loader_data, (int)bytes, THREAD);
+ _table = MetadataFactory::new_array<u8>(loader_data, (int)bytes, CHECK);
_size = num_entries;
}
@@ -466,44 +465,43 @@ void SharedPathTable::dumptime_init(ClassLoaderData* loader_data, Thread* THREAD
// It is needed because some Java code continues to execute after dynamic dump has finished.
// However, during dynamic dump, we have modified FileMapInfo::_shared_path_table so
// FileMapInfo::shared_path(i) returns incorrect information in ClassLoader::record_result().
-void FileMapInfo::copy_shared_path_table(ClassLoaderData* loader_data, Thread* THREAD) {
+void FileMapInfo::copy_shared_path_table(ClassLoaderData* loader_data, TRAPS) {
size_t entry_size = sizeof(SharedClassPathEntry);
size_t bytes = entry_size * _shared_path_table.size();
- _saved_shared_path_table = SharedPathTable(MetadataFactory::new_array<u8>(loader_data, (int)bytes, THREAD),
- _shared_path_table.size());
+ Array<u8>* array = MetadataFactory::new_array<u8>(loader_data, (int)bytes, CHECK);
+ _saved_shared_path_table = SharedPathTable(array, _shared_path_table.size());
for (int i = 0; i < _shared_path_table.size(); i++) {
- _saved_shared_path_table.path_at(i)->copy_from(shared_path(i), loader_data, THREAD);
+ _saved_shared_path_table.path_at(i)->copy_from(shared_path(i), loader_data, CHECK);
}
}
-void FileMapInfo::allocate_shared_path_table() {
+void FileMapInfo::allocate_shared_path_table(TRAPS) {
Arguments::assert_is_dumping_archive();
- EXCEPTION_MARK; // The following calls should never throw, but would exit VM on error.
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
ClassPathEntry* jrt = ClassLoader::get_jrt_entry();
assert(jrt != NULL,
"No modular java runtime image present when allocating the CDS classpath entry table");
- _shared_path_table.dumptime_init(loader_data, THREAD);
+ _shared_path_table.dumptime_init(loader_data, CHECK);
// 1. boot class path
int i = 0;
- i = add_shared_classpaths(i, "boot", jrt, THREAD);
- i = add_shared_classpaths(i, "app", ClassLoader::app_classpath_entries(), THREAD);
- i = add_shared_classpaths(i, "module", ClassLoader::module_path_entries(), THREAD);
+ i = add_shared_classpaths(i, "boot", jrt, CHECK);
+ i = add_shared_classpaths(i, "app", ClassLoader::app_classpath_entries(), CHECK);
+ i = add_shared_classpaths(i, "module", ClassLoader::module_path_entries(), CHECK);
for (int x = 0; x < num_non_existent_class_paths(); x++, i++) {
const char* path = _non_existent_class_paths->at(x);
- shared_path(i)->init_as_non_existent(path, THREAD);
+ shared_path(i)->init_as_non_existent(path, CHECK);
}
assert(i == _shared_path_table.size(), "number of shared path entry mismatch");
- copy_shared_path_table(loader_data, THREAD);
+ copy_shared_path_table(loader_data, CHECK);
}
int FileMapInfo::add_shared_classpaths(int i, const char* which, ClassPathEntry *cpe, TRAPS) {
@@ -513,9 +511,9 @@ int FileMapInfo::add_shared_classpaths(int i, const char* which, ClassPathEntry
const char* type = (is_jrt ? "jrt" : (cpe->is_jar_file() ? "jar" : "dir"));
log_info(class, path)("add %s shared path (%s) %s", which, type, cpe->name());
SharedClassPathEntry* ent = shared_path(i);
- ent->init(is_jrt, is_module_path, cpe, THREAD);
+ ent->init(is_jrt, is_module_path, cpe, CHECK_0);
if (cpe->is_jar_file()) {
- update_jar_manifest(cpe, ent, THREAD);
+ update_jar_manifest(cpe, ent, CHECK_0);
}
if (is_jrt) {
cpe = ClassLoader::get_next_boot_classpath_entry(cpe);
@@ -670,7 +668,7 @@ void FileMapInfo::update_jar_manifest(ClassPathEntry *cpe, SharedClassPathEntry*
manifest = ClassLoaderExt::read_raw_manifest(cpe, &manifest_size, CHECK);
Array<u8>* buf = MetadataFactory::new_array<u8>(loader_data,
manifest_size,
- THREAD);
+ CHECK);
char* p = (char*)(buf->data());
memcpy(p, manifest, manifest_size);
ent->set_manifest(buf);
@@ -1272,7 +1270,7 @@ void FileMapRegion::init(int region_index, size_t mapping_offset, size_t size, b
static const char* region_name(int region_index) {
static const char* names[] = {
- "mc", "rw", "ro", "bm", "ca0", "ca1", "oa0", "oa1"
+ "rw", "ro", "bm", "ca0", "ca1", "oa0", "oa1"
};
const int num_regions = sizeof(names)/sizeof(names[0]);
assert(0 <= region_index && region_index < num_regions, "sanity");
@@ -1533,7 +1531,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
}
// Memory map a region in the address space.
-static const char* shared_region_name[] = { "MiscCode", "ReadWrite", "ReadOnly", "Bitmap",
+static const char* shared_region_name[] = { "ReadWrite", "ReadOnly", "Bitmap",
"String1", "String2", "OpenArchive1", "OpenArchive2" };
MapArchiveResult FileMapInfo::map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs) {
@@ -1678,7 +1676,7 @@ char* FileMapInfo::map_bitmap_region() {
}
// This is called when we cannot map the archive at the requested base address (usually 0x800000000).
-// We relocate all pointers in the 3 core regions (mc, ro, rw).
+// We relocate all pointers in the 2 core regions (ro, rw).
bool FileMapInfo::relocate_pointers_in_core_regions(intx addr_delta) {
log_debug(cds, reloc)("runtime archive relocation start");
char* bitmap_base = map_bitmap_region();
@@ -2173,9 +2171,9 @@ char* FileMapInfo::region_addr(int idx) {
}
}
-// The 3 core spaces are MC->RW->RO
+// The 2 core spaces are RW->RO
FileMapRegion* FileMapInfo::first_core_space() const {
- return space_at(MetaspaceShared::mc);
+ return space_at(MetaspaceShared::rw);
}
FileMapRegion* FileMapInfo::last_core_space() const {
@@ -2281,8 +2279,7 @@ bool FileMapInfo::validate_header() {
// Check if a given address is within one of the shared regions
bool FileMapInfo::is_in_shared_region(const void* p, int idx) {
assert(idx == MetaspaceShared::ro ||
- idx == MetaspaceShared::rw ||
- idx == MetaspaceShared::mc, "invalid region index");
+ idx == MetaspaceShared::rw, "invalid region index");
char* base = region_addr(idx);
if (p >= base && p < base + space_at(idx)->used()) {
return true;
@@ -2364,7 +2361,8 @@ ClassFileStream* FileMapInfo::open_stream_for_jvmti(InstanceKlass* ik, Handle cl
name->utf8_length());
ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
ClassFileStream* cfs = cpe->open_stream_for_loader(file_name, loader_data, THREAD);
- assert(cfs != NULL, "must be able to read the classfile data of shared classes for built-in loaders.");
+ assert(!HAS_PENDING_EXCEPTION &&
+ cfs != NULL, "must be able to read the classfile data of shared classes for built-in loaders.");
log_debug(cds, jvmti)("classfile data for %s [%d: %s] = %d bytes", class_name, path_index,
cfs->source(), cfs->length());
return cfs;
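The THREAD-to-CHECK conversions throughout this file rely on HotSpot's exception-propagation macros from utilities/exceptions.hpp. In simplified form (eliding the CHECK_0/CHECK_NULL variants used above, which additionally return a value):

    // A function declared with TRAPS receives the current thread as its
    // trailing parameter; CHECK both passes the thread and bails out early.
    void callee(TRAPS);

    void caller(TRAPS) {
      callee(CHECK);  // roughly: callee(THREAD); if (HAS_PENDING_EXCEPTION) return;
      // Passing plain THREAD instead would leave a pending exception unexamined,
      // which is exactly what these changes eliminate.
    }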
diff --git a/src/hotspot/share/memory/filemap.hpp b/src/hotspot/share/memory/filemap.hpp
index b402f9955903581efd83562b3b11f60b9e9231ce..457ce4459142ecacbf18761d909f8919159b6f2d 100644
--- a/src/hotspot/share/memory/filemap.hpp
+++ b/src/hotspot/share/memory/filemap.hpp
@@ -25,25 +25,23 @@
#ifndef SHARE_MEMORY_FILEMAP_HPP
#define SHARE_MEMORY_FILEMAP_HPP
-#include "classfile/classLoader.hpp"
#include "include/cds.h"
#include "memory/metaspaceShared.hpp"
-#include "memory/metaspace.hpp"
#include "oops/compressedOops.hpp"
#include "utilities/align.hpp"
-// Layout of the file:
-// header: dump of archive instance plus versioning info, datestamp, etc.
-// [magic # = 0xF00BABA2]
-// ... padding to align on page-boundary
-// read-write space
-// read-only space
-// misc data (block offset table, string table, symbols, dictionary, etc.)
-// tag(666)
+// To understand the layout of the CDS archive file:
+//
+// java -Xlog:cds+map=info:file=cds.map:none:filesize=0
+// java -Xlog:cds+map=debug:file=cds.map:none:filesize=0
+// java -Xlog:cds+map=trace:file=cds.map:none:filesize=0
static const int JVM_IDENT_MAX = 256;
class CHeapBitMap;
+class ClassFileStream;
+class ClassLoaderData;
+class ClassPathEntry;
class outputStream;
class SharedClassPathEntry {
@@ -117,7 +115,7 @@ public:
SharedPathTable() : _table(NULL), _size(0) {}
SharedPathTable(Array<u8>* table, int size) : _table(table), _size(size) {}
- void dumptime_init(ClassLoaderData* loader_data, Thread* THREAD);
+ void dumptime_init(ClassLoaderData* loader_data, TRAPS);
void metaspace_pointers_do(MetaspaceClosure* it);
int size() {
@@ -201,7 +199,6 @@ class FileMapHeader: private CDSFileMapHeaderBase {
bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers
size_t _cloned_vtables_offset; // The address of the first cloned vtable
size_t _serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize()
- size_t _i2i_entry_code_buffers_offset;
address _heap_end; // heap end at dump time.
bool _base_archive_is_default; // indicates if the base archive is the system default one
@@ -265,7 +262,6 @@ public:
address narrow_klass_base() const { return (address)mapped_base_address(); }
char* cloned_vtables() const { return from_mapped_offset(_cloned_vtables_offset); }
char* serialized_data() const { return from_mapped_offset(_serialized_data_offset); }
- address i2i_entry_code_buffers() const { return (address)from_mapped_offset(_i2i_entry_code_buffers_offset); }
address heap_end() const { return _heap_end; }
bool base_archive_is_default() const { return _base_archive_is_default; }
const char* jvm_ident() const { return _jvm_ident; }
@@ -292,9 +288,6 @@ public:
void set_ptrmap_size_in_bits(size_t s) { _ptrmap_size_in_bits = s; }
void set_mapped_base_address(char* p) { _mapped_base_address = p; }
void set_heap_obj_roots(narrowOop r) { _heap_obj_roots = r; }
- void set_i2i_entry_code_buffers(address p) {
- set_as_offset((char*)p, &_i2i_entry_code_buffers_offset);
- }
void set_shared_path_table(SharedPathTable table) {
set_as_offset((char*)table.table(), &_shared_path_table_offset);
@@ -409,11 +402,6 @@ public:
bool is_file_position_aligned() const;
void align_file_position();
- address i2i_entry_code_buffers() const { return header()->i2i_entry_code_buffers(); }
- void set_i2i_entry_code_buffers(address addr) const {
- header()->set_i2i_entry_code_buffers(addr);
- }
-
bool is_static() const { return _is_static; }
bool is_mapped() const { return _is_mapped; }
void set_is_mapped(bool v) { _is_mapped = v; }
@@ -498,8 +486,8 @@ public:
// Stop CDS sharing and unmap CDS regions.
static void stop_sharing_and_unmap(const char* msg);
- static void allocate_shared_path_table();
- static void copy_shared_path_table(ClassLoaderData* loader_data, Thread* THREAD);
+ static void allocate_shared_path_table(TRAPS);
+ static void copy_shared_path_table(ClassLoaderData* loader_data, TRAPS);
static int add_shared_classpaths(int i, const char* which, ClassPathEntry *cpe, TRAPS);
static void check_nonempty_dir_in_shared_path_table();
bool validate_shared_path_table();
diff --git a/src/hotspot/share/memory/heap.cpp b/src/hotspot/share/memory/heap.cpp
index 098095fb29353df2116276c940a4e4f16fcc2ab5..fa9de51598078464108af82d41c6b1cc6526f7a7 100644
--- a/src/hotspot/share/memory/heap.cpp
+++ b/src/hotspot/share/memory/heap.cpp
@@ -207,17 +207,12 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
_log2_segment_size = exact_log2(segment_size);
// Reserve and initialize space for _memory.
- size_t page_size = os::vm_page_size();
- if (os::can_execute_large_page_memory()) {
- const size_t min_pages = 8;
- page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
- os::page_size_for_region_aligned(rs.size(), min_pages));
- }
-
+ const size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
const size_t granularity = os::vm_allocation_granularity();
const size_t c_size = align_up(committed_size, page_size);
+ assert(c_size <= rs.size(), "alignment made committed size too large");
- os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
+ os::trace_page_sizes(_name, c_size, rs.size(), page_size,
rs.base(), rs.size());
if (!_memory.initialize(rs, c_size)) {
return false;
diff --git a/src/hotspot/share/memory/heapShared.cpp b/src/hotspot/share/memory/heapShared.cpp
index 3fd2c228079ba51d59dd3c4f5e4dce792b83c7a8..b2471029c644b6ea466f9d2a48b47b217e81c0bc 100644
--- a/src/hotspot/share/memory/heapShared.cpp
+++ b/src/hotspot/share/memory/heapShared.cpp
@@ -298,7 +298,7 @@ oop HeapShared::archive_heap_object(oop obj) {
}
void HeapShared::archive_klass_objects() {
- GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
+ GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
assert(klasses != NULL, "sanity");
for (int i = 0; i < klasses->length(); i++) {
Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));
@@ -573,7 +573,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
int num_entry_fields = entry_fields->length();
assert(num_entry_fields % 2 == 0, "sanity");
_entry_field_records =
- MetaspaceShared::new_ro_array<int>(num_entry_fields);
+ ArchiveBuilder::new_ro_array<int>(num_entry_fields);
for (int i = 0 ; i < num_entry_fields; i++) {
_entry_field_records->at_put(i, entry_fields->at(i));
}
@@ -584,7 +584,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
if (subgraph_object_klasses != NULL) {
int num_subgraphs_klasses = subgraph_object_klasses->length();
_subgraph_object_klasses =
- MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
+ ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
for (int i = 0; i < num_subgraphs_klasses; i++) {
Klass* subgraph_k = subgraph_object_klasses->at(i);
if (log_is_enabled(Info, cds, heap)) {
@@ -610,7 +610,7 @@ struct CopyKlassSubGraphInfoToArchive : StackObj {
bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
ArchivedKlassSubGraphInfoRecord* record =
- (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
+ (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
record->init(&info);
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)klass);
diff --git a/src/hotspot/share/memory/metadataFactory.hpp b/src/hotspot/share/memory/metadataFactory.hpp
index d18f1301120b871f2659acf84bd535bc4830b6f5..45237efd6780bb46568f78a14694f4feb5e6300e 100644
--- a/src/hotspot/share/memory/metadataFactory.hpp
+++ b/src/hotspot/share/memory/metadataFactory.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "classfile/classLoaderData.hpp"
#include "memory/classLoaderMetaspace.hpp"
-#include "oops/array.hpp"
+#include "oops/array.inline.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp
index 4462b681680dbb4dc45bbc8c213476a9fc4a89f0..87a0e5764b8469fb716a036a1c0fd346a8c5ef81 100644
--- a/src/hotspot/share/memory/metaspace.cpp
+++ b/src/hotspot/share/memory/metaspace.cpp
@@ -43,6 +43,7 @@
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
diff --git a/src/hotspot/share/memory/metaspace.hpp b/src/hotspot/share/memory/metaspace.hpp
index 182660f01125d8cb62ecb04976b955be8b6b90cd..1309a2fe3adaff4a539e048909f46d5a2370555c 100644
--- a/src/hotspot/share/memory/metaspace.hpp
+++ b/src/hotspot/share/memory/metaspace.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,6 @@
#define SHARE_MEMORY_METASPACE_HPP
#include "memory/allocation.hpp"
-#include "memory/memRegion.hpp"
-#include "memory/metaspaceChunkFreeListSummary.hpp"
-#include "memory/virtualspace.hpp"
#include "runtime/globals.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -37,6 +34,7 @@ class MetaspaceShared;
class MetaspaceTracer;
class Mutex;
class outputStream;
+class ReservedSpace;
namespace metaspace {
class MetaspaceSizesSnapshot;
@@ -152,115 +150,5 @@ public:
};
-////////////////// MetaspaceGC ///////////////////////
-
-// Metaspace are deallocated when their class loader are GC'ed.
-// This class implements a policy for inducing GC's to recover
-// Metaspaces.
-
-class MetaspaceGCThresholdUpdater : public AllStatic {
- public:
- enum Type {
- ComputeNewSize,
- ExpandAndAllocate,
- Last
- };
-
- static const char* to_string(MetaspaceGCThresholdUpdater::Type updater) {
- switch (updater) {
- case ComputeNewSize:
- return "compute_new_size";
- case ExpandAndAllocate:
- return "expand_and_allocate";
- default:
- assert(false, "Got bad updater: %d", (int) updater);
- return NULL;
- };
- }
-};
-
-class MetaspaceGC : public AllStatic {
-
- // The current high-water-mark for inducing a GC.
- // When committed memory of all metaspaces reaches this value,
- // a GC is induced and the value is increased. Size is in bytes.
- static volatile size_t _capacity_until_GC;
- static uint _shrink_factor;
-
- static size_t shrink_factor() { return _shrink_factor; }
- void set_shrink_factor(uint v) { _shrink_factor = v; }
-
- public:
-
- static void initialize();
- static void post_initialize();
-
- static size_t capacity_until_GC();
- static bool inc_capacity_until_GC(size_t v,
- size_t* new_cap_until_GC = NULL,
- size_t* old_cap_until_GC = NULL,
- bool* can_retry = NULL);
- static size_t dec_capacity_until_GC(size_t v);
-
- // The amount to increase the high-water-mark (_capacity_until_GC)
- static size_t delta_capacity_until_GC(size_t bytes);
-
- // Tells if we have can expand metaspace without hitting set limits.
- static bool can_expand(size_t words, bool is_class);
-
- // Returns amount that we can expand without hitting a GC,
- // measured in words.
- static size_t allowed_expansion();
-
- // Calculate the new high-water mark at which to induce
- // a GC.
- static void compute_new_size();
-};
-
-class MetaspaceUtils : AllStatic {
-public:
-
- // Committed space actually in use by Metadata
- static size_t used_words();
- static size_t used_words(Metaspace::MetadataType mdtype);
-
- // Space committed for Metaspace
- static size_t committed_words();
- static size_t committed_words(Metaspace::MetadataType mdtype);
-
- // Space reserved for Metaspace
- static size_t reserved_words();
- static size_t reserved_words(Metaspace::MetadataType mdtype);
-
- // _bytes() variants for convenience...
- static size_t used_bytes() { return used_words() * BytesPerWord; }
- static size_t used_bytes(Metaspace::MetadataType mdtype) { return used_words(mdtype) * BytesPerWord; }
- static size_t committed_bytes() { return committed_words() * BytesPerWord; }
- static size_t committed_bytes(Metaspace::MetadataType mdtype) { return committed_words(mdtype) * BytesPerWord; }
- static size_t reserved_bytes() { return reserved_words() * BytesPerWord; }
- static size_t reserved_bytes(Metaspace::MetadataType mdtype) { return reserved_words(mdtype) * BytesPerWord; }
-
- // (See JDK-8251342). Implement or Consolidate.
- static MetaspaceChunkFreeListSummary chunk_free_list_summary(Metaspace::MetadataType mdtype) {
- return MetaspaceChunkFreeListSummary(0,0,0,0,0,0,0,0);
- }
-
- // Log change in used metadata.
- static void print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values);
-
- // This will print out a basic metaspace usage report but
- // unlike print_report() is guaranteed not to lock or to walk the CLDG.
- static void print_basic_report(outputStream* st, size_t scale = 0);
-
- // Prints a report about the current metaspace state.
- // Function will walk the CLDG and will lock the expand lock; if that is not
- // convenient, use print_basic_report() instead.
- static void print_report(outputStream* out, size_t scale = 0);
-
- static void print_on(outputStream * out);
-
- DEBUG_ONLY(static void verify();)
-
-};
#endif // SHARE_MEMORY_METASPACE_HPP
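Code that used the MetaspaceUtils APIs removed from this header now picks them up from the new memory/metaspaceUtils.hpp instead, as the include additions elsewhere in this patch show; the API itself is unchanged, e.g. (a sketch):

    #include "memory/metaspaceUtils.hpp"

    // Same queries, new home:
    size_t committed = MetaspaceUtils::committed_bytes();
    size_t reserved  = MetaspaceUtils::reserved_bytes();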
diff --git a/src/hotspot/share/memory/metaspace/commitLimiter.cpp b/src/hotspot/share/memory/metaspace/commitLimiter.cpp
index 94bffb0492a81a4b8187763a8618e8928f527e5f..8887804b030d11d2be117d01aae63cf262b67194 100644
--- a/src/hotspot/share/memory/metaspace/commitLimiter.cpp
+++ b/src/hotspot/share/memory/metaspace/commitLimiter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/commitLimiter.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
diff --git a/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp b/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp
index 7e0e0969f7c9e0ef992533204132a801fcb59e27..d54c9d236b904edefbec88b553c1ad6d18c672e8 100644
--- a/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp
+++ b/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -24,9 +24,12 @@
*/
#include "precompiled.hpp"
+#include "memory/metaspace.hpp"
#include "memory/metaspace/metaspaceDCmd.hpp"
#include "memory/metaspace/metaspaceReporter.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/vmOperations.hpp"
#include "services/diagnosticCommand.hpp"
#include "services/nmtCommon.hpp"
diff --git a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp
index d772d07162530e20751842b4649688b142a6e758..f055f78730526640f2ea907c020ea7e0da9cf7bf 100644
--- a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp
+++ b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -37,6 +37,7 @@
#include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "runtime/os.hpp"
namespace metaspace {
@@ -95,7 +96,8 @@ static void print_vs(outputStream* out, size_t scale) {
static void print_settings(outputStream* out, size_t scale) {
out->print("MaxMetaspaceSize: ");
- if (MaxMetaspaceSize >= (max_uintx) - (2 * os::vm_page_size())) {
+ // See Metaspace::ergo_initialize() for how MaxMetaspaceSize is rounded
+ if (MaxMetaspaceSize >= align_down(max_uintx, Metaspace::commit_alignment())) {
// aka "very big". Default is max_uintx, but due to rounding in arg parsing the real
// value is smaller.
out->print("unlimited");
@@ -106,8 +108,18 @@ static void print_settings(outputStream* out, size_t scale) {
if (Metaspace::using_class_space()) {
out->print("CompressedClassSpaceSize: ");
print_human_readable_size(out, CompressedClassSpaceSize, scale);
+ } else {
+ out->print("No class space");
}
out->cr();
+ out->print("Initial GC threshold: ");
+ print_human_readable_size(out, MetaspaceSize, scale);
+ out->cr();
+ out->print("Current GC threshold: ");
+ print_human_readable_size(out, MetaspaceGC::capacity_until_GC(), scale);
+ out->cr();
+ out->print_cr("CDS: %s", (UseSharedSpaces ? "on" : (DumpSharedSpaces ? "dump" : "off")));
+ out->print_cr("MetaspaceReclaimPolicy: %s", MetaspaceReclaimPolicy);
Settings::print_on(out);
}
diff --git a/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp b/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp
index 26e07441504d22daf00d700ac5fcfdf93cefd524..b1d7269c629c868b2b36b407101fde6eb1129291 100644
--- a/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp
+++ b/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Twitter, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
+#include "memory/metaspaceUtils.hpp"
namespace metaspace {
diff --git a/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.cpp b/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.cpp
index b100bf957c0276fdcb57b39ebff5833da583bff5..f852a5782ef92d012bd15b5a97f2d659cc47e3ff 100644
--- a/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.cpp
+++ b/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.cpp
@@ -24,7 +24,6 @@
*/
#include "precompiled.hpp"
#include "memory/metaspace/printMetaspaceInfoKlassClosure.hpp"
-#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.hpp"
#include "oops/reflectionAccessorImplKlassHelper.hpp"
diff --git a/src/hotspot/share/memory/metaspaceCounters.cpp b/src/hotspot/share/memory/metaspaceCounters.cpp
index 09e0f47c4addd6c5f640ae4ed2193abc0b9adbfd..34f2011f2e624b4f744259211c8647d8b27cd243 100644
--- a/src/hotspot/share/memory/metaspaceCounters.cpp
+++ b/src/hotspot/share/memory/metaspaceCounters.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceCounters.hpp"
+#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/globals.hpp"
#include "runtime/perfData.hpp"
diff --git a/src/hotspot/share/memory/metaspaceShared.cpp b/src/hotspot/share/memory/metaspaceShared.cpp
index 7603b4a4a5740f3272b1acbecfcf68bb66df2671..cdadceaf345c2820c9a28bde0cb5c4a62ec76de4 100644
--- a/src/hotspot/share/memory/metaspaceShared.cpp
+++ b/src/hotspot/share/memory/metaspaceShared.cpp
@@ -40,18 +40,15 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcVMOperations.hpp"
-#include "interpreter/abstractInterpreter.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/archiveBuilder.hpp"
-#include "memory/archiveUtils.inline.hpp"
#include "memory/cppVtables.hpp"
#include "memory/dumpAllocStats.hpp"
-#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
-#include "memory/heapShared.inline.hpp"
+#include "memory/heapShared.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
@@ -75,20 +72,15 @@
#include "utilities/bitMap.inline.hpp"
#include "utilities/ostream.hpp"
#include "utilities/defaultStream.hpp"
-#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.inline.hpp"
#endif
-ReservedSpace MetaspaceShared::_shared_rs;
-VirtualSpace MetaspaceShared::_shared_vs;
ReservedSpace MetaspaceShared::_symbol_rs;
VirtualSpace MetaspaceShared::_symbol_vs;
-MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
-address MetaspaceShared::_i2i_entry_code_buffers = NULL;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
intx MetaspaceShared::_relocation_delta;
char* MetaspaceShared::_requested_base_address;
@@ -96,7 +88,6 @@ bool MetaspaceShared::_use_optimized_module_handling = true;
bool MetaspaceShared::_use_full_module_graph = true;
// The CDS archive is divided into the following regions:
-// mc - misc code (the method entry trampolines, c++ vtables)
// rw - read-write metadata
// ro - read-only metadata and read-only tables
//
@@ -105,63 +96,30 @@ bool MetaspaceShared::_use_full_module_graph = true;
// oa0 - open archive heap space #0
// oa1 - open archive heap space #1 (may be empty)
//
-// The mc, rw, and ro regions are linearly allocated, starting from
-// SharedBaseAddress, in the order of mc->rw->ro. The size of these 3 regions
-// are page-aligned, and there's no gap between any consecutive regions.
+// bm - bitmap for relocating the above 6 regions.
//
-// These 3 regions are populated in the following steps:
-// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
-// temporarily allocated outside of the shared regions. Only the method entry
-// trampolines are written into the mc region.
-// [2] C++ vtables are copied into the mc region.
+// The rw and ro regions are linearly allocated, in the order of rw->ro.
+// These regions are aligned with MetaspaceShared::reserved_space_alignment().
+//
+// These 2 regions are populated in the following steps:
+// [0] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
+// temporarily allocated outside of the shared regions.
+// [1] We enter a safepoint and allocate a buffer for the rw/ro regions.
+// [2] C++ vtables are copied into the rw region.
// [3] ArchiveBuilder copies RW metadata into the rw region.
// [4] ArchiveBuilder copies RO metadata into the ro region.
// [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
// are copied into the ro region as read-only tables.
//
-// The s0/s1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
-// Their layout is independent of the other 4 regions.
-
-static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
-static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
-
-void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space) {
- first_space->init(&_shared_rs, &_shared_vs);
-}
-
-DumpRegion* MetaspaceShared::misc_code_dump_space() {
- return &_mc_region;
-}
-
-DumpRegion* MetaspaceShared::read_write_dump_space() {
- return &_rw_region;
-}
-
-DumpRegion* MetaspaceShared::read_only_dump_space() {
- return &_ro_region;
-}
+// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
+// Their layout is independent of the rw/ro regions.
-void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
- ReservedSpace* rs) {
- current->pack(next);
-}
+static DumpRegion _symbol_region("symbols");
char* MetaspaceShared::symbol_space_alloc(size_t num_bytes) {
return _symbol_region.allocate(num_bytes);
}
-char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
- return _mc_region.allocate(num_bytes);
-}
-
-char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
- return _ro_region.allocate(num_bytes);
-}
-
-char* MetaspaceShared::read_write_space_alloc(size_t num_bytes) {
- return _rw_region.allocate(num_bytes);
-}
-
size_t MetaspaceShared::reserved_space_alignment() { return os::vm_allocation_granularity(); }
static bool shared_base_valid(char* shared_base) {
@@ -316,39 +274,6 @@ void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
}
}
-void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) {
- Arguments::assert_is_dumping_archive();
- char* base = rs->base();
- size_t need_committed_size = newtop - base;
- size_t has_committed_size = vs->committed_size();
- if (need_committed_size < has_committed_size) {
- return;
- }
-
- size_t min_bytes = need_committed_size - has_committed_size;
- size_t preferred_bytes = 1 * M;
- size_t uncommitted = vs->reserved_size() - has_committed_size;
-
- size_t commit = MAX2(min_bytes, preferred_bytes);
- commit = MIN2(commit, uncommitted);
- assert(commit <= uncommitted, "sanity");
-
- bool result = vs->expand_by(commit, false);
- if (rs == &_shared_rs) {
- ArchivePtrMarker::expand_ptr_end((address*)vs->high());
- }
-
- if (!result) {
- vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
- need_committed_size));
- }
-
- assert(rs == &_shared_rs || rs == &_symbol_rs, "must be");
- const char* which = (rs == &_shared_rs) ? "shared" : "symbol";
- log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
- which, commit, vs->actual_committed_size(), vs->high());
-}
-
// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.
@@ -395,30 +320,6 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
soc->do_tag(666);
}
-void MetaspaceShared::init_misc_code_space() {
- // We don't want any valid object to be at the very bottom of the archive.
- // See ArchivePtrMarker::mark_pointer().
- MetaspaceShared::misc_code_space_alloc(16);
-
- size_t trampoline_size = SharedRuntime::trampoline_size();
- size_t buf_size = (size_t)AbstractInterpreter::number_of_method_entries * trampoline_size;
- _i2i_entry_code_buffers = (address)misc_code_space_alloc(buf_size);
-}
-
-address MetaspaceShared::i2i_entry_code_buffers() {
- assert(DumpSharedSpaces || UseSharedSpaces, "must be");
- assert(_i2i_entry_code_buffers != NULL, "must already been initialized");
- return _i2i_entry_code_buffers;
-}
-
-// Global object for holding classes that have been loaded. Since this
-// is run at a safepoint just before exit, this is the entire set of classes.
-static GrowableArray<Klass*>* _global_klass_objects;
-
-GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
- return _global_klass_objects;
-}
-
static void rewrite_nofast_bytecode(const methodHandle& method) {
BytecodeStream bcs(method);
while (!bcs.is_last_bytecode()) {
@@ -459,7 +360,7 @@ private:
GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;
- void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
+ void dump_java_heap_objects(GrowableArray<Klass*>* klasses) NOT_CDS_JAVA_HEAP_RETURN;
void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
@@ -468,10 +369,6 @@ private:
SymbolTable::write_to_archive(symbols);
}
char* dump_read_only_tables();
- void print_region_stats(FileMapInfo* map_info);
- void print_bitmap_region_stats(size_t size, size_t total_size);
- void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
- const char *name, size_t total_size);
public:
@@ -488,8 +385,7 @@ public:
class StaticArchiveBuilder : public ArchiveBuilder {
public:
- StaticArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region)
- : ArchiveBuilder(mc_region, rw_region, ro_region) {}
+ StaticArchiveBuilder() : ArchiveBuilder() {}
virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
FileMapInfo::metaspace_pointers_do(it, false);
@@ -516,8 +412,9 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
SystemDictionaryShared::write_to_archive();
// Write the other data to the output array.
- char* start = _ro_region.top();
- WriteClosure wc(&_ro_region);
+ DumpRegion* ro_region = ArchiveBuilder::current()->ro_region();
+ char* start = ro_region->top();
+ WriteClosure wc(ro_region);
MetaspaceShared::serialize(&wc);
// Write the bitmaps for patching the archive heap regions
@@ -557,111 +454,52 @@ void VM_PopulateDumpSharedSpace::doit() {
// that so we don't have to walk the SystemDictionary again.
SystemDictionaryShared::check_excluded_classes();
- StaticArchiveBuilder builder(&_mc_region, &_rw_region, &_ro_region);
- builder.gather_klasses_and_symbols();
- builder.reserve_buffer();
- _global_klass_objects = builder.klasses();
-
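+ // The builder now creates and manages the dump regions itself, so no
+ // DumpRegion pointers need to be passed in.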
+ StaticArchiveBuilder builder;
builder.gather_source_objs();
+ builder.reserve_buffer();
- MetaspaceShared::init_misc_code_space();
- builder.allocate_method_trampoline_info();
- builder.allocate_method_trampolines();
-
- char* cloned_vtables = CppVtables::dumptime_init();
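+ // Pass the builder so CppVtables can allocate the vtable clones from
+ // the builder's dump space.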
+ char* cloned_vtables = CppVtables::dumptime_init(&builder);
- {
- _mc_region.pack(&_rw_region);
- builder.set_current_dump_space(&_rw_region);
- builder.dump_rw_region();
-#if INCLUDE_CDS_JAVA_HEAP
- if (MetaspaceShared::use_full_module_graph()) {
- // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
- char* start = _rw_region.top();
- ClassLoaderDataShared::allocate_archived_tables();
- ArchiveBuilder::alloc_stats()->record_modules(_rw_region.top() - start, /*read_only*/false);
- }
-#endif
- }
- {
- _rw_region.pack(&_ro_region);
- builder.set_current_dump_space(&_ro_region);
- builder.dump_ro_region();
-#if INCLUDE_CDS_JAVA_HEAP
- if (MetaspaceShared::use_full_module_graph()) {
- char* start = _ro_region.top();
- ClassLoaderDataShared::init_archived_tables();
- ArchiveBuilder::alloc_stats()->record_modules(_ro_region.top() - start, /*read_only*/true);
- }
-#endif
- }
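+ // Packing the regions and archiving the full-module-graph tables are
+ // now handled inside these two calls.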
+ builder.dump_rw_metadata();
+ builder.dump_ro_metadata();
builder.relocate_metaspaceobj_embedded_pointers();
// Dump supported java heap objects
_closed_archive_heap_regions = NULL;
_open_archive_heap_regions = NULL;
- dump_java_heap_objects();
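+ // The set of classes to archive is supplied by the builder.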
+ dump_java_heap_objects(builder.klasses());
builder.relocate_roots();
dump_shared_symbol_table(builder.symbols());
builder.relocate_vm_classes();
- log_info(cds)("Update method trampolines");
- builder.update_method_trampolines();
-
log_info(cds)("Make classes shareable");
builder.make_klasses_shareable();
char* serialized_data = dump_read_only_tables();
- _ro_region.pack();
SystemDictionaryShared::adjust_lambda_proxy_class_dictionary();
// The vtable clones contain addresses of the current process.
- // We don't want to write these addresses into the archive. Same for i2i buffer.
+ // We don't want to write these addresses into the archive.
CppVtables::zero_archived_vtables();
// relocate the data so that it can be mapped to MetaspaceShared::requested_base_address()
// without runtime relocation.
builder.relocate_to_requested();
- // Create and write the archive file that maps the shared spaces.
-
+ // Write the archive file
FileMapInfo* mapinfo = new FileMapInfo(true);
mapinfo->populate_header(os::vm_allocation_granularity());
mapinfo->set_serialized_data(serialized_data);
mapinfo->set_cloned_vtables(cloned_vtables);
- mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers());
mapinfo->open_for_write();
- size_t bitmap_size_in_bytes;
- char* bitmap = MetaspaceShared::write_core_archive_regions(mapinfo, _closed_archive_heap_oopmaps,
- _open_archive_heap_oopmaps,
- bitmap_size_in_bytes);
- _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
- _closed_archive_heap_regions,
- _closed_archive_heap_oopmaps,
- MetaspaceShared::first_closed_archive_heap_region,
- MetaspaceShared::max_closed_archive_heap_region);
- _total_open_archive_region_size = mapinfo->write_archive_heap_regions(
- _open_archive_heap_regions,
- _open_archive_heap_oopmaps,
- MetaspaceShared::first_open_archive_heap_region,
- MetaspaceShared::max_open_archive_heap_region);
-
- mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
- mapinfo->set_header_crc(mapinfo->compute_header_crc());
- mapinfo->write_header();
- print_region_stats(mapinfo);
- mapinfo->close();
-
- builder.write_cds_map_to_log(mapinfo, _closed_archive_heap_regions, _open_archive_heap_regions,
- bitmap, bitmap_size_in_bytes);
- FREE_C_HEAP_ARRAY(char, bitmap);
-
- if (log_is_enabled(Info, cds)) {
- builder.print_stats(int(_ro_region.used()), int(_rw_region.used()), int(_mc_region.used()));
- }
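+ // write_archive() writes the core and heap regions, the bitmaps, the
+ // header and the region statistics, and logs the CDS map.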
+ builder.write_archive(mapinfo,
+ _closed_archive_heap_regions,
+ _open_archive_heap_regions,
+ _closed_archive_heap_oopmaps,
+ _open_archive_heap_oopmaps);
if (PrintSystemDictionaryAtExit) {
SystemDictionary::print();
@@ -678,73 +516,6 @@ void VM_PopulateDumpSharedSpace::doit() {
vm_direct_exit(0);
}
-void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) {
- // Print statistics of all the regions
- const size_t bitmap_used = map_info->space_at(MetaspaceShared::bm)->used();
- const size_t bitmap_reserved = map_info->space_at(MetaspaceShared::bm)->used_aligned();
- const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
- _mc_region.reserved() +
- bitmap_reserved +
- _total_closed_archive_region_size +
- _total_open_archive_region_size;
- const size_t total_bytes = _ro_region.used() + _rw_region.used() +
- _mc_region.used() +
- bitmap_used +
- _total_closed_archive_region_size +
- _total_open_archive_region_size;
- const double total_u_perc = percent_of(total_bytes, total_reserved);
-
- _mc_region.print(total_reserved);
- _rw_region.print(total_reserved);
- _ro_region.print(total_reserved);
- print_bitmap_region_stats(bitmap_used, total_reserved);
- print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
- print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
-
- log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
- total_bytes, total_reserved, total_u_perc);
-}
-
-void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
- log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
- size, size/double(total_size)*100.0, size);
-}
-
-void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
- const char *name, size_t total_size) {
- int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
- for (int i = 0; i < arr_len; i++) {
- char* start = (char*)heap_mem->at(i).start();
- size_t size = heap_mem->at(i).byte_size();
- char* top = start + size;
- log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
- name, i, size, size/double(total_size)*100.0, size, p2i(start));
-
- }
-}
-
-char* MetaspaceShared::write_core_archive_regions(FileMapInfo* mapinfo,
- GrowableArray<ArchiveHeapOopmapInfo>* closed_oopmaps,
- GrowableArray