diff --git a/.github/workflows/submit.yml b/.github/workflows/submit.yml
index ae5acf5000a9fe9de453cb84cb592aa436d35729..aca69121e21fd59048fbced30e8bcac27a7de149 100644
--- a/.github/workflows/submit.yml
+++ b/.github/workflows/submit.yml
@@ -536,10 +536,6 @@ jobs:
           echo "cross_flags=
           --openjdk-target=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}}
           --with-sysroot=${HOME}/sysroot-${{ matrix.debian-arch }}/
-          --with-toolchain-path=${HOME}/sysroot-${{ matrix.debian-arch }}/
-          --with-freetype-lib=${HOME}/sysroot-${{ matrix.debian-arch }}/usr/lib/${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}}/
-          --with-freetype-include=${HOME}/sysroot-${{ matrix.debian-arch }}/usr/include/freetype2/
-          --x-libraries=${HOME}/sysroot-${{ matrix.debian-arch }}/usr/lib/${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}}/
           " >> $GITHUB_ENV
        if: matrix.debian-arch != ''
diff --git a/doc/building.html b/doc/building.html
index 0522bc4888bbebce06aa99fb6aa83ac798d0eea8..4d5cdf67c3a35907b86116616bf28084213f7c69 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -849,7 +849,7 @@ sudo mv /tmp/configure /usr/local/bin

If you update the repository and part of the configure script has changed, the build system will force you to re-run configure.

Most of the time, you will be fine by running configure again with the same arguments as the last time, which can easily be performed by make reconfigure. To simplify this, you can use the CONF_CHECK make control variable, either as make CONF_CHECK=auto, or by setting an environment variable. For instance, if you add export CONF_CHECK=auto to your .bashrc file, make will always run reconfigure automatically whenever the configure script has changed.

You can also use CONF_CHECK=ignore to skip the check for a needed configure update. This might speed up the build, but comes at the risk of an incorrect build result. This is only recommended if you know what you're doing.

-From time to time, you will also need to modify the command line to configure due to changes. Use make print-configure to show the command line used for your current configuration.

+From time to time, you will also need to modify the command line to configure due to changes. Use make print-configuration to show the command line used for your current configuration.

Using Fine-Grained Make Targets

The default behavior for make is to create consistent and correct output, at the expense of build speed, if necessary.

If you are prepared to take some risk of an incorrect build, and know enough of the system to understand how things build and interact, you can speed up the build process considerably by instructing make to only build a portion of the product.
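For quick reference, the incremental-build workflow described above comes down to a few commands (a minimal sketch; make hotspot and make java.base-java are representative fine-grained targets of the kind this section introduces):

# Re-run configure with the same arguments as last time
make reconfigure

# Let make re-run configure automatically whenever the configure script changes
make CONF_CHECK=auto

# Show the configure command line used for the current configuration
# (renamed from print-configure to print-configuration by this change)
make print-configuration

# Fine-grained targets: rebuild only a portion of the product
make hotspot
make java.base-java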

diff --git a/doc/building.md b/doc/building.md
index 926148d463df88935feabc4701e0e8290093b9fb..69b7fe640e81f1cf4b984c42bbd42c7f952e3a54 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -1556,8 +1556,8 @@ update. This might speed up the build, but comes at the risk of an incorrect
 build result. This is only recommended if you know what you're doing.
 
 From time to time, you will also need to modify the command line to `configure`
-due to changes. Use `make print-configure` to show the command line used for
-your current configuration.
+due to changes. Use `make print-configuration` to show the command line used
+for your current configuration.
 
 ### Using Fine-Grained Make Targets
diff --git a/make/CompileJavaModules.gmk b/make/CompileJavaModules.gmk
index c039ad30b002c430e90a43c8f6fe4c9ee41b0aaf..b4ee5e78adfb7fc220c3d0cc4b678f5837840d43 100644
--- a/make/CompileJavaModules.gmk
+++ b/make/CompileJavaModules.gmk
@@ -86,7 +86,7 @@ CreateHkTargets = \
 ################################################################################
 # Include module specific build settings
 
--include $(TOPDIR)/make/modules/$(MODULE)/Java.gmk
+-include Java.gmk
 
 ################################################################################
 # Setup the main compilation
diff --git a/make/Docs.gmk b/make/Docs.gmk
index 89cea6a7c3fea99b7186caca0c3009474304c0b6..295cf7d9119d64e1b92b449ed10ff37bc61f7bb9 100644
--- a/make/Docs.gmk
+++ b/make/Docs.gmk
@@ -99,7 +99,7 @@ JAVADOC_TAGS := \
 REFERENCE_TAGS := $(JAVADOC_TAGS)
 
 # Which doclint checks to ignore
-JAVADOC_DISABLED_DOCLINT := accessibility html missing syntax reference
+JAVADOC_DISABLED_DOCLINT := missing
 
 # The initial set of options for javadoc
 JAVADOC_OPTIONS := -use -keywords -notimestamp \
@@ -261,6 +261,7 @@ endef
 #   SHORT_NAME - The short name of this documentation collection
 #   LONG_NAME - The long name of this documentation collection
 #   TARGET_DIR - Where to store the output
+#   OTHER_VERSIONS - URL for other page listing versions
 #
 SetupApiDocsGeneration = $(NamedParamsMacroTemplate)
 define SetupApiDocsGenerationBody
@@ -297,10 +298,16 @@ define SetupApiDocsGenerationBody
   # Ignore the doclint warnings in the W3C DOM package
   $1_OPTIONS += -Xdoclint/package:-org.w3c.*
 
+  ifneq ($$($1_OTHER_VERSIONS), )
+    $1_LINKED_SHORT_NAME = <a href="$$($1_OTHER_VERSIONS)">$$($1_SHORT_NAME)</a>
+  else
+    $1_LINKED_SHORT_NAME = $$($1_SHORT_NAME)
+  endif
+
   $1_DOC_TITLE := $$($1_LONG_NAME)<br>Version $$(VERSION_SPECIFICATION) API \
       Specification
   $1_WINDOW_TITLE := $$(subst &amp;,&,$$($1_SHORT_NAME))$$(DRAFT_MARKER_TITLE)
-  $1_HEADER_TITLE := <div $$(HEADER_STYLE)><strong>$$($1_SHORT_NAME)</strong> \
+  $1_HEADER_TITLE := <div $$(HEADER_STYLE)><strong>$$($1_LINKED_SHORT_NAME)</strong> \
      $$(DRAFT_MARKER_STR)</div>
$1_OPTIONS += -doctitle '$$($1_DOC_TITLE)' @@ -438,6 +445,7 @@ $(eval $(call SetupApiDocsGeneration, JDK_API, \ SHORT_NAME := $(JDK_SHORT_NAME), \ LONG_NAME := $(JDK_LONG_NAME), \ TARGET_DIR := $(DOCS_OUTPUTDIR)/api, \ + OTHER_VERSIONS := $(OTHER_JDK_VERSIONS_URL), \ )) # Targets generated are returned in JDK_API_JAVADOC_TARGETS and diff --git a/make/Main.gmk b/make/Main.gmk index cdb4be67c560e99f44b562475df0f45b09ea0775..d0c81c84fed52e3a4f77c313db28f5a53b25c5bc 100644 --- a/make/Main.gmk +++ b/make/Main.gmk @@ -187,6 +187,7 @@ JAVA_TARGETS := $(addsuffix -java, $(JAVA_MODULES)) define DeclareCompileJavaRecipe $1-java: +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) \ + $(patsubst %,-I%/modules/$1,$(PHASE_MAKEDIRS)) \ -f CompileJavaModules.gmk MODULE=$1) endef diff --git a/make/MainSupport.gmk b/make/MainSupport.gmk index 44296b86bbc3bb192ae2d46defa618b0df933adb..34137c502d4ae6a89e94a6db2185816c18c20363 100644 --- a/make/MainSupport.gmk +++ b/make/MainSupport.gmk @@ -150,9 +150,7 @@ define DeclareRecipeForModuleMakefile $2-$$($1_TARGET_SUFFIX): +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) \ -f ModuleWrapper.gmk -I $$(TOPDIR)/make/common/modules \ - $$(addprefix -I, $$(PHASE_MAKEDIRS) \ - $$(addsuffix /modules/$2, $$(PHASE_MAKEDIRS)) \ - ) \ + $$(patsubst %,-I%/modules/$2,$$(PHASE_MAKEDIRS)) \ MODULE=$2 MAKEFILE_PREFIX=$$($1_FILE_PREFIX) $$($1_EXTRA_ARGS)) endef diff --git a/make/autoconf/lib-freetype.m4 b/make/autoconf/lib-freetype.m4 index 6bfc0ae6f15cea8d6f4ac80dce8615d2b84b6e49..6a7109342477be106a6676fa6452f884dd3f4cb2 100644 --- a/make/autoconf/lib-freetype.m4 +++ b/make/autoconf/lib-freetype.m4 @@ -192,6 +192,16 @@ AC_DEFUN_ONCE([LIB_SETUP_FREETYPE], [$FREETYPE_BASE_DIR/lib], [well-known location]) fi + if test "x$FOUND_FREETYPE" != "xyes" ; then + LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], + [$FREETYPE_BASE_DIR/lib/$OPENJDK_TARGET_CPU-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI], [well-known location]) + fi + + if test "x$FOUND_FREETYPE" != "xyes" ; then + LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], + [$FREETYPE_BASE_DIR/lib/$OPENJDK_TARGET_CPU_AUTOCONF-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI], [well-known location]) + fi + if test "x$FOUND_FREETYPE" != "xyes" ; then FREETYPE_BASE_DIR="$SYSROOT/usr/X11" LIB_CHECK_POTENTIAL_FREETYPE([$FREETYPE_BASE_DIR/include], diff --git a/make/autoconf/lib-x11.m4 b/make/autoconf/lib-x11.m4 index f0f96f39c3e71b16217499c864f2e738647d2f67..203586d6317bafcf505c30d9ca4fec63d45bc475 100644 --- a/make/autoconf/lib-x11.m4 +++ b/make/autoconf/lib-x11.m4 @@ -68,6 +68,10 @@ AC_DEFUN_ONCE([LIB_SETUP_X11], x_libraries="$SYSROOT/usr/lib64" elif test -f "$SYSROOT/usr/lib/libX11.so"; then x_libraries="$SYSROOT/usr/lib" + elif test -f "$SYSROOT/usr/lib/$OPENJDK_TARGET_CPU-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI/libX11.so"; then + x_libraries="$SYSROOT/usr/lib/$OPENJDK_TARGET_CPU-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI/libX11.so" + elif test -f "$SYSROOT/usr/lib/$OPENJDK_TARGET_CPU_AUTOCONF-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI/libX11.so"; then + x_libraries="$SYSROOT/usr/lib/$OPENJDK_TARGET_CPU_AUTOCONF-$OPENJDK_TARGET_OS-$OPENJDK_TARGET_ABI/libX11.so" fi fi fi diff --git a/make/autoconf/platform.m4 b/make/autoconf/platform.m4 index 1890491773bf226217efd9bfe34bb469781b3a81..181fdbf701d858f1f71a228ae10ca1a2511c48e1 100644 --- a/make/autoconf/platform.m4 +++ b/make/autoconf/platform.m4 @@ -238,6 +238,33 @@ AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_LIBC], esac ]) +# Support macro for PLATFORM_EXTRACT_TARGET_AND_BUILD. 
+# Converts autoconf style OS name to OpenJDK style, into +# VAR_ABI. +AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_ABI], +[ + case "$1" in + *linux*-musl) + VAR_ABI=musl + ;; + *linux*-gnu) + VAR_ABI=gnu + ;; + *linux*-gnueabi) + VAR_ABI=gnueabi + ;; + *linux*-gnueabihf) + VAR_ABI=gnueabihf + ;; + *linux*-gnuabi64) + VAR_ABI=gnuabi64 + ;; + *) + VAR_ABI=default + ;; + esac +]) + # Expects $host_os $host_cpu $build_os and $build_cpu # and $with_target_bits to have been setup! # @@ -259,6 +286,7 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD], PLATFORM_EXTRACT_VARS_FROM_OS($build_os) PLATFORM_EXTRACT_VARS_FROM_CPU($build_cpu) PLATFORM_EXTRACT_VARS_FROM_LIBC($build_os) + PLATFORM_EXTRACT_VARS_FROM_ABI($build_os) # ..and setup our own variables. (Do this explicitly to facilitate searching) OPENJDK_BUILD_OS="$VAR_OS" if test "x$VAR_OS_TYPE" != x; then @@ -275,7 +303,9 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD], OPENJDK_BUILD_CPU_ARCH="$VAR_CPU_ARCH" OPENJDK_BUILD_CPU_BITS="$VAR_CPU_BITS" OPENJDK_BUILD_CPU_ENDIAN="$VAR_CPU_ENDIAN" + OPENJDK_BUILD_CPU_AUTOCONF="$build_cpu" OPENJDK_BUILD_LIBC="$VAR_LIBC" + OPENJDK_BUILD_ABI="$VAR_ABI" AC_SUBST(OPENJDK_BUILD_OS) AC_SUBST(OPENJDK_BUILD_OS_TYPE) AC_SUBST(OPENJDK_BUILD_OS_ENV) @@ -283,7 +313,9 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD], AC_SUBST(OPENJDK_BUILD_CPU_ARCH) AC_SUBST(OPENJDK_BUILD_CPU_BITS) AC_SUBST(OPENJDK_BUILD_CPU_ENDIAN) + AC_SUBST(OPENJDK_BUILD_CPU_AUTOCONF) AC_SUBST(OPENJDK_BUILD_LIBC) + AC_SUBST(OPENJDK_BUILD_ABI) AC_MSG_CHECKING([openjdk-build os-cpu]) AC_MSG_RESULT([$OPENJDK_BUILD_OS-$OPENJDK_BUILD_CPU]) @@ -297,6 +329,7 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD], PLATFORM_EXTRACT_VARS_FROM_OS($host_os) PLATFORM_EXTRACT_VARS_FROM_CPU($host_cpu) PLATFORM_EXTRACT_VARS_FROM_LIBC($host_os) + PLATFORM_EXTRACT_VARS_FROM_ABI($host_os) # ... and setup our own variables. 
(Do this explicitly to facilitate searching) OPENJDK_TARGET_OS="$VAR_OS" if test "x$VAR_OS_TYPE" != x; then @@ -313,8 +346,10 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD], OPENJDK_TARGET_CPU_ARCH="$VAR_CPU_ARCH" OPENJDK_TARGET_CPU_BITS="$VAR_CPU_BITS" OPENJDK_TARGET_CPU_ENDIAN="$VAR_CPU_ENDIAN" + OPENJDK_TARGET_CPU_AUTOCONF="$host_cpu" OPENJDK_TARGET_OS_UPPERCASE=`$ECHO $OPENJDK_TARGET_OS | $TR 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'` OPENJDK_TARGET_LIBC="$VAR_LIBC" + OPENJDK_TARGET_ABI="$VAR_ABI" AC_SUBST(OPENJDK_TARGET_OS) AC_SUBST(OPENJDK_TARGET_OS_TYPE) @@ -324,7 +359,9 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD], AC_SUBST(OPENJDK_TARGET_CPU_ARCH) AC_SUBST(OPENJDK_TARGET_CPU_BITS) AC_SUBST(OPENJDK_TARGET_CPU_ENDIAN) + AC_SUBST(OPENJDK_TARGET_CPU_AUTOCONF) AC_SUBST(OPENJDK_TARGET_LIBC) + AC_SUBST(OPENJDK_TARGET_ABI) AC_MSG_CHECKING([openjdk-target os-cpu]) AC_MSG_RESULT([$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU]) diff --git a/make/conf/javadoc.conf b/make/conf/javadoc.conf index df25452533a9b380ad1dffae9d23a795f5bd9bfd..6c92e40329afaec04f9a7f303f3b877ef04ba407 100644 --- a/make/conf/javadoc.conf +++ b/make/conf/javadoc.conf @@ -28,3 +28,4 @@ BUG_SUBMIT_URL=https://bugreport.java.com/bugreport/ COPYRIGHT_URL=legal/copyright.html LICENSE_URL=https://www.oracle.com/java/javase/terms/license/java$(VERSION_NUMBER)speclicense.html REDISTRIBUTION_URL=https://www.oracle.com/technetwork/java/redist-137594.html +OTHER_JDK_VERSIONS_URL=https://docs.oracle.com/en/java/javase/index.html diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js index b0761dd0ba097f46b8777f43b9319c760f434b5a..f1995e76e299644dd27c272b01f4f4eac9ee00df 100644 --- a/make/conf/jib-profiles.js +++ b/make/conf/jib-profiles.js @@ -477,7 +477,6 @@ var getJibProfilesProfiles = function (input, common, data) { dependencies: ["devkit", "gtest", "build_devkit", "pandoc"], configure_args: [ "--openjdk-target=aarch64-linux-gnu", - "--disable-jvm-feature-jvmci", ], }, @@ -1115,7 +1114,7 @@ var getJibProfilesDependencies = function (input, common) { jmh: { organization: common.organization, ext: "tar.gz", - revision: "1.21+1.0" + revision: "1.28+1.0" }, jcov: { diff --git a/make/data/characterdata/CharacterData00.java.template b/make/data/characterdata/CharacterData00.java.template index 89a36023d35b02e4eeb2d030377fc9ad26e018ee..5705297a53ef5d4448c9745a81addcc72a678bb5 100644 --- a/make/data/characterdata/CharacterData00.java.template +++ b/make/data/characterdata/CharacterData00.java.template @@ -84,16 +84,6 @@ class CharacterData00 extends CharacterData { return (props & $$maskType); } - boolean isOtherLowercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherLowercase) != 0; - } - - boolean isOtherUppercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherUppercase) != 0; - } - boolean isOtherAlphabetic(int ch) { int props = getPropertiesEx(ch); return (props & $$maskOtherAlphabetic) != 0; @@ -765,13 +755,13 @@ class CharacterData00 extends CharacterData { } boolean isLowerCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.LOWERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0; } boolean isUpperCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.UPPERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherUppercase) 
!= 0; } boolean isWhitespace(int ch) { diff --git a/make/data/characterdata/CharacterData01.java.template b/make/data/characterdata/CharacterData01.java.template index 430fde0ae960a9a1f1f924b2d7f8ce9be538481c..a44450b37ea6f5e71b11bf77c95f74fd03b3bd3a 100644 --- a/make/data/characterdata/CharacterData01.java.template +++ b/make/data/characterdata/CharacterData01.java.template @@ -83,16 +83,6 @@ class CharacterData01 extends CharacterData { return (props & $$maskType); } - boolean isOtherLowercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherLowercase) != 0; - } - - boolean isOtherUppercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherUppercase) != 0; - } - boolean isOtherAlphabetic(int ch) { int props = getPropertiesEx(ch); return (props & $$maskOtherAlphabetic) != 0; @@ -503,13 +493,13 @@ class CharacterData01 extends CharacterData { } boolean isLowerCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.LOWERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0; } boolean isUpperCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.UPPERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherUppercase) != 0; } boolean isWhitespace(int ch) { diff --git a/make/data/characterdata/CharacterData02.java.template b/make/data/characterdata/CharacterData02.java.template index 57289ed36a550d9c19cb599187ae1baedeee1845..739bc9d32ab052f11111f97e15389b24ff44d90a 100644 --- a/make/data/characterdata/CharacterData02.java.template +++ b/make/data/characterdata/CharacterData02.java.template @@ -77,16 +77,6 @@ class CharacterData02 extends CharacterData { return props; } - boolean isOtherLowercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherLowercase) != 0; - } - - boolean isOtherUppercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherUppercase) != 0; - } - boolean isOtherAlphabetic(int ch) { int props = getPropertiesEx(ch); return (props & $$maskOtherAlphabetic) != 0; @@ -222,15 +212,16 @@ class CharacterData02 extends CharacterData { } boolean isLowerCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.LOWERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0; } boolean isUpperCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.UPPERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherUppercase) != 0; } + boolean isWhitespace(int ch) { return (getProperties(ch) & $$maskIdentifierInfo) == $$valueJavaWhitespace; } diff --git a/make/data/characterdata/CharacterData03.java.template b/make/data/characterdata/CharacterData03.java.template index 730169b029009e95448eb2231b4a16cfa1780f0b..06d4dfbdc2cfb0bb5debd7b7006c228406b7ed95 100644 --- a/make/data/characterdata/CharacterData03.java.template +++ b/make/data/characterdata/CharacterData03.java.template @@ -77,16 +77,6 @@ class CharacterData03 extends CharacterData { return props; } - boolean isOtherLowercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherLowercase) != 0; - } - - boolean isOtherUppercase(int ch) { - int props = getPropertiesEx(ch); - return (props & 
$$maskOtherUppercase) != 0; - } - boolean isOtherAlphabetic(int ch) { int props = getPropertiesEx(ch); return (props & $$maskOtherAlphabetic) != 0; @@ -222,13 +212,13 @@ class CharacterData03 extends CharacterData { } boolean isLowerCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.LOWERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0; } boolean isUpperCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.UPPERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherUppercase) != 0; } boolean isWhitespace(int ch) { diff --git a/make/data/characterdata/CharacterData0E.java.template b/make/data/characterdata/CharacterData0E.java.template index d0e2b772525ff51880e470747dcd13234a386d76..aa6db8469a0c04912af97b1b2a697283eb448a2b 100644 --- a/make/data/characterdata/CharacterData0E.java.template +++ b/make/data/characterdata/CharacterData0E.java.template @@ -77,16 +77,6 @@ class CharacterData0E extends CharacterData { return props; } - boolean isOtherLowercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherLowercase) != 0; - } - - boolean isOtherUppercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherUppercase) != 0; - } - boolean isOtherAlphabetic(int ch) { int props = getPropertiesEx(ch); return (props & $$maskOtherAlphabetic) != 0; @@ -222,13 +212,13 @@ class CharacterData0E extends CharacterData { } boolean isLowerCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.LOWERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0; } boolean isUpperCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.UPPERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherUppercase) != 0; } boolean isWhitespace(int ch) { diff --git a/make/data/characterdata/CharacterDataLatin1.java.template b/make/data/characterdata/CharacterDataLatin1.java.template index 70559bdc346d5862701d307cee593f4ce22adff9..c2ff37321e517e517d7bbee5fe09595eb4ed2cb3 100644 --- a/make/data/characterdata/CharacterDataLatin1.java.template +++ b/make/data/characterdata/CharacterDataLatin1.java.template @@ -87,24 +87,13 @@ class CharacterDataLatin1 extends CharacterData { @IntrinsicCandidate boolean isLowerCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.LOWERCASE_LETTER; + return (getProperties(ch) & $$maskType) == Character.LOWERCASE_LETTER + || (getPropertiesEx(ch) & $$maskOtherLowercase) != 0; // 0xaa, 0xba } @IntrinsicCandidate boolean isUpperCase(int ch) { - int props = getProperties(ch); - return (props & $$maskType) == Character.UPPERCASE_LETTER; - } - - boolean isOtherLowercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherLowercase) != 0; - } - - boolean isOtherUppercase(int ch) { - int props = getPropertiesEx(ch); - return (props & $$maskOtherUppercase) != 0; + return (getProperties(ch) & $$maskType) == Character.UPPERCASE_LETTER; } boolean isOtherAlphabetic(int ch) { @@ -290,6 +279,6 @@ class CharacterDataLatin1 extends CharacterData { static { $$Initializers - } + } } diff --git a/make/devkit/createJMHBundle.sh b/make/devkit/createJMHBundle.sh index 
b460ee75311a6a1e0cc656b6525e27a2ccf5fad9..9e0b9c06e4f1fc421f124845751215e44b39a5cd 100644 --- a/make/devkit/createJMHBundle.sh +++ b/make/devkit/createJMHBundle.sh @@ -26,7 +26,7 @@ # Create a bundle in the build directory, containing what's needed to # build and run JMH microbenchmarks from the OpenJDK build. -JMH_VERSION=1.26 +JMH_VERSION=1.28 COMMONS_MATH3_VERSION=3.2 JOPT_SIMPLE_VERSION=4.6 diff --git a/make/jdk/src/classes/build/tools/taglet/JSpec.java b/make/jdk/src/classes/build/tools/taglet/JSpec.java index 0d7bfc3e776c23dbbfca838a50534ed8b2098916..1fdd224ae777129031322feb547e71b1c1b58215 100644 --- a/make/jdk/src/classes/build/tools/taglet/JSpec.java +++ b/make/jdk/src/classes/build/tools/taglet/JSpec.java @@ -206,7 +206,7 @@ public class JSpec implements Taglet { private String escape(String s) { return s.replace("&", "&amp;") .replace("<", "&lt;") - .replace(">", ">"); + .replace(">", "&gt;"); } }).visit(trees, new StringBuilder()).toString(); } diff --git a/make/test/BuildMicrobenchmark.gmk b/make/test/BuildMicrobenchmark.gmk index 55e5026eb3869463222659126205e43f696fdeb6..4e1567c27483c328651a88685ae0d884a258ec7e 100644 --- a/make/test/BuildMicrobenchmark.gmk +++ b/make/test/BuildMicrobenchmark.gmk @@ -84,6 +84,7 @@ $(eval $(call SetupJavaCompilation, BUILD_INDIFY, \ #### Compile Targets # Building microbenchmark requires the jdk.unsupported and java.management modules. +# sun.security.util is required to compile Cache benchmark # Build microbenchmark suite for the current JDK $(eval $(call SetupJavaCompilation, BUILD_JDK_MICROBENCHMARK, \ @@ -93,6 +94,7 @@ $(eval $(call SetupJavaCompilation, BUILD_JDK_MICROBENCHMARK, \ DISABLED_WARNINGS := processing rawtypes cast serial, \ SRC := $(MICROBENCHMARK_SRC), \ BIN := $(MICROBENCHMARK_CLASSES), \ + JAVAC_FLAGS := --add-exports java.base/sun.security.util=ALL-UNNAMED, \ JAVA_FLAGS := --add-modules jdk.unsupported --limit-modules java.management, \ )) diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index 01915b33e9bbac45eb646a82ac64e742c745310a..f8dd0ee663f07ded7e74d93f168b56697f2410d7 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -11299,8 +11299,7 @@ instruct regI_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AndI_reg_not_reg(iRegINoSp dst, - iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1, - rFlagsReg cr) %{ + iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{ match(Set dst (AndI src1 (XorI src2 m1))); ins_cost(INSN_COST); format %{ "bicw $dst, $src1, $src2" %} @@ -11318,8 +11317,7 @@ instruct AndI_reg_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AndL_reg_not_reg(iRegLNoSp dst, - iRegL src1, iRegL src2, immL_M1 m1, - rFlagsReg cr) %{ + iRegL src1, iRegL src2, immL_M1 m1) %{ match(Set dst (AndL src1 (XorL src2 m1))); ins_cost(INSN_COST); format %{ "bic $dst, $src1, $src2" %} @@ -11337,8 +11335,7 @@ instruct AndL_reg_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct OrI_reg_not_reg(iRegINoSp dst, - iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1, - rFlagsReg cr) %{ + iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{ match(Set dst (OrI src1 (XorI src2 m1))); ins_cost(INSN_COST); format %{ "ornw $dst, $src1, $src2" %} @@ -11356,8 +11353,7 @@ instruct OrI_reg_not_reg(iRegINoSp dst, 
// This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct OrL_reg_not_reg(iRegLNoSp dst, - iRegL src1, iRegL src2, immL_M1 m1, - rFlagsReg cr) %{ + iRegL src1, iRegL src2, immL_M1 m1) %{ match(Set dst (OrL src1 (XorL src2 m1))); ins_cost(INSN_COST); format %{ "orn $dst, $src1, $src2" %} @@ -11375,8 +11371,7 @@ instruct OrL_reg_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct XorI_reg_not_reg(iRegINoSp dst, - iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1, - rFlagsReg cr) %{ + iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{ match(Set dst (XorI m1 (XorI src2 src1))); ins_cost(INSN_COST); format %{ "eonw $dst, $src1, $src2" %} @@ -11394,8 +11389,7 @@ instruct XorI_reg_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct XorL_reg_not_reg(iRegLNoSp dst, - iRegL src1, iRegL src2, immL_M1 m1, - rFlagsReg cr) %{ + iRegL src1, iRegL src2, immL_M1 m1) %{ match(Set dst (XorL m1 (XorL src2 src1))); ins_cost(INSN_COST); format %{ "eon $dst, $src1, $src2" %} @@ -11412,9 +11406,10 @@ instruct XorL_reg_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val & (-1 ^ (val >>> shift)) ==> bicw instruct AndI_reg_URShift_not_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, immI_M1 src4, rFlagsReg cr) %{ + immI src3, immI_M1 src4) %{ match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "bicw $dst, $src1, $src2, LSR $src3" %} @@ -11432,9 +11427,10 @@ instruct AndI_reg_URShift_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val & (-1 ^ (val >>> shift)) ==> bic instruct AndL_reg_URShift_not_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, immL_M1 src4, rFlagsReg cr) %{ + immI src3, immL_M1 src4) %{ match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "bic $dst, $src1, $src2, LSR $src3" %} @@ -11452,9 +11448,10 @@ instruct AndL_reg_URShift_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val & (-1 ^ (val >> shift)) ==> bicw instruct AndI_reg_RShift_not_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, immI_M1 src4, rFlagsReg cr) %{ + immI src3, immI_M1 src4) %{ match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "bicw $dst, $src1, $src2, ASR $src3" %} @@ -11472,9 +11469,10 @@ instruct AndI_reg_RShift_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val & (-1 ^ (val >> shift)) ==> bic instruct AndL_reg_RShift_not_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, immL_M1 src4, rFlagsReg cr) %{ + immI src3, immL_M1 src4) %{ match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "bic $dst, $src1, $src2, ASR $src3" %} @@ -11492,9 +11490,52 @@ instruct AndL_reg_RShift_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val & (-1 ^ (val ror shift)) ==> bicw +instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst, + 
iRegIorL2I src1, iRegIorL2I src2, + immI src3, immI_M1 src4) %{ + match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4))); + ins_cost(1.9 * INSN_COST); + format %{ "bicw $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ bicw(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x1f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val & (-1 ^ (val ror shift)) ==> bic +instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst, + iRegL src1, iRegL src2, + immI src3, immL_M1 src4) %{ + match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4))); + ins_cost(1.9 * INSN_COST); + format %{ "bic $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ bic(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x3f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val & (-1 ^ (val << shift)) ==> bicw instruct AndI_reg_LShift_not_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, immI_M1 src4, rFlagsReg cr) %{ + immI src3, immI_M1 src4) %{ match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "bicw $dst, $src1, $src2, LSL $src3" %} @@ -11512,9 +11553,10 @@ instruct AndI_reg_LShift_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val & (-1 ^ (val << shift)) ==> bic instruct AndL_reg_LShift_not_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, immL_M1 src4, rFlagsReg cr) %{ + immI src3, immL_M1 src4) %{ match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "bic $dst, $src1, $src2, LSL $src3" %} @@ -11532,9 +11574,10 @@ instruct AndL_reg_LShift_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val ^ (-1 ^ (val >>> shift)) ==> eonw instruct XorI_reg_URShift_not_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, immI_M1 src4, rFlagsReg cr) %{ + immI src3, immI_M1 src4) %{ match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1))); ins_cost(1.9 * INSN_COST); format %{ "eonw $dst, $src1, $src2, LSR $src3" %} @@ -11552,9 +11595,10 @@ instruct XorI_reg_URShift_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val ^ (-1 ^ (val >>> shift)) ==> eon instruct XorL_reg_URShift_not_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, immL_M1 src4, rFlagsReg cr) %{ + immI src3, immL_M1 src4) %{ match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1))); ins_cost(1.9 * INSN_COST); format %{ "eon $dst, $src1, $src2, LSR $src3" %} @@ -11572,9 +11616,10 @@ instruct XorL_reg_URShift_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val ^ (-1 ^ (val >> shift)) ==> eonw instruct XorI_reg_RShift_not_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, immI_M1 src4, rFlagsReg cr) %{ + immI src3, immI_M1 src4) %{ match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1))); ins_cost(1.9 * INSN_COST); format %{ "eonw $dst, $src1, $src2, ASR $src3" %} @@ -11592,9 +11637,10 @@ 
instruct XorI_reg_RShift_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val ^ (-1 ^ (val >> shift)) ==> eon instruct XorL_reg_RShift_not_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, immL_M1 src4, rFlagsReg cr) %{ + immI src3, immL_M1 src4) %{ match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1))); ins_cost(1.9 * INSN_COST); format %{ "eon $dst, $src1, $src2, ASR $src3" %} @@ -11612,9 +11658,52 @@ instruct XorL_reg_RShift_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val ^ (-1 ^ (val ror shift)) ==> eonw +instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst, + iRegIorL2I src1, iRegIorL2I src2, + immI src3, immI_M1 src4) %{ + match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1))); + ins_cost(1.9 * INSN_COST); + format %{ "eonw $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ eonw(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x1f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val ^ (-1 ^ (val ror shift)) ==> eon +instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst, + iRegL src1, iRegL src2, + immI src3, immL_M1 src4) %{ + match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1))); + ins_cost(1.9 * INSN_COST); + format %{ "eon $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ eon(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x3f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val ^ (-1 ^ (val << shift)) ==> eonw instruct XorI_reg_LShift_not_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, immI_M1 src4, rFlagsReg cr) %{ + immI src3, immI_M1 src4) %{ match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1))); ins_cost(1.9 * INSN_COST); format %{ "eonw $dst, $src1, $src2, LSL $src3" %} @@ -11632,9 +11721,10 @@ instruct XorI_reg_LShift_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val ^ (-1 ^ (val << shift)) ==> eon instruct XorL_reg_LShift_not_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, immL_M1 src4, rFlagsReg cr) %{ + immI src3, immL_M1 src4) %{ match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1))); ins_cost(1.9 * INSN_COST); format %{ "eon $dst, $src1, $src2, LSL $src3" %} @@ -11652,9 +11742,10 @@ instruct XorL_reg_LShift_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val | (-1 ^ (val >>> shift)) ==> ornw instruct OrI_reg_URShift_not_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, immI_M1 src4, rFlagsReg cr) %{ + immI src3, immI_M1 src4) %{ match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "ornw $dst, $src1, $src2, LSR $src3" %} @@ -11672,9 +11763,10 @@ instruct OrI_reg_URShift_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val | (-1 ^ (val >>> shift)) ==> orn instruct OrL_reg_URShift_not_reg(iRegLNoSp dst, iRegL src1, iRegL 
src2, - immI src3, immL_M1 src4, rFlagsReg cr) %{ + immI src3, immL_M1 src4) %{ match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "orn $dst, $src1, $src2, LSR $src3" %} @@ -11692,9 +11784,10 @@ instruct OrL_reg_URShift_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val | (-1 ^ (val >> shift)) ==> ornw instruct OrI_reg_RShift_not_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, immI_M1 src4, rFlagsReg cr) %{ + immI src3, immI_M1 src4) %{ match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "ornw $dst, $src1, $src2, ASR $src3" %} @@ -11712,9 +11805,10 @@ instruct OrI_reg_RShift_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val | (-1 ^ (val >> shift)) ==> orn instruct OrL_reg_RShift_not_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, immL_M1 src4, rFlagsReg cr) %{ + immI src3, immL_M1 src4) %{ match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "orn $dst, $src1, $src2, ASR $src3" %} @@ -11732,9 +11826,52 @@ instruct OrL_reg_RShift_not_reg(iRegLNoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val | (-1 ^ (val ror shift)) ==> ornw +instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst, + iRegIorL2I src1, iRegIorL2I src2, + immI src3, immI_M1 src4) %{ + match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4))); + ins_cost(1.9 * INSN_COST); + format %{ "ornw $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ ornw(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x1f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val | (-1 ^ (val ror shift)) ==> orn +instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst, + iRegL src1, iRegL src2, + immI src3, immL_M1 src4) %{ + match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4))); + ins_cost(1.9 * INSN_COST); + format %{ "orn $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ orn(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x3f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val | (-1 ^ (val << shift)) ==> ornw instruct OrI_reg_LShift_not_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, immI_M1 src4, rFlagsReg cr) %{ + immI src3, immI_M1 src4) %{ match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "ornw $dst, $src1, $src2, LSL $src3" %} @@ -11752,9 +11889,10 @@ instruct OrI_reg_LShift_not_reg(iRegINoSp dst, // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val | (-1 ^ (val << shift)) ==> orn instruct OrL_reg_LShift_not_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, immL_M1 src4, rFlagsReg cr) %{ + immI src3, immL_M1 src4) %{ match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4))); ins_cost(1.9 * INSN_COST); format %{ "orn $dst, $src1, $src2, LSL $src3" %} @@ -11774,7 +11912,7 @@ instruct OrL_reg_LShift_not_reg(iRegLNoSp dst, 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AndI_reg_URShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AndI src1 (URShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -11795,7 +11933,7 @@ instruct AndI_reg_URShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AndL_reg_URShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AndL src1 (URShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -11816,7 +11954,7 @@ instruct AndL_reg_URShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AndI_reg_RShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AndI src1 (RShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -11837,7 +11975,7 @@ instruct AndI_reg_RShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AndL_reg_RShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AndL src1 (RShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -11858,7 +11996,7 @@ instruct AndL_reg_RShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AndI_reg_LShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AndI src1 (LShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -11879,7 +12017,7 @@ instruct AndI_reg_LShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AndL_reg_LShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AndL src1 (LShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -11896,11 +12034,53 @@ instruct AndL_reg_LShift_reg(iRegLNoSp dst, ins_pipe(ialu_reg_reg_shift); %} +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +instruct AndI_reg_RotateRight_reg(iRegINoSp dst, + iRegIorL2I src1, iRegIorL2I src2, + immI src3) %{ + match(Set dst (AndI src1 (RotateRight src2 src3))); + + ins_cost(1.9 * INSN_COST); + format %{ "andw $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ andw(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x1f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +instruct AndL_reg_RotateRight_reg(iRegLNoSp dst, + iRegL src1, iRegL src2, + immI src3) %{ + match(Set dst (AndL src1 (RotateRight src2 src3))); + + ins_cost(1.9 * INSN_COST); + format %{ "andr $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ andr(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x3f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct XorI_reg_URShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (XorI src1 (URShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -11921,7 +12101,7 @@ instruct XorI_reg_URShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct XorL_reg_URShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + 
immI src3) %{ match(Set dst (XorL src1 (URShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -11942,7 +12122,7 @@ instruct XorL_reg_URShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct XorI_reg_RShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (XorI src1 (RShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -11963,7 +12143,7 @@ instruct XorI_reg_RShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct XorL_reg_RShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (XorL src1 (RShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -11984,7 +12164,7 @@ instruct XorL_reg_RShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct XorI_reg_LShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (XorI src1 (LShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12005,7 +12185,7 @@ instruct XorI_reg_LShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct XorL_reg_LShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (XorL src1 (LShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12022,11 +12202,53 @@ instruct XorL_reg_LShift_reg(iRegLNoSp dst, ins_pipe(ialu_reg_reg_shift); %} +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +instruct XorI_reg_RotateRight_reg(iRegINoSp dst, + iRegIorL2I src1, iRegIorL2I src2, + immI src3) %{ + match(Set dst (XorI src1 (RotateRight src2 src3))); + + ins_cost(1.9 * INSN_COST); + format %{ "eorw $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ eorw(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x1f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +instruct XorL_reg_RotateRight_reg(iRegLNoSp dst, + iRegL src1, iRegL src2, + immI src3) %{ + match(Set dst (XorL src1 (RotateRight src2 src3))); + + ins_cost(1.9 * INSN_COST); + format %{ "eor $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ eor(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x3f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct OrI_reg_URShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (OrI src1 (URShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12047,7 +12269,7 @@ instruct OrI_reg_URShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct OrL_reg_URShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (OrL src1 (URShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12068,7 +12290,7 @@ instruct OrL_reg_URShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct OrI_reg_RShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (OrI src1 (RShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12089,7 +12311,7 @@ instruct OrI_reg_RShift_reg(iRegINoSp dst, // DO 
NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct OrL_reg_RShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (OrL src1 (RShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12110,7 +12332,7 @@ instruct OrL_reg_RShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct OrI_reg_LShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (OrI src1 (LShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12131,7 +12353,7 @@ instruct OrI_reg_LShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct OrL_reg_LShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (OrL src1 (LShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12148,11 +12370,53 @@ instruct OrL_reg_LShift_reg(iRegLNoSp dst, ins_pipe(ialu_reg_reg_shift); %} +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +instruct OrI_reg_RotateRight_reg(iRegINoSp dst, + iRegIorL2I src1, iRegIorL2I src2, + immI src3) %{ + match(Set dst (OrI src1 (RotateRight src2 src3))); + + ins_cost(1.9 * INSN_COST); + format %{ "orrw $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ orrw(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x1f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + +// This pattern is automatically generated from aarch64_ad.m4 +// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +instruct OrL_reg_RotateRight_reg(iRegLNoSp dst, + iRegL src1, iRegL src2, + immI src3) %{ + match(Set dst (OrL src1 (RotateRight src2 src3))); + + ins_cost(1.9 * INSN_COST); + format %{ "orr $dst, $src1, $src2, ROR $src3" %} + + ins_encode %{ + __ orr(as_Register($dst$$reg), + as_Register($src1$$reg), + as_Register($src2$$reg), + Assembler::ROR, + $src3$$constant & 0x3f); + %} + + ins_pipe(ialu_reg_reg_shift); +%} + // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AddI_reg_URShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AddI src1 (URShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12173,7 +12437,7 @@ instruct AddI_reg_URShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AddL_reg_URShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AddL src1 (URShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12194,7 +12458,7 @@ instruct AddL_reg_URShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AddI_reg_RShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AddI src1 (RShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12215,7 +12479,7 @@ instruct AddI_reg_RShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AddL_reg_RShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AddL src1 (RShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12236,7 +12500,7 @@ instruct AddL_reg_RShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AddI_reg_LShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set 
dst (AddI src1 (LShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12257,7 +12521,7 @@ instruct AddI_reg_LShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct AddL_reg_LShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (AddL src1 (LShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12278,7 +12542,7 @@ instruct AddL_reg_LShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct SubI_reg_URShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (SubI src1 (URShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12299,7 +12563,7 @@ instruct SubI_reg_URShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct SubL_reg_URShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (SubL src1 (URShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12320,7 +12584,7 @@ instruct SubL_reg_URShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct SubI_reg_RShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (SubI src1 (RShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12341,7 +12605,7 @@ instruct SubI_reg_RShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct SubL_reg_RShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (SubL src1 (RShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12362,7 +12626,7 @@ instruct SubL_reg_RShift_reg(iRegLNoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct SubI_reg_LShift_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (SubI src1 (LShiftI src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12383,7 +12647,7 @@ instruct SubI_reg_LShift_reg(iRegINoSp dst, // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct SubL_reg_LShift_reg(iRegLNoSp dst, iRegL src1, iRegL src2, - immI src3, rFlagsReg cr) %{ + immI src3) %{ match(Set dst (SubL src1 (LShiftL src2 src3))); ins_cost(1.9 * INSN_COST); @@ -12400,7 +12664,6 @@ instruct SubL_reg_LShift_reg(iRegLNoSp dst, ins_pipe(ialu_reg_reg_shift); %} - // This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE diff --git a/src/hotspot/cpu/aarch64/aarch64_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_ad.m4 index 97b4b9bc71c153cc9968d0f2455598f3b35c7952..a76be239b556f8329de82ae98fa0cb1967d91c22 100644 --- a/src/hotspot/cpu/aarch64/aarch64_ad.m4 +++ b/src/hotspot/cpu/aarch64/aarch64_ad.m4 @@ -35,8 +35,8 @@ define(`BASE_SHIFT_INSN', // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct $2$1_reg_$4_reg(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, - immI src3, rFlagsReg cr) %{ - match(Set dst ($2$1 src1 ($4$1 src2 src3))); + immI src3) %{ + match(Set dst ($2$1 src1 (ifelse($4, RotateRight, $4, $4$1) src2 src3))); ins_cost(1.9 * INSN_COST); format %{ "$3 $dst, $src1, $src2, $5 $src3" %} @@ -56,8 +56,7 @@ define(`BASE_INVERTED_INSN', `// This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE instruct $2$1_reg_not_reg(iReg$1NoSp dst, - iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1, - rFlagsReg cr) %{ + iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1) %{ dnl This ifelse is 
because hotspot reassociates (xor (xor ..)..) dnl into this canonical form. ifelse($2,Xor, @@ -79,14 +78,15 @@ dnl into this canonical form. define(`INVERTED_SHIFT_INSN', `// This pattern is automatically generated from aarch64_ad.m4 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE +// val ifelse($2, Xor, ^, $2, And, &, |) (-1 ^ (val ifelse($4, RShift, >>, $4, LShift, <<, $4, URShift, >>>, ror) shift)) ==> $3 instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, - immI src3, imm$1_M1 src4, rFlagsReg cr) %{ + immI src3, imm$1_M1 src4) %{ dnl This ifelse is because hotspot reassociates (xor (xor ..)..) dnl into this canonical form. ifelse($2,Xor, - match(Set dst ($2$1 src4 (Xor$1($4$1 src2 src3) src1)));, - match(Set dst ($2$1 src1 (Xor$1($4$1 src2 src3) src4)));) + match(Set dst ($2$1 src4 (Xor$1(ifelse($4, RotateRight, $4, $4$1) src2 src3) src1)));, + match(Set dst ($2$1 src1 (Xor$1(ifelse($4, RotateRight, $4, $4$1) src2 src3) src4)));) ins_cost(1.9 * INSN_COST); format %{ "$3 $dst, $src1, $src2, $5 $src3" %} @@ -131,17 +131,22 @@ define(`BOTH_INVERTED_INSNS', BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl dnl define(`BOTH_INVERTED_SHIFT_INSNS', -`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int) -INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, jlong)')dnl +`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4) +INVERTED_SHIFT_INSN(L, $1, $2, $3, $4)')dnl dnl -define(`ALL_SHIFT_KINDS', +define(`ALL_SHIFT_KINDS_WITHOUT_ROR', `BOTH_SHIFT_INSNS($1, $2, URShift, LSR) BOTH_SHIFT_INSNS($1, $2, RShift, ASR) BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl dnl +define(`ALL_SHIFT_KINDS', +`ALL_SHIFT_KINDS_WITHOUT_ROR($1, $2) +BOTH_SHIFT_INSNS($1, $2, RotateRight, ROR)')dnl +dnl define(`ALL_INVERTED_SHIFT_KINDS', `BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR) BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR) +BOTH_INVERTED_SHIFT_INSNS($1, $2, RotateRight, ROR) BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl dnl NOT_INSN(L, eon) @@ -155,8 +160,8 @@ ALL_INVERTED_SHIFT_KINDS(Or, orn) ALL_SHIFT_KINDS(And, andr) ALL_SHIFT_KINDS(Xor, eor) ALL_SHIFT_KINDS(Or, orr) -ALL_SHIFT_KINDS(Add, add) -ALL_SHIFT_KINDS(Sub, sub) +ALL_SHIFT_KINDS_WITHOUT_ROR(Add, add) +ALL_SHIFT_KINDS_WITHOUT_ROR(Sub, sub) dnl dnl EXTEND mode, rshift_op, src, lshift_count, rshift_count define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)') dnl diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp index b9188737faf8dde041172abe278ccde4a0d7e90f..915bc6b81d703c77d6165b77e66c1e562d680c0e 100644 --- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2657,16 +2657,18 @@ public: f(sidx<<(int)T, 14, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0); } -#define INSN(NAME, op) \ - void NAME(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) { \ - starti; \ - f(0, 31), f(T==D ? 
1:0, 30), f(0b001110000, 29, 21); \ - f(((idx<<1)|1)<<(int)T, 20, 16), f(op, 15, 10); \ - rf(Vn, 5), rf(Rd, 0); \ +#define INSN(NAME, cond, op1, op2) \ + void NAME(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) { \ + starti; \ + assert(cond, "invalid register variant"); \ + f(0, 31), f(op1, 30), f(0b001110000, 29, 21); \ + f(((idx << 1) | 1) << (int)T, 20, 16), f(op2, 15, 10); \ + rf(Vn, 5), rf(Rd, 0); \ } - INSN(umov, 0b001111); - INSN(smov, 0b001011); + INSN(umov, (T != Q), (T == D ? 1 : 0), 0b001111); + INSN(smov, (T < D), 1, 0b001011); + #undef INSN #define INSN(NAME, opc, opc2, isSHR) \ @@ -2685,6 +2687,7 @@ public: * 1xxx xxx 1D/2D, shift = UInt(immh:immb) - 64 \ * (1D is RESERVED) \ */ \ + assert(!isSHR || (isSHR && (shift != 0)), "Zero right shift"); \ assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value"); \ int cVal = (1 << (((T >> 1) + 3) + (isSHR ? 1 : 0))); \ int encodedShift = isSHR ? cVal - shift : cVal + shift; \ diff --git a/src/hotspot/cpu/aarch64/atomic_aarch64.hpp b/src/hotspot/cpu/aarch64/atomic_aarch64.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ac12ba9e23d7d1e4657b671d782f42457fd2fde3 --- /dev/null +++ b/src/hotspot/cpu/aarch64/atomic_aarch64.hpp @@ -0,0 +1,49 @@ +/* Copyright (c) 2021, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_AARCH64_ATOMIC_AARCH64_HPP +#define CPU_AARCH64_ATOMIC_AARCH64_HPP + +// Atomic stub implementation. 
+// Default implementations are in atomic_linux_aarch64.S +// +// All stubs pass arguments the same way +// x0: src/dest address +// x1: arg1 +// x2: arg2 (optional) +// x3, x8, x9: scratch +typedef uint64_t (*aarch64_atomic_stub_t)(volatile void *ptr, uint64_t arg1, uint64_t arg2); + +// Pointers to stubs +extern aarch64_atomic_stub_t aarch64_atomic_fetch_add_4_impl; +extern aarch64_atomic_stub_t aarch64_atomic_fetch_add_8_impl; +extern aarch64_atomic_stub_t aarch64_atomic_xchg_4_impl; +extern aarch64_atomic_stub_t aarch64_atomic_xchg_8_impl; +extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_1_impl; +extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_impl; +extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_8_impl; +extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_1_relaxed_impl; +extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_relaxed_impl; +extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_8_relaxed_impl; + +#endif // CPU_AARCH64_ATOMIC_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp index d4d74ac1f4fbcc03b8b3d57b5460972705808a89..7acc1bf19f2e17b9ba3594f47f5f5b5f0296053a 100644 --- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp @@ -147,7 +147,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre if (arg1 == c_rarg2 || arg1 == c_rarg3 || arg2 == c_rarg1 || arg2 == c_rarg3 || arg3 == c_rarg1 || arg3 == c_rarg2) { - stp(arg3, arg2, Address(pre(sp, 2 * wordSize))); + stp(arg3, arg2, Address(pre(sp, -2 * wordSize))); stp(arg1, zr, Address(pre(sp, -2 * wordSize))); ldp(c_rarg1, zr, Address(post(sp, 2 * wordSize))); ldp(c_rarg3, c_rarg2, Address(post(sp, 2 * wordSize))); diff --git a/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp index ab4f16e9c180718de0453f57e9b0363e66d46eb3..50bbbd786d93707eeab5e525bcc5ed13d71ed3d9 100644 --- a/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp @@ -52,7 +52,6 @@ define_pd_global(bool, ProfileInterpreter, false); define_pd_global(intx, CodeCacheExpansionSize, 32*K ); define_pd_global(uintx, CodeCacheMinBlockLength, 1); define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); -define_pd_global(uintx, MetaspaceSize, 12*M ); define_pd_global(bool, NeverActAsServerClassMachine, true ); define_pd_global(uint64_t,MaxRAM, 1ULL*G); define_pd_global(bool, CICompileOSR, true ); diff --git a/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp index e1bfeb3548638eb2fa4155489cd182359adb32ff..f15b6faa79d06a8d1cac9b17ac3dae3d8899b15e 100644 --- a/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp @@ -77,9 +77,6 @@ define_pd_global(intx, NonNMethodCodeHeapSize, 5*M ); define_pd_global(uintx, CodeCacheMinBlockLength, 6); define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); -// Heap related flags -define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M)); - // Ergonomics related flags define_pd_global(bool, NeverActAsServerClassMachine, false); diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp index 255477f6a49b5aabf9a3cf6626742849b3baea02..939669ef3f11fe9814a12cdbf4e8db2319214ce9 100644 --- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp @@ -355,10 +355,6 @@ frame frame::sender_for_entry_frame(RegisterMap* 
map) const {
   vmassert(jfa->last_Java_pc() != NULL, "not walkable");
   frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
 
-  if (jfa->saved_fp_address()) {
-    update_map_with_saved_link(map, jfa->saved_fp_address());
-  }
-
   return fr;
 }
diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
index 2841e68cf56589bd0f078264fa1ec2286ae423ba..777448c6a8ce4794733f827d8fd48f36f09592fc 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "gc/shared/barrierSetNMethod.hpp"
diff --git a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp
index 9c0a66b255adc9004dad9568fc79b5d174196ccd..fb677828e20e640df27cc874e6183f7a59036eed 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp
@@ -38,9 +38,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
   BarrierSet* bs = BarrierSet::barrier_set();
   assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind");
 
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
-  CardTable* ct = ctbs->card_table();
-
   __ lsr(obj, obj, CardTable::card_shift);
 
   assert(CardTable::dirty_card_val() == 0, "must be");
@@ -49,25 +46,17 @@
   if (UseCondCardMark) {
     Label L_already_dirty;
-    __ membar(Assembler::StoreLoad);
     __ ldrb(rscratch2, Address(obj, rscratch1));
     __ cbz(rscratch2, L_already_dirty);
     __ strb(zr, Address(obj, rscratch1));
     __ bind(L_already_dirty);
   } else {
-    if (ct->scanned_concurrently()) {
-      __ membar(Assembler::StoreStore);
-    }
     __ strb(zr, Address(obj, rscratch1));
   }
 }
 
 void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register start, Register count, Register scratch, RegSet saved_regs) {
-  BarrierSet* bs = BarrierSet::barrier_set();
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
-  CardTable* ct = ctbs->card_table();
-
   Label L_loop, L_done;
   const Register end = count;
 
@@ -81,9 +70,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
   __ load_byte_map_base(scratch);
   __ add(start, start, scratch);
-  if (ct->scanned_concurrently()) {
-    __ membar(__ StoreStore);
-  }
   __ bind(L_loop);
   __ strb(zr, Address(start, count));
   __ subs(count, count, 1);
diff --git a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp
index 8a89fb56a83893f68ce6002b8b83caceca89c14e..731e45643aa03aacf51ffeb1809d6342cbf2cd22 100644
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp
@@ -43,215 +43,103 @@ Register InterpreterRuntime::SignatureHandlerGenerator::from() { 
return rlocals; Register InterpreterRuntime::SignatureHandlerGenerator::to() { return sp; } Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return rscratch1; } +Register InterpreterRuntime::SignatureHandlerGenerator::next_gpr() { + if (_num_reg_int_args < Argument::n_int_register_parameters_c-1) { + return as_Register(_num_reg_int_args++ + c_rarg1->encoding()); + } + return noreg; +} + +FloatRegister InterpreterRuntime::SignatureHandlerGenerator::next_fpr() { + if (_num_reg_fp_args < Argument::n_float_register_parameters_c) { + return as_FloatRegister(_num_reg_fp_args++); + } + return fnoreg; +} + +int InterpreterRuntime::SignatureHandlerGenerator::next_stack_offset() { + int ret = _stack_offset; + _stack_offset += wordSize; + return ret; +} + InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator( const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { _masm = new MacroAssembler(buffer); - _num_int_args = (method->is_static() ? 1 : 0); - _num_fp_args = 0; + _num_reg_int_args = (method->is_static() ? 1 : 0); + _num_reg_fp_args = 0; _stack_offset = 0; } void InterpreterRuntime::SignatureHandlerGenerator::pass_int() { const Address src(from(), Interpreter::local_offset_in_bytes(offset())); - switch (_num_int_args) { - case 0: - __ ldr(c_rarg1, src); - _num_int_args++; - break; - case 1: - __ ldr(c_rarg2, src); - _num_int_args++; - break; - case 2: - __ ldr(c_rarg3, src); - _num_int_args++; - break; - case 3: - __ ldr(c_rarg4, src); - _num_int_args++; - break; - case 4: - __ ldr(c_rarg5, src); - _num_int_args++; - break; - case 5: - __ ldr(c_rarg6, src); - _num_int_args++; - break; - case 6: - __ ldr(c_rarg7, src); - _num_int_args++; - break; - default: - __ ldr(r0, src); - __ str(r0, Address(to(), _stack_offset)); - _stack_offset += wordSize; - _num_int_args++; - break; + Register reg = next_gpr(); + if (reg != noreg) { + __ ldr(reg, src); + } else { + __ ldrw(r0, src); + __ strw(r0, Address(to(), next_stack_offset())); } } void InterpreterRuntime::SignatureHandlerGenerator::pass_long() { const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1)); - switch (_num_int_args) { - case 0: - __ ldr(c_rarg1, src); - _num_int_args++; - break; - case 1: - __ ldr(c_rarg2, src); - _num_int_args++; - break; - case 2: - __ ldr(c_rarg3, src); - _num_int_args++; - break; - case 3: - __ ldr(c_rarg4, src); - _num_int_args++; - break; - case 4: - __ ldr(c_rarg5, src); - _num_int_args++; - break; - case 5: - __ ldr(c_rarg6, src); - _num_int_args++; - break; - case 6: - __ ldr(c_rarg7, src); - _num_int_args++; - break; - default: + Register reg = next_gpr(); + if (reg != noreg) { + __ ldr(reg, src); + } else { __ ldr(r0, src); - __ str(r0, Address(to(), _stack_offset)); - _stack_offset += wordSize; - _num_int_args++; - break; + __ str(r0, Address(to(), next_stack_offset())); } } void InterpreterRuntime::SignatureHandlerGenerator::pass_float() { const Address src(from(), Interpreter::local_offset_in_bytes(offset())); - if (_num_fp_args < Argument::n_float_register_parameters_c) { - __ ldrs(as_FloatRegister(_num_fp_args++), src); + FloatRegister reg = next_fpr(); + if (reg != fnoreg) { + __ ldrs(reg, src); } else { __ ldrw(r0, src); - __ strw(r0, Address(to(), _stack_offset)); - _stack_offset += wordSize; - _num_fp_args++; + __ strw(r0, Address(to(), next_stack_offset())); } } void InterpreterRuntime::SignatureHandlerGenerator::pass_double() { const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1)); - if 
(_num_fp_args < Argument::n_float_register_parameters_c) { - __ ldrd(as_FloatRegister(_num_fp_args++), src); + FloatRegister reg = next_fpr(); + if (reg != fnoreg) { + __ ldrd(reg, src); } else { __ ldr(r0, src); - __ str(r0, Address(to(), _stack_offset)); - _stack_offset += wordSize; - _num_fp_args++; + __ str(r0, Address(to(), next_stack_offset())); } } void InterpreterRuntime::SignatureHandlerGenerator::pass_object() { - - switch (_num_int_args) { - case 0: + Register reg = next_gpr(); + if (reg == c_rarg1) { assert(offset() == 0, "argument register 1 can only be (non-null) receiver"); __ add(c_rarg1, from(), Interpreter::local_offset_in_bytes(offset())); - _num_int_args++; - break; - case 1: - { - __ add(r0, from(), Interpreter::local_offset_in_bytes(offset())); - __ mov(c_rarg2, 0); - __ ldr(temp(), r0); - Label L; - __ cbz(temp(), L); - __ mov(c_rarg2, r0); - __ bind(L); - _num_int_args++; - break; - } - case 2: - { - __ add(r0, from(), Interpreter::local_offset_in_bytes(offset())); - __ mov(c_rarg3, 0); - __ ldr(temp(), r0); - Label L; - __ cbz(temp(), L); - __ mov(c_rarg3, r0); - __ bind(L); - _num_int_args++; - break; - } - case 3: - { - __ add(r0, from(), Interpreter::local_offset_in_bytes(offset())); - __ mov(c_rarg4, 0); - __ ldr(temp(), r0); - Label L; - __ cbz(temp(), L); - __ mov(c_rarg4, r0); - __ bind(L); - _num_int_args++; - break; - } - case 4: - { - __ add(r0, from(), Interpreter::local_offset_in_bytes(offset())); - __ mov(c_rarg5, 0); - __ ldr(temp(), r0); - Label L; - __ cbz(temp(), L); - __ mov(c_rarg5, r0); - __ bind(L); - _num_int_args++; - break; - } - case 5: - { - __ add(r0, from(), Interpreter::local_offset_in_bytes(offset())); - __ mov(c_rarg6, 0); - __ ldr(temp(), r0); - Label L; - __ cbz(temp(), L); - __ mov(c_rarg6, r0); - __ bind(L); - _num_int_args++; - break; - } - case 6: - { - __ add(r0, from(), Interpreter::local_offset_in_bytes(offset())); - __ mov(c_rarg7, 0); - __ ldr(temp(), r0); - Label L; - __ cbz(temp(), L); - __ mov(c_rarg7, r0); - __ bind(L); - _num_int_args++; - break; - } - default: - { - __ add(r0, from(), Interpreter::local_offset_in_bytes(offset())); - __ ldr(temp(), r0); - Label L; - __ cbnz(temp(), L); - __ mov(r0, zr); - __ bind(L); - __ str(r0, Address(to(), _stack_offset)); - _stack_offset += wordSize; - _num_int_args++; - break; - } + } else if (reg != noreg) { + __ add(r0, from(), Interpreter::local_offset_in_bytes(offset())); + __ mov(reg, 0); + __ ldr(temp(), r0); + Label L; + __ cbz(temp(), L); + __ mov(reg, r0); + __ bind(L); + } else { + __ add(r0, from(), Interpreter::local_offset_in_bytes(offset())); + __ ldr(temp(), r0); + Label L; + __ cbnz(temp(), L); + __ mov(r0, zr); + __ bind(L); + __ str(r0, Address(to(), next_stack_offset())); } } @@ -280,77 +168,77 @@ class SlowSignatureHandler intptr_t* _int_args; intptr_t* _fp_args; intptr_t* _fp_identifiers; - unsigned int _num_int_args; - unsigned int _num_fp_args; + unsigned int _num_reg_int_args; + unsigned int _num_reg_fp_args; - virtual void pass_int() - { - jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); + intptr_t* single_slot_addr() { + intptr_t* from_addr = (intptr_t*)(_from+Interpreter::local_offset_in_bytes(0)); _from -= Interpreter::stackElementSize; - - if (_num_int_args < Argument::n_int_register_parameters_c-1) { - *_int_args++ = from_obj; - _num_int_args++; - } else { - *_to++ = from_obj; - _num_int_args++; - } + return from_addr; } - virtual void pass_long() - { - intptr_t from_obj = 
*(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1)); + intptr_t* double_slot_addr() { + intptr_t* from_addr = (intptr_t*)(_from+Interpreter::local_offset_in_bytes(1)); _from -= 2*Interpreter::stackElementSize; + return from_addr; + } - if (_num_int_args < Argument::n_int_register_parameters_c-1) { - *_int_args++ = from_obj; - _num_int_args++; - } else { - *_to++ = from_obj; - _num_int_args++; + int pass_gpr(intptr_t value) { + if (_num_reg_int_args < Argument::n_int_register_parameters_c-1) { + *_int_args++ = value; + return _num_reg_int_args++; } + return -1; } - virtual void pass_object() - { - intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0)); - _from -= Interpreter::stackElementSize; + int pass_fpr(intptr_t value) { + if (_num_reg_fp_args < Argument::n_float_register_parameters_c) { + *_fp_args++ = value; + return _num_reg_fp_args++; + } + return -1; + } - if (_num_int_args < Argument::n_int_register_parameters_c-1) { - *_int_args++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr; - _num_int_args++; - } else { - *_to++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr; - _num_int_args++; + void pass_stack(intptr_t value) { + *_to++ = value; + } + + virtual void pass_int() { + jint value = *(jint*)single_slot_addr(); + if (pass_gpr(value) < 0) { + pass_stack(value); } } - virtual void pass_float() - { - jint from_obj = *(jint*)(_from+Interpreter::local_offset_in_bytes(0)); - _from -= Interpreter::stackElementSize; + virtual void pass_long() { + intptr_t value = *double_slot_addr(); + if (pass_gpr(value) < 0) { + pass_stack(value); + } + } - if (_num_fp_args < Argument::n_float_register_parameters_c) { - *_fp_args++ = from_obj; - _num_fp_args++; - } else { - *_to++ = from_obj; - _num_fp_args++; + virtual void pass_object() { + intptr_t* addr = single_slot_addr(); + intptr_t value = *addr == 0 ? NULL : (intptr_t)addr; + if (pass_gpr(value) < 0) { + pass_stack(value); } } - virtual void pass_double() - { - intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1)); - _from -= 2*Interpreter::stackElementSize; + virtual void pass_float() { + jint value = *(jint*)single_slot_addr(); + if (pass_fpr(value) < 0) { + pass_stack(value); + } + } - if (_num_fp_args < Argument::n_float_register_parameters_c) { - *_fp_args++ = from_obj; - *_fp_identifiers |= (1ull << _num_fp_args); // mark as double - _num_fp_args++; + virtual void pass_double() { + intptr_t value = *double_slot_addr(); + int arg = pass_fpr(value); + if (0 <= arg) { + *_fp_identifiers |= (1ull << arg); // mark as double } else { - *_to++ = from_obj; - _num_fp_args++; + pass_stack(value); } } @@ -365,8 +253,8 @@ class SlowSignatureHandler _fp_args = to - 8; _fp_identifiers = to - 9; *(int*) _fp_identifiers = 0; - _num_int_args = (method->is_static() ? 1 : 0); - _num_fp_args = 0; + _num_reg_int_args = (method->is_static() ? 
1 : 0); + _num_reg_fp_args = 0; } }; diff --git a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp index ee7c2d1bf76774ae6d7bf943220b60e358c15e46..023760a469f286e1dceb46b00cba2d04e5f3b8b4 100644 --- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp @@ -34,8 +34,8 @@ class SignatureHandlerGenerator: public NativeSignatureIterator { private: MacroAssembler* _masm; - unsigned int _num_fp_args; - unsigned int _num_int_args; + unsigned int _num_reg_fp_args; + unsigned int _num_reg_int_args; int _stack_offset; void pass_int(); @@ -44,6 +44,10 @@ class SignatureHandlerGenerator: public NativeSignatureIterator { void pass_double(); void pass_object(); + Register next_gpr(); + FloatRegister next_fpr(); + int next_stack_offset(); + public: // Creation SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer); diff --git a/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp b/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp index 2d5b9a62b455f22dafc18ddf6df7cefbbef8eaff..6ff3c037407e67a47a1122b1e4310a3a8b572ede 100644 --- a/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp @@ -31,9 +31,6 @@ private: // FP value associated with _last_Java_sp: intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to - // (Optional) location of saved FP register, which GCs want to inspect - intptr_t** volatile _saved_fp_address; - public: // Each arch must define reset, save, restore // These are used by objects that only care about: @@ -47,7 +44,6 @@ public: OrderAccess::release(); _last_Java_fp = NULL; _last_Java_pc = NULL; - _saved_fp_address = NULL; } void copy(JavaFrameAnchor* src) { @@ -66,8 +62,6 @@ public: _last_Java_pc = src->_last_Java_pc; // Must be last so profiler will always see valid frame if has_last_frame() is true _last_Java_sp = src->_last_Java_sp; - - _saved_fp_address = src->_saved_fp_address; } bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; } @@ -78,12 +72,9 @@ public: address last_Java_pc(void) { return _last_Java_pc; } - intptr_t** saved_fp_address(void) const { return _saved_fp_address; } - private: static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); } - static ByteSize saved_fp_address_offset() { return byte_offset_of(JavaFrameAnchor, _saved_fp_address); } public: diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index b742edb47dcefaf76ad445c71542de27a8397351..c577d6ada841bcfc730145131350cd70b1ac5447 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -320,8 +320,6 @@ void MacroAssembler::reset_last_Java_frame(bool clear_fp) { // Always clear the pc because it could have been set by make_walkable() str(zr, Address(rthread, JavaThread::last_Java_pc_offset())); - - str(zr, Address(rthread, JavaThread::saved_fp_address_offset())); } // Calls to C land @@ -2567,6 +2565,8 @@ void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword) ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word) +ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword) +ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word) ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword) ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, 
Assembler::word) @@ -5266,10 +5266,14 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le // by the call to JavaThread::aarch64_get_thread_helper() or, indeed, // the call setup code. // -// aarch64_get_thread_helper() clobbers only r0, r1, and flags. +// On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags. +// On other systems, the helper is a usual C function. // void MacroAssembler::get_thread(Register dst) { - RegSet saved_regs = RegSet::range(r0, r1) + lr - dst; + RegSet saved_regs = + LINUX_ONLY(RegSet::range(r0, r1) + lr - dst) + NOT_LINUX (RegSet::range(r0, r17) + lr - dst); + push(saved_regs, sp); mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp index 4ffc97bb377197ec615c1a9599b6f196abf12b4c..fd170d405262b152accd5b994d001da644716c7b 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -527,6 +527,33 @@ public: orr(Vd, T, Vn, Vn); } + // AdvSIMD shift by immediate. + // These are "user friendly" variants which allow a shift count of 0. +#define WRAP(INSN) \ + void INSN(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift) { \ + if (shift == 0) { \ + SIMD_Arrangement arrange = (T & 1) == 0 ? T8B : T16B; \ + Assembler::orr(Vd, arrange, Vn, Vn); \ + } else { \ + Assembler::INSN(Vd, T, Vn, shift); \ + } \ + } \ + + WRAP(shl) WRAP(sshr) WRAP(ushr) +#undef WRAP + +#define WRAP(INSN) \ + void INSN(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift) { \ + if (shift == 0) { \ + Assembler::addv(Vd, T, Vd, Vn); \ + } else { \ + Assembler::INSN(Vd, T, Vn, shift); \ + } \ + } \ + + WRAP(usra) WRAP(ssra) +#undef WRAP + public: // Generalized Test Bit And Branch, including a "far" variety which @@ -1039,6 +1066,8 @@ public: void atomic_xchg(Register prev, Register newv, Register addr); void atomic_xchgw(Register prev, Register newv, Register addr); + void atomic_xchgl(Register prev, Register newv, Register addr); + void atomic_xchglw(Register prev, Register newv, Register addr); void atomic_xchgal(Register prev, Register newv, Register addr); void atomic_xchgalw(Register prev, Register newv, Register addr); diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp index c1a86c025953a165082afec419811ef0c45710ad..a478fd0236154040aa7d7a03b9ee636b6e1852f3 100644 --- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp @@ -240,15 +240,6 @@ bool SharedRuntime::is_wide_vector(int size) { return size > 8; } -size_t SharedRuntime::trampoline_size() { - return 16; -} - -void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) { - __ mov(rscratch1, destination); - __ br(rscratch1); -} - // The java_calling_convention describes stack locations as ideal slots on // a frame with no abi restrictions. 
Since we must observe abi restrictions
 // (like the placement of the register window) the slots must be biased by
@@ -3072,7 +3063,6 @@ void OptoRuntime::generate_exception_blob() {
   // Set exception blob
   _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
 }
-#endif // COMPILER2
 
 // ---------------------------------------------------------------
 
@@ -3082,6 +3072,10 @@ class NativeInvokerGenerator : public StubCodeGenerator {
   const GrowableArray<VMReg>& _input_registers;
   const GrowableArray<VMReg>& _output_registers;
+
+  int _frame_complete;
+  int _framesize;
+  OopMapSet* _oop_maps;
 public:
   NativeInvokerGenerator(CodeBuffer* buffer, address call_target,
@@ -3092,9 +3086,90 @@ public:
     _call_target(call_target),
     _shadow_space_bytes(shadow_space_bytes),
     _input_registers(input_registers),
-    _output_registers(output_registers) {}
+    _output_registers(output_registers),
+    _frame_complete(0),
+    _framesize(0),
+    _oop_maps(NULL) {
+    assert(_output_registers.length() <= 1
+           || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
+  }
 
   void generate();
 
+  int spill_size_in_bytes() const {
+    if (_output_registers.length() == 0) {
+      return 0;
+    }
+    VMReg reg = _output_registers.at(0);
+    assert(reg->is_reg(), "must be a register");
+    if (reg->is_Register()) {
+      return 8;
+    } else if (reg->is_FloatRegister()) {
+      bool use_sve = Matcher::supports_scalable_vector();
+      if (use_sve) {
+        return Matcher::scalable_vector_reg_size(T_BYTE);
+      }
+      return 16;
+    } else {
+      ShouldNotReachHere();
+    }
+    return 0;
+  }
+
+  void spill_output_registers() {
+    if (_output_registers.length() == 0) {
+      return;
+    }
+    VMReg reg = _output_registers.at(0);
+    assert(reg->is_reg(), "must be a register");
+    MacroAssembler* masm = _masm;
+    if (reg->is_Register()) {
+      __ spill(reg->as_Register(), true, 0);
+    } else if (reg->is_FloatRegister()) {
+      bool use_sve = Matcher::supports_scalable_vector();
+      if (use_sve) {
+        __ spill_sve_vector(reg->as_FloatRegister(), 0, Matcher::scalable_vector_reg_size(T_BYTE));
+      } else {
+        __ spill(reg->as_FloatRegister(), __ Q, 0);
+      }
+    } else {
+      ShouldNotReachHere();
+    }
+  }
+
+  void fill_output_registers() {
+    if (_output_registers.length() == 0) {
+      return;
+    }
+    VMReg reg = _output_registers.at(0);
+    assert(reg->is_reg(), "must be a register");
+    MacroAssembler* masm = _masm;
+    if (reg->is_Register()) {
+      __ unspill(reg->as_Register(), true, 0);
+    } else if (reg->is_FloatRegister()) {
+      bool use_sve = Matcher::supports_scalable_vector();
+      if (use_sve) {
+        __ unspill_sve_vector(reg->as_FloatRegister(), 0, Matcher::scalable_vector_reg_size(T_BYTE));
+      } else {
+        __ unspill(reg->as_FloatRegister(), __ Q, 0);
+      }
+    } else {
+      ShouldNotReachHere();
+    }
+  }
+
+  int frame_complete() const {
+    return _frame_complete;
+  }
+
+  int framesize() const {
+    return (_framesize >> (LogBytesPerWord - LogBytesPerInt));
+  }
+
+  OopMapSet* oop_maps() const {
+    return _oop_maps;
+  }
+
 private:
 #ifdef ASSERT
   bool target_uses_register(VMReg reg) {
@@ -3105,21 +3180,23 @@ private:
 
 static const int native_invoker_code_size = 1024;
 
-BufferBlob* SharedRuntime::make_native_invoker(address call_target,
-                                               int shadow_space_bytes,
-                                               const GrowableArray<VMReg>& input_registers,
-                                               const GrowableArray<VMReg>& output_registers) {
-  BufferBlob* _invoke_native_blob =
-    BufferBlob::create("nep_invoker_blob", native_invoker_code_size);
-  if (_invoke_native_blob == NULL)
-    return NULL; // allocation failure
-
-  CodeBuffer code(_invoke_native_blob);
+RuntimeStub* 
SharedRuntime::make_native_invoker(address call_target,
+                                                int shadow_space_bytes,
+                                                const GrowableArray<VMReg>& input_registers,
+                                                const GrowableArray<VMReg>& output_registers) {
+  int locs_size = 64;
+  CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
   NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers);
   g.generate();
   code.log_section_sizes("nep_invoker_blob");
 
-  return _invoke_native_blob;
+  RuntimeStub* stub =
+    RuntimeStub::new_runtime_stub("nep_invoker_blob",
+                                  &code,
+                                  g.frame_complete(),
+                                  g.framesize(),
+                                  g.oop_maps(), false);
+  return stub;
 }
 
 void NativeInvokerGenerator::generate() {
@@ -3128,26 +3205,40 @@
       || target_uses_register(rthread->as_VMReg())),
          "Register conflict");
 
+  enum layout {
+    rbp_off,
+    rbp_off2,
+    return_off,
+    return_off2,
+    framesize // inclusive of return address
+  };
+
+  assert(_shadow_space_bytes == 0, "not expecting shadow space on AArch64");
+  _framesize = align_up(framesize + (spill_size_in_bytes() >> LogBytesPerInt), 4);
+  assert(is_even(_framesize/2), "sp not 16-byte aligned");
+
+  _oop_maps = new OopMapSet();
   MacroAssembler* masm = _masm;
-  __ set_last_Java_frame(sp, noreg, lr, rscratch1);
+
+  address start = __ pc();
 
   __ enter();
 
-  // Store a pointer to the previous R29 (RFP) saved on the stack as it
-  // may contain an oop if PreserveFramePointer is off. This value is
-  // retrieved later by frame::sender_for_entry_frame() when the stack
-  // is walked.
-  __ mov(rscratch1, sp);
-  __ str(rscratch1, Address(rthread, JavaThread::saved_fp_address_offset()));
+  // lr and fp are already in place
+  __ sub(sp, rfp, ((unsigned)_framesize-4) << LogBytesPerInt); // prolog
+
+  _frame_complete = __ pc() - start;
+
+  address the_pc = __ pc();
+  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
+  OopMap* map = new OopMap(_framesize, 0);
+  _oop_maps->add_gc_map(the_pc - start, map);
 
   // State transition
   __ mov(rscratch1, _thread_in_native);
   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
   __ stlrw(rscratch1, rscratch2);
 
-  assert(_shadow_space_bytes == 0, "not expecting shadow space on AArch64");
-
   rt_call(masm, _call_target);
 
   __ mov(rscratch1, _thread_in_native_trans);
@@ -3193,27 +3284,14 @@
   __ bind(L_safepoint_poll_slow_path);
 
   // Need to save the native result registers around any runtime calls.
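   // (At most one return register is live here: the constructor asserts
   // "no multi-reg returns", and spill_output_registers() /
   // fill_output_registers() cover the integer, vector and SVE cases.)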
- RegSet spills; - FloatRegSet fp_spills; - for (int i = 0; i < _output_registers.length(); i++) { - VMReg output = _output_registers.at(i); - if (output->is_Register()) { - spills += RegSet::of(output->as_Register()); - } else if (output->is_FloatRegister()) { - fp_spills += FloatRegSet::of(output->as_FloatRegister()); - } - } - - __ push(spills, sp); - __ push_fp(fp_spills, sp); + spill_output_registers(); __ mov(c_rarg0, rthread); assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area"); __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); __ blr(rscratch1); - __ pop_fp(fp_spills, sp); - __ pop(spills, sp); + fill_output_registers(); __ b(L_after_safepoint_poll); __ block_comment("} L_safepoint_poll_slow_path"); @@ -3223,13 +3301,11 @@ void NativeInvokerGenerator::generate() { __ block_comment("{ L_reguard"); __ bind(L_reguard); - __ push(spills, sp); - __ push_fp(fp_spills, sp); + spill_output_registers(); rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); - __ pop_fp(fp_spills, sp); - __ pop(spills, sp); + fill_output_registers(); __ b(L_after_reguard); @@ -3239,3 +3315,4 @@ void NativeInvokerGenerator::generate() { __ flush(); } +#endif // COMPILER2 diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp index 1cd6e171c6e91c7c648144ef9e0119b459d236c8..6326b29ed6d7697a2baa0491ab2d25cb7f3e8962 100644 --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ -26,6 +26,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.hpp" #include "asm/macroAssembler.inline.hpp" +#include "atomic_aarch64.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSetAssembler.hpp" #include "gc/shared/gc_globals.hpp" @@ -38,6 +39,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" +#include "runtime/atomic.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/sharedRuntime.hpp" @@ -1361,7 +1363,7 @@ class StubGenerator: public StubCodeGenerator { // // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let // the hardware handle it. The two dwords within qwords that span - // cache line boundaries will still be loaded and stored atomicly. + // cache line boundaries will still be loaded and stored atomically. // // Side Effects: // disjoint_int_copy_entry is set to the no-overlap entry point @@ -1431,7 +1433,7 @@ class StubGenerator: public StubCodeGenerator { // // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let // the hardware handle it. The two dwords within qwords that span - // cache line boundaries will still be loaded and stored atomicly. + // cache line boundaries will still be loaded and stored atomically. // address generate_conjoint_copy(int size, bool aligned, bool is_oop, address nooverlap_target, address *entry, const char *name, @@ -1596,7 +1598,7 @@ class StubGenerator: public StubCodeGenerator { // // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let // the hardware handle it. The two dwords within qwords that span - // cache line boundaries will still be loaded and stored atomicly. + // cache line boundaries will still be loaded and stored atomically. 
// // Side Effects: // disjoint_int_copy_entry is set to the no-overlap entry point @@ -1620,7 +1622,7 @@ class StubGenerator: public StubCodeGenerator { // // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let // the hardware handle it. The two dwords within qwords that span - // cache line boundaries will still be loaded and stored atomicly. + // cache line boundaries will still be loaded and stored atomically. // address generate_conjoint_int_copy(bool aligned, address nooverlap_target, address *entry, const char *name, @@ -5571,6 +5573,171 @@ class StubGenerator: public StubCodeGenerator { return start; } +#ifdef LINUX + + // ARMv8.1 LSE versions of the atomic stubs used by Atomic::PlatformXX. + // + // If LSE is in use, generate LSE versions of all the stubs. The + // non-LSE versions are in atomic_aarch64.S. + + // class AtomicStubMark records the entry point of a stub and the + // stub pointer which will point to it. The stub pointer is set to + // the entry point when ~AtomicStubMark() is called, which must be + // after ICache::invalidate_range. This ensures safe publication of + // the generated code. + class AtomicStubMark { + address _entry_point; + aarch64_atomic_stub_t *_stub; + MacroAssembler *_masm; + public: + AtomicStubMark(MacroAssembler *masm, aarch64_atomic_stub_t *stub) { + _masm = masm; + __ align(32); + _entry_point = __ pc(); + _stub = stub; + } + ~AtomicStubMark() { + *_stub = (aarch64_atomic_stub_t)_entry_point; + } + }; + + // NB: For memory_order_conservative we need a trailing membar after + // LSE atomic operations but not a leading membar. + // + // We don't need a leading membar because a clause in the Arm ARM + // says: + // + // Barrier-ordered-before + // + // Barrier instructions order prior Memory effects before subsequent + // Memory effects generated by the same Observer. A read or a write + // RW1 is Barrier-ordered-before a read or a write RW 2 from the same + // Observer if and only if RW1 appears in program order before RW 2 + // and [ ... ] at least one of RW 1 and RW 2 is generated by an atomic + // instruction with both Acquire and Release semantics. + // + // All the atomic instructions {ldaddal, swapal, casal} have Acquire + // and Release semantics, therefore we don't need a leading + // barrier. However, there is no corresponding Barrier-ordered-after + // relationship, therefore we need a trailing membar to prevent a + // later store or load from being reordered with the store in an + // atomic instruction. + // + // This was checked by using the herd7 consistency model simulator + // (http://diy.inria.fr/) with this test case: + // + // AArch64 LseCas + // { 0:X1=x; 0:X2=y; 1:X1=x; 1:X2=y; } + // P0 | P1; + // LDR W4, [X2] | MOV W3, #0; + // DMB LD | MOV W4, #1; + // LDR W3, [X1] | CASAL W3, W4, [X1]; + // | DMB ISH; + // | STR W4, [X2]; + // exists + // (0:X3=0 /\ 0:X4=1) + // + // If X3 == 0 && X4 == 1, the store to y in P1 has been reordered + // with the store to x in P1. Without the DMB in P1 this may happen. + // + // At the time of writing we don't know of any AArch64 hardware that + // reorders stores in this way, but the Reference Manual permits it. 
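+  // For orientation, a caller-side sketch (illustrative glue only, not
+  // the exact Atomic::Platform* wrappers): the runtime reaches these
+  // stubs through the pointers declared in atomic_aarch64.hpp, e.g.
+  //
+  //   inline uint32_t fetch_add_4(volatile void* p, uint32_t add) {
+  //     // Dispatches to the LSE stub generated below, or to the
+  //     // load/store-exclusive default from atomic_linux_aarch64.S.
+  //     return (uint32_t)aarch64_atomic_fetch_add_4_impl(p, add, 0 /* arg2 unused */);
+  //   }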
+ + void gen_cas_entry(Assembler::operand_size size, + atomic_memory_order order) { + Register prev = r3, ptr = c_rarg0, compare_val = c_rarg1, + exchange_val = c_rarg2; + bool acquire, release; + switch (order) { + case memory_order_relaxed: + acquire = false; + release = false; + break; + default: + acquire = true; + release = true; + break; + } + __ mov(prev, compare_val); + __ lse_cas(prev, exchange_val, ptr, size, acquire, release, /*not_pair*/true); + if (order == memory_order_conservative) { + __ membar(Assembler::StoreStore|Assembler::StoreLoad); + } + if (size == Assembler::xword) { + __ mov(r0, prev); + } else { + __ movw(r0, prev); + } + __ ret(lr); + } + + void gen_ldaddal_entry(Assembler::operand_size size) { + Register prev = r2, addr = c_rarg0, incr = c_rarg1; + __ ldaddal(size, incr, prev, addr); + __ membar(Assembler::StoreStore|Assembler::StoreLoad); + if (size == Assembler::xword) { + __ mov(r0, prev); + } else { + __ movw(r0, prev); + } + __ ret(lr); + } + + void gen_swpal_entry(Assembler::operand_size size) { + Register prev = r2, addr = c_rarg0, incr = c_rarg1; + __ swpal(size, incr, prev, addr); + __ membar(Assembler::StoreStore|Assembler::StoreLoad); + if (size == Assembler::xword) { + __ mov(r0, prev); + } else { + __ movw(r0, prev); + } + __ ret(lr); + } + + void generate_atomic_entry_points() { + if (! UseLSE) { + return; + } + + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", "atomic entry points"); + address first_entry = __ pc(); + + // All memory_order_conservative + AtomicStubMark mark_fetch_add_4(_masm, &aarch64_atomic_fetch_add_4_impl); + gen_ldaddal_entry(Assembler::word); + AtomicStubMark mark_fetch_add_8(_masm, &aarch64_atomic_fetch_add_8_impl); + gen_ldaddal_entry(Assembler::xword); + + AtomicStubMark mark_xchg_4(_masm, &aarch64_atomic_xchg_4_impl); + gen_swpal_entry(Assembler::word); + AtomicStubMark mark_xchg_8_impl(_masm, &aarch64_atomic_xchg_8_impl); + gen_swpal_entry(Assembler::xword); + + // CAS, memory_order_conservative + AtomicStubMark mark_cmpxchg_1(_masm, &aarch64_atomic_cmpxchg_1_impl); + gen_cas_entry(MacroAssembler::byte, memory_order_conservative); + AtomicStubMark mark_cmpxchg_4(_masm, &aarch64_atomic_cmpxchg_4_impl); + gen_cas_entry(MacroAssembler::word, memory_order_conservative); + AtomicStubMark mark_cmpxchg_8(_masm, &aarch64_atomic_cmpxchg_8_impl); + gen_cas_entry(MacroAssembler::xword, memory_order_conservative); + + // CAS, memory_order_relaxed + AtomicStubMark mark_cmpxchg_1_relaxed + (_masm, &aarch64_atomic_cmpxchg_1_relaxed_impl); + gen_cas_entry(MacroAssembler::byte, memory_order_relaxed); + AtomicStubMark mark_cmpxchg_4_relaxed + (_masm, &aarch64_atomic_cmpxchg_4_relaxed_impl); + gen_cas_entry(MacroAssembler::word, memory_order_relaxed); + AtomicStubMark mark_cmpxchg_8_relaxed + (_masm, &aarch64_atomic_cmpxchg_8_relaxed_impl); + gen_cas_entry(MacroAssembler::xword, memory_order_relaxed); + + ICache::invalidate_range(first_entry, __ pc() - first_entry); + } +#endif // LINUX + // Continuation point for throwing of implicit exceptions that are // not handled in the current activation. 
Fabricates an exception // oop and initiates normal exception dispatching in this @@ -6683,6 +6850,12 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_updateBytesAdler32 = generate_updateBytesAdler32(); } +#ifdef LINUX + + generate_atomic_entry_points(); + +#endif // LINUX + StubRoutines::aarch64::set_completed(); } @@ -6703,3 +6876,30 @@ void StubGenerator_generate(CodeBuffer* code, bool all) { } StubGenerator g(code, all); } + + +#ifdef LINUX + +// Define pointers to atomic stubs and initialize them to point to the +// code in atomic_aarch64.S. + +#define DEFAULT_ATOMIC_OP(OPNAME, SIZE, RELAXED) \ + extern "C" uint64_t aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _default_impl \ + (volatile void *ptr, uint64_t arg1, uint64_t arg2); \ + aarch64_atomic_stub_t aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _impl \ + = aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _default_impl; + +DEFAULT_ATOMIC_OP(fetch_add, 4, ) +DEFAULT_ATOMIC_OP(fetch_add, 8, ) +DEFAULT_ATOMIC_OP(xchg, 4, ) +DEFAULT_ATOMIC_OP(xchg, 8, ) +DEFAULT_ATOMIC_OP(cmpxchg, 1, ) +DEFAULT_ATOMIC_OP(cmpxchg, 4, ) +DEFAULT_ATOMIC_OP(cmpxchg, 8, ) +DEFAULT_ATOMIC_OP(cmpxchg, 1, _relaxed) +DEFAULT_ATOMIC_OP(cmpxchg, 4, _relaxed) +DEFAULT_ATOMIC_OP(cmpxchg, 8, _relaxed) + +#undef DEFAULT_ATOMIC_OP + +#endif // LINUX diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp index 5c13f3e6fb268089a26da258e406fb99cf32f9af..8cae18ad5a6a00f258e9027fec09da7ce06ad1cb 100644 --- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp @@ -95,6 +95,9 @@ void VM_Version::initialize() { SoftwarePrefetchHintDistance &= ~7; } + if (FLAG_IS_DEFAULT(ContendedPaddingWidth) && (dcache_line > ContendedPaddingWidth)) { + ContendedPaddingWidth = dcache_line; + } if (os::supports_map_sync()) { // if dcpop is available publish data cache line flush size via diff --git a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp index 2f7a8224ff8e1b0fe325f0663518b1383a1a7bbc..4a2285b392f672d6d0623ae39023bb87db15886b 100644 --- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -70,7 +70,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { #if (!defined(PRODUCT) && defined(COMPILER2)) if (CountCompiledCalls) { __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); - __ incrementw(Address(r16)); + __ increment(Address(r16)); } #endif @@ -145,6 +145,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { if (s == NULL) { return NULL; } + // Count unused bytes in instruction sequences of variable size. // We add them to the computed buffer size in order to avoid // overflow in subsequently generated stubs. 
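(For orientation: the "unused bytes" bookkeeping referenced in the comment above follows the same pattern on every platform; a condensed sketch with illustrative names, not the verbatim VtableStubs code:)

    // After emitting a variable-length sequence, bank the bytes the
    // worst-case estimate reserved but the sequence did not need.
    int slop_delta = estimated_max_len - (__ pc() - start_pc);
    assert(slop_delta >= 0, "negative slop encountered, adjust code size estimate!");
    slop_bytes += slop_delta;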
@@ -159,7 +160,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
 #if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
     __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
-    __ incrementw(Address(r10));
+    __ increment(Address(r10));
   }
 #endif
diff --git a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
index 5cba07805b9bfee32379014b8581477e2ed26030..f29d4c0744cf394ee5bb434668fd6284afe00172 100644
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
@@ -366,9 +366,6 @@ void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
 void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
   assert(addr->is_register(), "must be a register at this point");
 
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
-  CardTable* ct = ctbs->card_table();
-
   LIR_Opr tmp = FrameMap::LR_ptr_opr;
 
   bool load_card_table_base_const = VM_Version::supports_movw();
@@ -382,9 +379,6 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
   // byte instruction does not support the addressing mode we need.
   LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
   if (UseCondCardMark) {
-    if (ct->scanned_concurrently()) {
-      __ membar_storeload();
-    }
     LIR_Opr cur_value = new_register(T_INT);
     __ move(card_addr, cur_value);
@@ -394,9 +388,6 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
     set_card(tmp, card_addr);
     __ branch_destination(L_already_dirty->label());
   } else {
-    if (ct->scanned_concurrently()) {
-      __ membar_storestore();
-    }
     set_card(tmp, card_addr);
   }
 }
diff --git a/src/hotspot/cpu/arm/c1_globals_arm.hpp b/src/hotspot/cpu/arm/c1_globals_arm.hpp
index 7077a87092c28e5a6347a34782112b51839a6837..8f196bc5e6abb6f557157b824a9ddb82fdc70bd1 100644
--- a/src/hotspot/cpu/arm/c1_globals_arm.hpp
+++ b/src/hotspot/cpu/arm/c1_globals_arm.hpp
@@ -53,7 +53,6 @@ define_pd_global(bool, ProfileInterpreter, false);
 define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
 define_pd_global(uintx, CodeCacheMinBlockLength, 1);
 define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
-define_pd_global(size_t, MetaspaceSize, 12*M );
 define_pd_global(bool, NeverActAsServerClassMachine, true);
 define_pd_global(uint64_t, MaxRAM, 1ULL*G);
 define_pd_global(bool, CICompileOSR, true );
diff --git a/src/hotspot/cpu/arm/c2_globals_arm.hpp b/src/hotspot/cpu/arm/c2_globals_arm.hpp
index 525af8b1edc9e2dc521bc81bbf9b0be90e6a0c7b..7754001dd0af8f65f2a8ea45b0a31cbf9d606487 100644
--- a/src/hotspot/cpu/arm/c2_globals_arm.hpp
+++ b/src/hotspot/cpu/arm/c2_globals_arm.hpp
@@ -100,9 +100,6 @@ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
 
 define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed
 
-// Heap related flags
-define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M));
-
 // Ergonomics related flags
 define_pd_global(bool, NeverActAsServerClassMachine, false);
diff --git a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
index fc32418daa80a0b678d984bce6bb301966f49289..86f43597e220bb1b7f302dc59422cf0320e36de1 100644
--- a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
@@ -128,16 +128,10 @@ void 
CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
   assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind");
 
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
-  CardTable* ct = ctbs->card_table();
-
   assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
   Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
 
   if (UseCondCardMark) {
-    if (ct->scanned_concurrently()) {
-      __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), noreg);
-    }
     Label already_dirty;
 
     __ ldrb(tmp, card_table_addr);
@@ -147,9 +141,6 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
     __ bind(already_dirty);
   } else {
-    if (ct->scanned_concurrently()) {
-      __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore), noreg);
-    }
     set_card(masm, card_table_base, card_table_addr, tmp);
   }
 }
diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
index b19bea1b2a12692d0f455b586ae3fa346c1d1264..66307e232b4be05067c206365e541e45952019bd 100644
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
@@ -251,16 +251,6 @@ bool SharedRuntime::is_wide_vector(int size) {
   return false;
 }
 
-size_t SharedRuntime::trampoline_size() {
-  return 16;
-}
-
-void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
-  InlinedAddress dest(destination);
-  __ indirect_jump(dest, Rtemp);
-  __ bind_literal(dest);
-}
-
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         VMRegPair *regs2,
@@ -1898,10 +1888,12 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
 }
 
-BufferBlob* SharedRuntime::make_native_invoker(address call_target,
-                                               int shadow_space_bytes,
-                                               const GrowableArray<VMReg>& input_registers,
-                                               const GrowableArray<VMReg>& output_registers) {
+#ifdef COMPILER2
+RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
+                                                int shadow_space_bytes,
+                                                const GrowableArray<VMReg>& input_registers,
+                                                const GrowableArray<VMReg>& output_registers) {
   Unimplemented();
   return nullptr;
 }
+#endif
diff --git a/src/hotspot/cpu/ppc/c1_globals_ppc.hpp b/src/hotspot/cpu/ppc/c1_globals_ppc.hpp
index 84cf043b22d8f1969ee9d9c6f75ce30e422b8a01..af6db7555dd13d2a928d6cfca6c748265c463e58 100644
--- a/src/hotspot/cpu/ppc/c1_globals_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c1_globals_ppc.hpp
@@ -51,7 +51,6 @@ define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
 define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
 define_pd_global(uintx, CodeCacheMinBlockLength, 1);
 define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
-define_pd_global(size_t, MetaspaceSize, 12*M);
 define_pd_global(bool, NeverActAsServerClassMachine, true);
 define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
 define_pd_global(uint64_t, MaxRAM, 1ULL*G);
diff --git a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
index 88377001bcfecaa521156864d6e7f6a2b1c7341b..bb103cdf6091b8b05f29a9c6ae38efc8083f66b4 100644
--- a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
@@ -93,9 +93,6 @@ define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
 
 define_pd_global(bool, TrapBasedRangeChecks, true);
 
-// Heap related flags
-define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M));
-
 // Ergonomics related flags
 define_pd_global(bool, 
NeverActAsServerClassMachine, false);
diff --git a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
index cc78b0191539aaf75546bbffffd32285a1a797a6..800b34e4ba73663e206807a0c0ff7b489612c296 100644
--- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
@@ -26,6 +26,7 @@
 #include "nativeInst_ppc.hpp"
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "classfile/classLoaderData.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "gc/shared/barrierSetNMethod.hpp"
 #include "interpreter/interp_masm.hpp"
diff --git a/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
index fd0c4c6a54087212d928a14ff478bbbd55df1ee0..8337317e3f2cc1a431aa8832d87005a72c222381 100644
--- a/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
@@ -49,8 +49,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
 
   Label Lskip_loop, Lstore_loop;
 
-  if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
-
   __ sldi_(count, count, LogBytesPerHeapOop);
   __ beq(CCR0, Lskip_loop); // zero length
   __ addi(count, count, -BytesPerHeapOop);
@@ -74,13 +72,10 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
 
 void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
                                                     CardTable::CardValue* byte_map_base,
                                                     Register tmp, Register obj) {
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
-  CardTable* ct = ctbs->card_table();
   assert_different_registers(obj, tmp, R0);
   __ load_const_optimized(tmp, (address)byte_map_base, R0);
   __ srdi(obj, obj, CardTable::card_shift);
   __ li(R0, CardTable::dirty_card_val());
-  if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
   __ stbx(R0, tmp, obj);
 }
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 0c3b96fa9ba96994728377f5ce6b7f090944d442..a1642b0ad169eb0ee9b54ade15b95d25a007e5ed 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -3035,36 +3035,6 @@ encode %{
     __ stfd($src$$FloatRegister, Idisp, $mem$$base$$Register);
   %}
 
-  // Use release_store for card-marking to ensure that previous
-  // oop-stores are visible before the card-mark change.
-  enc_class enc_cms_card_mark(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{
-    // FIXME: Implement this as a cmove and use a fixed condition code
-    // register which is written on every transition to compiled code,
-    // e.g. in call-stub and when returning from runtime stubs.
-    //
-    // Proposed code sequence for the cmove implementation:
-    //
-    //   Label skip_release;
-    //   __ beq(CCRfixed, skip_release);
-    //   __ release();
-    //   __ bind(skip_release);
-    //   __ stb(card mark);
-
-    C2_MacroAssembler _masm(&cbuf);
-    Label skip_storestore;
-
-    __ li(R0, 0);
-    __ membar(Assembler::StoreStore);
-
-    // Do the store. 
- if ($mem$$index == 0) { - __ stb(R0, $mem$$disp, $mem$$base$$Register); - } else { - assert(0 == $mem$$disp, "no displacement possible with indexed load/stores on ppc"); - __ stbx(R0, $mem$$base$$Register, $mem$$index$$Register); - } - %} - enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{ if (VM_Version::has_isel()) { @@ -6601,37 +6571,15 @@ instruct storeD(memory mem, regD src) %{ //----------Store Instructions With Zeros-------------------------------------- -// Card-mark for CMS garbage collection. -// This cardmark does an optimization so that it must not always -// do a releasing store. For this, it gets the address of -// CMSCollectorCardTableBarrierSetBSExt::_requires_release as input. -// (Using releaseFieldAddr in the match rule is a hack.) -instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{ - match(Set mem (StoreCM mem releaseFieldAddr)); - effect(TEMP crx); - predicate(false); - ins_cost(MEMORY_REF_COST); - - // See loadConP. - ins_cannot_rematerialize(true); - - format %{ "STB #0, $mem \t// CMS card-mark byte (must be 0!), checking requires_release in [$releaseFieldAddr]" %} - ins_encode( enc_cms_card_mark(mem, releaseFieldAddr, crx) ); - ins_pipe(pipe_class_memory); -%} - -instruct storeCM_G1(memory mem, immI_0 zero) %{ +instruct storeCM(memory mem, immI_0 zero) %{ match(Set mem (StoreCM mem zero)); - predicate(UseG1GC); ins_cost(MEMORY_REF_COST); - ins_cannot_rematerialize(true); - - format %{ "STB #0, $mem \t// CMS card-mark byte store (G1)" %} + format %{ "STB #0, $mem \t// CMS card-mark byte store" %} size(8); ins_encode %{ __ li(R0, 0); - //__ release(); // G1: oops are allowed to get visible after dirty marking + // No release barrier: Oops are allowed to get visible after marking. 
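+      // (Concurrent card-table scanning is gone from the code base, so the
+      //  scanned_concurrently() StoreStore barriers are removed throughout
+      //  this change; a plain byte store is all that is needed here.)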
guarantee($mem$$base$$Register != R1_SP, "use frame_slots_bias");
     __ stb(R0, $mem$$disp, $mem$$base$$Register);
   %}
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 1e5cd5a5903501e1e7e2d84794b98ac1726207ac..32e6eb3c341a2d114aa29728eb2a387d975d565d 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -564,17 +564,6 @@ bool SharedRuntime::is_wide_vector(int size) {
   return size > 8;
 }
 
-size_t SharedRuntime::trampoline_size() {
-  return Assembler::load_const_size + 8;
-}
-
-void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
-  Register Rtemp = R12;
-  __ load_const(Rtemp, destination);
-  __ mtctr(Rtemp);
-  __ bctr();
-}
-
 static int reg2slot(VMReg r) {
   return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
 }
@@ -3442,10 +3431,12 @@ void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
   reverse_words(m, (unsigned long *)m_ints, longwords);
 }
 
-BufferBlob* SharedRuntime::make_native_invoker(address call_target,
-                                               int shadow_space_bytes,
-                                               const GrowableArray<VMReg>& input_registers,
-                                               const GrowableArray<VMReg>& output_registers) {
+#ifdef COMPILER2
+RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
+                                                int shadow_space_bytes,
+                                                const GrowableArray<VMReg>& input_registers,
+                                                const GrowableArray<VMReg>& output_registers) {
   Unimplemented();
   return nullptr;
 }
+#endif
diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
index f762f62e226a02bd29ec91437e436ced179b5529..e632ec55e0b236d181341a60543a7e25868177d7 100644
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
@@ -2598,7 +2598,7 @@ class StubGenerator: public StubCodeGenerator {
 
     address start = __ function_entry();
 
-    Label L_doLast;
+    Label L_doLast, L_error;
 
     Register from = R3_ARG1; // source array address
     Register to = R4_ARG2;   // destination array address
@@ -2628,7 +2628,7 @@
 
     __ li (fifteen, 15);
 
-    // load unaligned from[0-15] to vsRet
+    // load unaligned from[0-15] to vRet
     __ lvx (vRet, from);
     __ lvx (vTmp1, fifteen, from);
     __ lvsl (fromPerm, from);
@@ -2743,6 +2743,11 @@
     __ cmpwi (CCR0, keylen, 52);
     __ beq (CCR0, L_doLast);
 
+#ifdef ASSERT
+    __ cmpwi (CCR0, keylen, 60);
+    __ bne (CCR0, L_error);
+#endif
+
     // 12th - 13th rounds
     __ vcipher (vRet, vRet, vKey1);
     __ vcipher (vRet, vRet, vKey2);
@@ -2763,29 +2768,30 @@
     __ vcipher (vRet, vRet, vKey1);
     __ vcipherlast (vRet, vRet, vKey2);
 
-    // store result (unaligned)
 #ifdef VM_LITTLE_ENDIAN
-    __ lvsl (toPerm, to);
-#else
-    __ lvsr (toPerm, to);
-#endif
-    __ vspltisb (vTmp3, -1);
-    __ vspltisb (vTmp4, 0);
-    __ lvx (vTmp1, to);
-    __ lvx (vTmp2, fifteen, to);
-#ifdef VM_LITTLE_ENDIAN
-    __ vperm (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
-    __ vxor (toPerm, toPerm, fSplt); // swap bytes
-#else
-    __ vperm (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
+    // toPerm = 0x0F0E0D0C0B0A09080706050403020100
+    __ lvsl (toPerm, keypos); // keypos is a multiple of 16
+    __ vxor (toPerm, toPerm, fSplt);
+
+    // Swap Bytes
+    __ vperm (vRet, vRet, vRet, toPerm);
 #endif
-    __ vperm (vTmp4, vRet, vRet, toPerm); // rotate data
-    __ vsel (vTmp2, vTmp4, vTmp2, vTmp3);
-    __ vsel (vTmp1, vTmp1, vTmp4, vTmp3);
-    __ stvx (vTmp2, fifteen, to); // store this one first (may alias)
-    __ stvx (vTmp1, to);
+
+    // store result (unaligned) 
+ // Note: We can't use a read-modify-write sequence which touches additional Bytes. + Register lo = temp, hi = fifteen; // Reuse + __ vsldoi (vTmp1, vRet, vRet, 8); + __ mfvrd (hi, vRet); + __ mfvrd (lo, vTmp1); + __ std (hi, 0 LITTLE_ENDIAN_ONLY(+ 8), to); + __ std (lo, 0 BIG_ENDIAN_ONLY(+ 8), to); __ blr(); + +#ifdef ASSERT + __ bind(L_error); + __ stop("aescrypt_encryptBlock: invalid key length"); +#endif return start; } @@ -2799,9 +2805,7 @@ class StubGenerator: public StubCodeGenerator { address start = __ function_entry(); - Label L_doLast; - Label L_do44; - Label L_do52; + Label L_doLast, L_do44, L_do52, L_error; Register from = R3_ARG1; // source array address Register to = R4_ARG2; // destination array address @@ -2832,7 +2836,7 @@ class StubGenerator: public StubCodeGenerator { __ li (fifteen, 15); - // load unaligned from[0-15] to vsRet + // load unaligned from[0-15] to vRet __ lvx (vRet, from); __ lvx (vTmp1, fifteen, from); __ lvsl (fromPerm, from); @@ -2861,6 +2865,11 @@ class StubGenerator: public StubCodeGenerator { __ cmpwi (CCR0, keylen, 52); __ beq (CCR0, L_do52); +#ifdef ASSERT + __ cmpwi (CCR0, keylen, 60); + __ bne (CCR0, L_error); +#endif + // load the 15th round key to vKey1 __ li (keypos, 240); __ lvx (vKey1, keypos, key); @@ -2897,6 +2906,7 @@ class StubGenerator: public StubCodeGenerator { __ b (L_doLast); + __ align(32); __ bind (L_do52); // load the 13th round key to vKey1 @@ -2923,6 +2933,7 @@ class StubGenerator: public StubCodeGenerator { __ b (L_doLast); + __ align(32); __ bind (L_do44); // load the 11th round key to vKey1 @@ -3000,29 +3011,30 @@ class StubGenerator: public StubCodeGenerator { __ vncipher (vRet, vRet, vKey4); __ vncipherlast (vRet, vRet, vKey5); - // store result (unaligned) -#ifdef VM_LITTLE_ENDIAN - __ lvsl (toPerm, to); -#else - __ lvsr (toPerm, to); -#endif - __ vspltisb (vTmp3, -1); - __ vspltisb (vTmp4, 0); - __ lvx (vTmp1, to); - __ lvx (vTmp2, fifteen, to); #ifdef VM_LITTLE_ENDIAN - __ vperm (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask - __ vxor (toPerm, toPerm, fSplt); // swap bytes -#else - __ vperm (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask + // toPerm = 0x0F0E0D0C0B0A09080706050403020100 + __ lvsl (toPerm, keypos); // keypos is a multiple of 16 + __ vxor (toPerm, toPerm, fSplt); + + // Swap Bytes + __ vperm (vRet, vRet, vRet, toPerm); #endif - __ vperm (vTmp4, vRet, vRet, toPerm); // rotate data - __ vsel (vTmp2, vTmp4, vTmp2, vTmp3); - __ vsel (vTmp1, vTmp1, vTmp4, vTmp3); - __ stvx (vTmp2, fifteen, to); // store this one first (may alias) - __ stvx (vTmp1, to); + + // store result (unaligned) + // Note: We can't use a read-modify-write sequence which touches additional Bytes. + Register lo = temp, hi = fifteen; // Reuse + __ vsldoi (vTmp1, vRet, vRet, 8); + __ mfvrd (hi, vRet); + __ mfvrd (lo, vTmp1); + __ std (hi, 0 LITTLE_ENDIAN_ONLY(+ 8), to); + __ std (lo, 0 BIG_ENDIAN_ONLY(+ 8), to); __ blr(); + +#ifdef ASSERT + __ bind(L_error); + __ stop("aescrypt_decryptBlock: invalid key length"); +#endif return start; } diff --git a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp index 796a8b1bf78856fb9d3ec0d860806f7f67cd011a..14d3568d5d73e3288ea5095e6fafceb031088a8c 100644 --- a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2018 SAP SE. All rights reserved. 
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2021 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,9 +73,9 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { slop_delta = load_const_maxLen - (__ pc() - start_pc); slop_bytes += slop_delta; assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta); - __ lwz(R12_scratch2, offs, R11_scratch1); + __ ld(R12_scratch2, offs, R11_scratch1); __ addi(R12_scratch2, R12_scratch2, 1); - __ stw(R12_scratch2, offs, R11_scratch1); + __ std(R12_scratch2, offs, R11_scratch1); } #endif @@ -141,6 +141,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { if (s == NULL) { return NULL; } + // Count unused bytes in instruction sequences of variable size. // We add them to the computed buffer size in order to avoid // overflow in subsequently generated stubs. @@ -160,9 +161,9 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { slop_delta = load_const_maxLen - (__ pc() - start_pc); slop_bytes += slop_delta; assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta); - __ lwz(R12_scratch2, offs, R11_scratch1); + __ ld(R12_scratch2, offs, R11_scratch1); __ addi(R12_scratch2, R12_scratch2, 1); - __ stw(R12_scratch2, offs, R11_scratch1); + __ std(R12_scratch2, offs, R11_scratch1); } #endif diff --git a/src/hotspot/cpu/s390/c1_globals_s390.hpp b/src/hotspot/cpu/s390/c1_globals_s390.hpp index ec08cda96934c4b9274817fd2accf59c27224c4f..a939b32bd5be42c1c1eca89b59c4bbbf2af203d5 100644 --- a/src/hotspot/cpu/s390/c1_globals_s390.hpp +++ b/src/hotspot/cpu/s390/c1_globals_s390.hpp @@ -51,7 +51,6 @@ define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M); define_pd_global(uintx, CodeCacheExpansionSize, 32*K); define_pd_global(uintx, CodeCacheMinBlockLength, 1); define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); -define_pd_global(size_t, MetaspaceSize, 12*M); define_pd_global(bool, NeverActAsServerClassMachine, true); define_pd_global(size_t, NewSizeThreadIncrease, 16*K); define_pd_global(uint64_t, MaxRAM, 1ULL*G); diff --git a/src/hotspot/cpu/s390/c2_globals_s390.hpp b/src/hotspot/cpu/s390/c2_globals_s390.hpp index 0704a4bdab7ee95cf37410972cbc12866751be47..e747f6c8c517905a8aef397dac93ffb575360b6c 100644 --- a/src/hotspot/cpu/s390/c2_globals_s390.hpp +++ b/src/hotspot/cpu/s390/c2_globals_s390.hpp @@ -82,9 +82,6 @@ define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on z/Architecture. -// Heap related flags -define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M)); - // Ergonomics related flags define_pd_global(bool, NeverActAsServerClassMachine, false); diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index 9c06a20ae70d0af70b0feddbdba6697c095b5b7d..7554a3f00e8a54d079e3faa425d80675e3e09271 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -3603,6 +3603,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible. 
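The s390 encoder rewrite that follows computes the narrow klass as (klass >> shift) - (base >> shift) instead of (klass - base) >> shift, so that after the initial shift every step is 32-bit arithmetic. A minimal scalar sketch of that identity (plain C++, not HotSpot code; it relies on the base being aligned to the shift and on the scaled difference fitting in 32 bits, which the comments below argue):

    #include <cassert>
    #include <cstdint>

    uint32_t encode_klass(uint64_t klass, uint64_t base, unsigned shift) {
      // Scale down first: no low bits are lost because both values are
      // aligned to (1 << shift).
      uint64_t offset = (klass >> shift) - (base >> shift);
      assert(offset <= UINT32_MAX && "narrow klass range guarantee");
      return (uint32_t)offset;  // explicit zero-extension, as in the asm
    }
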
address base = CompressedKlassPointers::base(); int shift = CompressedKlassPointers::shift(); + bool need_zero_extend = base != 0; assert(UseCompressedClassPointers, "only for compressed klass ptrs"); BLOCK_COMMENT("cKlass encoder {"); @@ -3619,28 +3620,76 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { bind(ok); #endif - if (base != NULL) { - unsigned int base_h = ((unsigned long)base)>>32; - unsigned int base_l = (unsigned int)((unsigned long)base); - if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { - lgr_if_needed(dst, current); - z_aih(dst, -((int)base_h)); // Base has no set bits in lower half. - } else if ((base_h == 0) && (base_l != 0)) { - lgr_if_needed(dst, current); - z_agfi(dst, -(int)base_l); - } else { - load_const(Z_R0, base); - lgr_if_needed(dst, current); - z_sgr(dst, Z_R0); - } - current = dst; - } + // Scale down the incoming klass pointer first. + // We then can be sure we calculate an offset that fits into 32 bit. + // More generally speaking: all subsequent calculations are purely 32-bit. if (shift != 0) { assert (LogKlassAlignmentInBytes == shift, "decode alg wrong"); z_srlg(dst, current, shift); current = dst; } - lgr_if_needed(dst, current); // Move may be required (if neither base nor shift != 0). + + if (base != NULL) { + // Use scaled-down base address parts to match scaled-down klass pointer. + unsigned int base_h = ((unsigned long)base)>>(32+shift); + unsigned int base_l = (unsigned int)(((unsigned long)base)>>shift); + + // General considerations: + // - when calculating (current_h - base_h), all digits must cancel (become 0). + // Otherwise, we would end up with a compressed klass pointer which doesn't + // fit into 32-bit. + // - Only bit#33 of the difference could potentially be non-zero. For that + // to happen, (current_l < base_l) must hold. In this case, the subtraction + // will create a borrow out of bit#32, nicely killing bit#33. + // - With the above, we only need to consider current_l and base_l to + // calculate the result. + // - Both values are treated as unsigned. The unsigned subtraction is + // replaced by adding (unsigned) the 2's complement of the subtrahend. + + if (base_l == 0) { + // - By theory, the calculation to be performed here (current_h - base_h) MUST + // cancel all high-word bits. Otherwise, we would end up with an offset + // (i.e. compressed klass pointer) that does not fit into 32 bit. + // - current_l remains unchanged. + // - Therefore, we can replace all calculation with just a + // zero-extending load 32 to 64 bit. + // - Even that can be replaced with a conditional load if dst != current. + // (this is a local view. The shift step may have requested zero-extension). + } else { + if ((base_h == 0) && is_uimm(base_l, 31)) { + // If we happen to find that (base_h == 0), and that base_l is within the range + // which can be represented by a signed int, then we can use 64bit signed add with + // (-base_l) as 32bit signed immediate operand. The add will take care of the + // upper 32 bits of the result, saving us the need of an extra zero extension. + // For base_l to be in the required range, it must not have the most significant + // bit (aka sign bit) set. + lgr_if_needed(dst, current); // no zero/sign extension in this case! + z_agfi(dst, -(int)base_l); // base_l must be passed as signed. + need_zero_extend = false; + current = dst; + } else { + // To begin with, we may need to copy and/or zero-extend the register operand. + // We have to calculate (current_l - base_l). 
Because there is no unsigned
+ // subtract instruction with immediate operand, we add the 2's complement of base_l.
+ if (need_zero_extend) {
+ z_llgfr(dst, current);
+ need_zero_extend = false;
+ } else {
+ llgfr_if_needed(dst, current);
+ }
+ current = dst;
+ z_alfi(dst, -base_l);
+ }
+ }
+ }
+
+ if (need_zero_extend) {
+ // We must zero-extend the calculated result. It may have some leftover bits in
+ // the hi-word because we only did optimized calculations.
+ z_llgfr(dst, current);
+ } else {
+ llgfr_if_needed(dst, current); // zero-extension while copying comes at no extra cost.
+ }
 BLOCK_COMMENT("} cKlass encoder");
}
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index 79980aeb670a582de67efbce03f38d0a9f80e118..43c5bba706afa2f8371fb85e2a56edcaddf5f820 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -556,16 +556,6 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 }
}
-size_t SharedRuntime::trampoline_size() {
- return MacroAssembler::load_const_size() + 2;
-}
-
-void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
- // Think about using pc-relative branch.
- __ load_const(Z_R1_scratch, destination);
- __ z_br(Z_R1_scratch);
-}
-
// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler * masm,
 BasicType ret_type,
@@ -3468,10 +3458,12 @@ int SpinPause() {
 return 0;
}
-BufferBlob* SharedRuntime::make_native_invoker(address call_target,
- int shadow_space_bytes,
- const GrowableArray& input_registers,
- const GrowableArray& output_registers) {
+#ifdef COMPILER2
+RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
+ int shadow_space_bytes,
+ const GrowableArray& input_registers,
+ const GrowableArray& output_registers) {
 Unimplemented();
 return nullptr;
}
+#endif
diff --git a/src/hotspot/cpu/s390/vtableStubs_s390.cpp b/src/hotspot/cpu/s390/vtableStubs_s390.cpp
index 306cce9395d2f42ac3f37f95693688dd0b4c8e34..56a9e36721ce4baa026abceb225707aaa940e80c 100644
--- a/src/hotspot/cpu/s390/vtableStubs_s390.cpp
+++ b/src/hotspot/cpu/s390/vtableStubs_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
 // Abuse Z_method as scratch register for generic emitter.
 // It is loaded further down anyway before it is first used.
 // No dynamic code size variance here, increment is 1, always.
- __ add2mem_32(Address(Z_R1_scratch), 1, Z_method);
+ __ add2mem_64(Address(Z_R1_scratch), 1, Z_method);
 }
 #endif
@@ -158,6 +158,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
 if (s == NULL) {
 return NULL;
 }
+
 // Count unused bytes in instruction sequences of variable size.
 // We add them to the computed buffer size in order to avoid
 // overflow in subsequently generated stubs.
@@ -179,7 +180,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
 // Abuse Z_method as scratch register for generic emitter.
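These CountCompiledCalls hunks (ppc above, s390 here, x86_64 later in this patch) widen the counter update from 32 to 64 bits. A standalone sketch of the failure mode, assuming, as the new ld/std and add2mem_64 forms suggest, that the counter itself is a 64-bit field:

    #include <cstdint>
    #include <cstring>

    int64_t counter = 0;

    void bump_narrow() {               // old lwz/add/stw-style update
      uint32_t half;
      std::memcpy(&half, &counter, 4); // on big-endian this is the HIGH half
      half += 1;
      std::memcpy(&counter, &half, 4); // truncated, possibly wrong-half update
    }

    void bump_wide() {                 // new ld/add/std-style update
      counter += 1;
    }
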
// It is loaded further down anyway before it is first used. // No dynamic code size variance here, increment is 1, always. - __ add2mem_32(Address(Z_R1_scratch), 1, Z_method); + __ add2mem_64(Address(Z_R1_scratch), 1, Z_method); } #endif diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp index 16446d895f570202eba60194ce459366973d91ca..241deb2867f33dfebe98c19cc8b31c8ad9bd60f4 100644 --- a/src/hotspot/cpu/x86/assembler_x86.cpp +++ b/src/hotspot/cpu/x86/assembler_x86.cpp @@ -9173,6 +9173,13 @@ void Assembler::evpblendmq (XMMRegister dst, KRegister mask, XMMRegister nds, XM emit_int16(0x64, (0xC0 | encode)); } +void Assembler::bzhiq(Register dst, Register src1, Register src2) { + assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); + InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); + int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); + emit_int16((unsigned char)0xF5, (0xC0 | encode)); +} + void Assembler::shlxl(Register dst, Register src1, Register src2) { assert(VM_Version::supports_bmi2(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); diff --git a/src/hotspot/cpu/x86/assembler_x86.hpp b/src/hotspot/cpu/x86/assembler_x86.hpp index 0304a882a33046183822402a99b315a61ebb2a53..b73d1d2501f84e6685cd50a6f2ee730c216b6efa 100644 --- a/src/hotspot/cpu/x86/assembler_x86.hpp +++ b/src/hotspot/cpu/x86/assembler_x86.hpp @@ -2092,6 +2092,7 @@ private: void shlxq(Register dst, Register src1, Register src2); void shrxq(Register dst, Register src1, Register src2); + void bzhiq(Register dst, Register src1, Register src2); //====================VECTOR ARITHMETIC===================================== void evpmovd2m(KRegister kdst, XMMRegister src, int vector_len); diff --git a/src/hotspot/cpu/x86/c1_globals_x86.hpp b/src/hotspot/cpu/x86/c1_globals_x86.hpp index a2f88c28642214d155266e6a0d306c4ecb745312..9212e321d65baa7cd95267bea3f7970a4f6dd5ac 100644 --- a/src/hotspot/cpu/x86/c1_globals_x86.hpp +++ b/src/hotspot/cpu/x86/c1_globals_x86.hpp @@ -51,7 +51,6 @@ define_pd_global(bool, ProfileInterpreter, false); define_pd_global(uintx, CodeCacheExpansionSize, 32*K ); define_pd_global(uintx, CodeCacheMinBlockLength, 1 ); define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); -define_pd_global(size_t, MetaspaceSize, 12*M ); define_pd_global(bool, NeverActAsServerClassMachine, true ); define_pd_global(uint64_t, MaxRAM, 1ULL*G); define_pd_global(bool, CICompileOSR, true ); diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp index 4b7649cca535e8757d1cffc819ab20b1bc83714c..a4744970aca4be4082229be1d75cf96f9a780ba9 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp @@ -1894,17 +1894,9 @@ void C2_MacroAssembler::reduce8L(int opcode, Register dst, Register src1, XMMReg } void C2_MacroAssembler::genmask(Register dst, Register len, Register temp) { - if (ArrayCopyPartialInlineSize <= 32) { - mov64(dst, 1); - shlxq(dst, dst, len); - decq(dst); - } else { - mov64(dst, -1); - movq(temp, len); - negptr(temp); - addptr(temp, 64); - shrxq(dst, dst, temp); - } + assert(ArrayCopyPartialInlineSize <= 64,""); + mov64(dst, -1L); + bzhiq(dst, dst, len); } #endif // _LP64 diff --git 
a/src/hotspot/cpu/x86/c2_globals_x86.hpp b/src/hotspot/cpu/x86/c2_globals_x86.hpp index 42704bcd57052f76ddb9330806a10c40bb4c866c..776caa30cf9a55b104136aaf2e138d3d00bd09c4 100644 --- a/src/hotspot/cpu/x86/c2_globals_x86.hpp +++ b/src/hotspot/cpu/x86/c2_globals_x86.hpp @@ -91,9 +91,6 @@ define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on x86. -// Heap related flags -define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(16*M)); - // Ergonomics related flags define_pd_global(bool, NeverActAsServerClassMachine, false); diff --git a/src/hotspot/cpu/x86/frame_x86.cpp b/src/hotspot/cpu/x86/frame_x86.cpp index 1234bc88dbcc736f05449a42ed22ff9b5c2941b8..6195d479a8c48b6c94b13fccf61f2962aedbde74 100644 --- a/src/hotspot/cpu/x86/frame_x86.cpp +++ b/src/hotspot/cpu/x86/frame_x86.cpp @@ -346,10 +346,6 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const { vmassert(jfa->last_Java_pc() != NULL, "not walkable"); frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); - if (jfa->saved_rbp_address()) { - update_map_with_saved_link(map, jfa->saved_rbp_address()); - } - return fr; } diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp index 274cc1a8702ba8e54f6bb555802ef387a2cf3c9e..6e820f98689141a8c625eeca5d8a50b4b5b3882a 100644 --- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSetAssembler.hpp" #include "gc/shared/barrierSetNMethod.hpp" diff --git a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp index c00ea223f00dcca556d521f1d937ae9fc833a124..9b2d2c5efedcee2eba8c4a90adfe02b7e054dd7c 100644 --- a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp @@ -118,9 +118,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob int dirty = CardTable::dirty_card_val(); if (UseCondCardMark) { Label L_already_dirty; - if (ct->scanned_concurrently()) { - __ membar(Assembler::StoreLoad); - } __ cmpb(card_addr, dirty); __ jcc(Assembler::equal, L_already_dirty); __ movb(card_addr, dirty); diff --git a/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp b/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp index 4579b7377a0e9eae467ac2b694b5922a4b363560..bb39c8e513e51ceac3b6d501bec117564348970c 100644 --- a/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp +++ b/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp @@ -30,9 +30,6 @@ private: // FP value associated with _last_Java_sp: intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to - // (Optional) location of saved RBP register, which GCs want to inspect - intptr_t** volatile _saved_rbp_address; - public: // Each arch must define reset, save, restore // These are used by objects that only care about: @@ -46,7 +43,6 @@ public: // fence? 
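The cardTableBarrierSetAssembler hunk above can drop its StoreLoad fence because the scanned_concurrently case is gone from this barrier set. What remains of the UseCondCardMark path, as a scalar sketch with HotSpot types simplified:

    #include <cstdint>

    typedef uint8_t CardValue;
    const CardValue dirty_card = 0;  // CardTable::dirty_card_val()

    void store_check(volatile CardValue* card_addr) {
      if (*card_addr != dirty_card) {  // cmpb/jcc: skip if already dirty
        *card_addr = dirty_card;       // movb: dirty the card
      }
    }
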
_last_Java_fp = NULL; _last_Java_pc = NULL; - _saved_rbp_address = NULL; } void copy(JavaFrameAnchor* src) { @@ -64,8 +60,6 @@ public: _last_Java_pc = src->_last_Java_pc; // Must be last so profiler will always see valid frame if has_last_frame() is true _last_Java_sp = src->_last_Java_sp; - - _saved_rbp_address = src->_saved_rbp_address; } bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; } @@ -76,12 +70,9 @@ public: address last_Java_pc(void) { return _last_Java_pc; } - intptr_t** saved_rbp_address(void) const { return _saved_rbp_address; } - private: static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); } - static ByteSize saved_rbp_address_offset() { return byte_offset_of(JavaFrameAnchor, _saved_rbp_address); } public: diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index d6e7783bee1719930183cd2c43a7402d482fbe03..ee3f1a8533459829640078d1fa0dcc90d275b863 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -2732,7 +2732,6 @@ void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) } // Always clear the pc because it could have been set by make_walkable() movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); - movptr(Address(java_thread, JavaThread::saved_rbp_address_offset()), NULL_WORD); vzeroupper(); } @@ -3005,6 +3004,16 @@ void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src } } +void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { + assert(UseAVX > 0, "requires some form of AVX"); + if (reachable(src)) { + Assembler::vpaddb(dst, nds, as_Address(src), vector_len); + } else { + lea(rscratch, src); + Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len); + } +} + void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { assert(UseAVX > 0, "requires some form of AVX"); if (reachable(src)) { diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp index 5cabc583fc0e6becab0c033608f2543e070b9b91..a92155ec5ad1fa37f7d3ea1a8c8b8320aa75388c 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -1245,6 +1245,7 @@ public: void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); + void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch); void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86_arrayCopy_avx3.cpp b/src/hotspot/cpu/x86/macroAssembler_x86_arrayCopy_avx3.cpp index b03338a82089d67d77051a22991bfdc4d967d409..ff92b5647ba84538a3786fb0920b9a49c5a4fc4e 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86_arrayCopy_avx3.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86_arrayCopy_avx3.cpp @@ -196,10 +196,8 @@ void MacroAssembler::copy64_masked_avx(Register dst, Register src, XMMRegister x } else { Address::ScaleFactor scale = (Address::ScaleFactor)(shift); assert(MaxVectorSize == 64, "vector length != 64"); - negptr(length); - addq(length, 64); - mov64(temp, -1); - shrxq(temp, temp, length); + mov64(temp, -1L); + bzhiq(temp, temp, 
length); kmovql(mask, temp); evmovdqu(type[shift], mask, xmm, Address(src, index, scale, offset), Assembler::AVX_512bit); evmovdqu(type[shift], mask, Address(dst, index, scale, offset), xmm, Assembler::AVX_512bit); @@ -213,9 +211,8 @@ void MacroAssembler::copy32_masked_avx(Register dst, Register src, XMMRegister x assert(MaxVectorSize >= 32, "vector length should be >= 32"); BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; Address::ScaleFactor scale = (Address::ScaleFactor)(shift); - mov64(temp, 1); - shlxq(temp, temp, length); - decq(temp); + mov64(temp, -1L); + bzhiq(temp, temp, length); kmovql(mask, temp); evmovdqu(type[shift], mask, xmm, Address(src, index, scale, offset), Assembler::AVX_256bit); evmovdqu(type[shift], mask, Address(dst, index, scale, offset), xmm, Assembler::AVX_256bit); diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp index b807541f71c523b10b7f3e85b1f9075c18ce9f19..11a1a235741cb50f097e4addf39676a216e546f2 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp @@ -371,14 +371,6 @@ bool SharedRuntime::is_wide_vector(int size) { return size > 16; } -size_t SharedRuntime::trampoline_size() { - return 16; -} - -void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) { - __ jump(RuntimeAddress(destination)); -} - // The java_calling_convention describes stack locations as ideal slots on // a frame with no abi restrictions. Since we must observe abi restrictions // (like the placement of the register window) the slots must be biased by @@ -2980,10 +2972,12 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true); } -BufferBlob* SharedRuntime::make_native_invoker(address call_target, +#ifdef COMPILER2 +RuntimeStub* SharedRuntime::make_native_invoker(address call_target, int shadow_space_bytes, const GrowableArray& input_registers, const GrowableArray& output_registers) { ShouldNotCallThis(); return nullptr; } +#endif diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp index 837b59d44fe2ec799c5508e8eacbbbd6fc4f32b8..a2d7c5a82d3f24ce4555639ba87ef224f9b60171 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp @@ -423,14 +423,6 @@ bool SharedRuntime::is_wide_vector(int size) { return size > 16; } -size_t SharedRuntime::trampoline_size() { - return 16; -} - -void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) { - __ jump(RuntimeAddress(destination)); -} - // The java_calling_convention describes stack locations as ideal slots on // a frame with no abi restrictions. 
Since we must observe abi restrictions // (like the placement of the register window) the slots must be biased by @@ -3161,7 +3153,6 @@ void SharedRuntime::generate_uncommon_trap_blob() { } #endif // COMPILER2 - //------------------------------generate_handler_blob------ // // Generate a special Compile2Runtime blob that saves all registers, @@ -3410,6 +3401,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true); } +#ifdef COMPILER2 static const int native_invoker_code_size = MethodHandles::adapter_code_size; class NativeInvokerGenerator : public StubCodeGenerator { @@ -3418,6 +3410,10 @@ class NativeInvokerGenerator : public StubCodeGenerator { const GrowableArray& _input_registers; const GrowableArray& _output_registers; + + int _frame_complete; + int _framesize; + OopMapSet* _oop_maps; public: NativeInvokerGenerator(CodeBuffer* buffer, address call_target, @@ -3428,23 +3424,54 @@ public: _call_target(call_target), _shadow_space_bytes(shadow_space_bytes), _input_registers(input_registers), - _output_registers(output_registers) {} + _output_registers(output_registers), + _frame_complete(0), + _framesize(0), + _oop_maps(NULL) { + assert(_output_registers.length() <= 1 + || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns"); + + } + void generate(); - void spill_register(VMReg reg) { + int spill_size_in_bytes() const { + if (_output_registers.length() == 0) { + return 0; + } + VMReg reg = _output_registers.at(0); + assert(reg->is_reg(), "must be a register"); + if (reg->is_Register()) { + return 8; + } else if (reg->is_XMMRegister()) { + if (UseAVX >= 3) { + return 64; + } else if (UseAVX >= 1) { + return 32; + } else { + return 16; + } + } else { + ShouldNotReachHere(); + } + return 0; + } + + void spill_out_registers() { + if (_output_registers.length() == 0) { + return; + } + VMReg reg = _output_registers.at(0); assert(reg->is_reg(), "must be a register"); MacroAssembler* masm = _masm; if (reg->is_Register()) { - __ push(reg->as_Register()); + __ movptr(Address(rsp, 0), reg->as_Register()); } else if (reg->is_XMMRegister()) { if (UseAVX >= 3) { - __ subptr(rsp, 64); // bytes __ evmovdqul(Address(rsp, 0), reg->as_XMMRegister(), Assembler::AVX_512bit); } else if (UseAVX >= 1) { - __ subptr(rsp, 32); __ vmovdqu(Address(rsp, 0), reg->as_XMMRegister()); } else { - __ subptr(rsp, 16); __ movdqu(Address(rsp, 0), reg->as_XMMRegister()); } } else { @@ -3452,27 +3479,40 @@ public: } } - void fill_register(VMReg reg) { + void fill_out_registers() { + if (_output_registers.length() == 0) { + return; + } + VMReg reg = _output_registers.at(0); assert(reg->is_reg(), "must be a register"); MacroAssembler* masm = _masm; if (reg->is_Register()) { - __ pop(reg->as_Register()); + __ movptr(reg->as_Register(), Address(rsp, 0)); } else if (reg->is_XMMRegister()) { if (UseAVX >= 3) { __ evmovdqul(reg->as_XMMRegister(), Address(rsp, 0), Assembler::AVX_512bit); - __ addptr(rsp, 64); // bytes } else if (UseAVX >= 1) { __ vmovdqu(reg->as_XMMRegister(), Address(rsp, 0)); - __ addptr(rsp, 32); } else { __ movdqu(reg->as_XMMRegister(), Address(rsp, 0)); - __ addptr(rsp, 16); } } else { ShouldNotReachHere(); } } + int frame_complete() const { + return _frame_complete; + } + + int framesize() const { + return (_framesize >> (LogBytesPerWord - LogBytesPerInt)); + } + + OopMapSet* oop_maps() const { + return _oop_maps; + } + private: #ifdef 
ASSERT bool target_uses_register(VMReg reg) { @@ -3481,57 +3521,61 @@ bool target_uses_register(VMReg reg) { #endif }; -BufferBlob* SharedRuntime::make_native_invoker(address call_target, - int shadow_space_bytes, - const GrowableArray& input_registers, - const GrowableArray& output_registers) { - BufferBlob* _invoke_native_blob = BufferBlob::create("nep_invoker_blob", native_invoker_code_size); - if (_invoke_native_blob == NULL) - return NULL; // allocation failure - - CodeBuffer code(_invoke_native_blob); +RuntimeStub* SharedRuntime::make_native_invoker(address call_target, + int shadow_space_bytes, + const GrowableArray& input_registers, + const GrowableArray& output_registers) { + int locs_size = 64; + CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size); NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers); g.generate(); code.log_section_sizes("nep_invoker_blob"); - return _invoke_native_blob; + RuntimeStub* stub = + RuntimeStub::new_runtime_stub("nep_invoker_blob", + &code, + g.frame_complete(), + g.framesize(), + g.oop_maps(), false); + return stub; } void NativeInvokerGenerator::generate() { assert(!(target_uses_register(r15_thread->as_VMReg()) || target_uses_register(rscratch1->as_VMReg())), "Register conflict"); + enum layout { + rbp_off, + rbp_off2, + return_off, + return_off2, + framesize // inclusive of return address + }; + + _framesize = align_up(framesize + ((_shadow_space_bytes + spill_size_in_bytes()) >> LogBytesPerInt), 4); + assert(is_even(_framesize/2), "sp not 16-byte aligned"); + + _oop_maps = new OopMapSet(); MacroAssembler* masm = _masm; - __ enter(); - Address java_pc(r15_thread, JavaThread::last_Java_pc_offset()); - __ movptr(rscratch1, Address(rsp, 8)); // read return address from stack - __ movptr(java_pc, rscratch1); + address start = __ pc(); - __ movptr(rscratch1, rsp); - __ addptr(rscratch1, 16); // skip return and frame - __ movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), rscratch1); + __ enter(); - __ movptr(Address(r15_thread, JavaThread::saved_rbp_address_offset()), rsp); // rsp points at saved RBP + // return address and rbp are already in place + __ subptr(rsp, (_framesize-4) << LogBytesPerInt); // prolog - // State transition - __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native); + _frame_complete = __ pc() - start; - if (_shadow_space_bytes != 0) { - // needed here for correct stack args offset on Windows - __ subptr(rsp, _shadow_space_bytes); - } + address the_pc = __ pc(); - __ call(RuntimeAddress(_call_target)); + __ set_last_Java_frame(rsp, rbp, (address)the_pc); + OopMap* map = new OopMap(_framesize, 0); + _oop_maps->add_gc_map(the_pc - start, map); - if (_shadow_space_bytes != 0) { - // needed here for correct stack args offset on Windows - __ addptr(rsp, _shadow_space_bytes); - } + // State transition + __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native); - assert(_output_registers.length() <= 1 - || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns"); - bool need_spills = _output_registers.length() != 0; - VMReg ret_reg = need_spills ? 
_output_registers.at(0) : VMRegImpl::Bad(); + __ call(RuntimeAddress(_call_target)); __ restore_cpu_control_state_after_jni(); @@ -3572,9 +3616,7 @@ void NativeInvokerGenerator::generate() { __ bind(L_safepoint_poll_slow_path); __ vzeroupper(); - if (need_spills) { - spill_register(ret_reg); - } + spill_out_registers(); __ mov(c_rarg0, r15_thread); __ mov(r12, rsp); // remember sp @@ -3584,9 +3626,7 @@ void NativeInvokerGenerator::generate() { __ mov(rsp, r12); // restore sp __ reinit_heapbase(); - if (need_spills) { - fill_register(ret_reg); - } + fill_out_registers(); __ jmp(L_after_safepoint_poll); __ block_comment("} L_safepoint_poll_slow_path"); @@ -3597,9 +3637,7 @@ void NativeInvokerGenerator::generate() { __ bind(L_reguard); __ vzeroupper(); - if (need_spills) { - spill_register(ret_reg); - } + spill_out_registers(); __ mov(r12, rsp); // remember sp __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows @@ -3608,9 +3646,7 @@ void NativeInvokerGenerator::generate() { __ mov(rsp, r12); // restore sp __ reinit_heapbase(); - if (need_spills) { - fill_register(ret_reg); - } + fill_out_registers(); __ jmp(L_after_reguard); @@ -3620,6 +3656,7 @@ void NativeInvokerGenerator::generate() { __ flush(); } +#endif // COMPILER2 //------------------------------Montgomery multiplication------------------------ // diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp index 681db40dd31f9f669bb3f596a9245aeaf6d93f20..6ab857150006d807af41c5234dd2262d3638c52f 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -610,6 +610,21 @@ class StubGenerator: public StubCodeGenerator { return start; } + address generate_vector_byte_shuffle_mask(const char *stub_name) { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", stub_name); + address start = __ pc(); + __ emit_data(0x70707070, relocInfo::none, 0); + __ emit_data(0x70707070, relocInfo::none, 0); + __ emit_data(0x70707070, relocInfo::none, 0); + __ emit_data(0x70707070, relocInfo::none, 0); + __ emit_data(0xF0F0F0F0, relocInfo::none, 0); + __ emit_data(0xF0F0F0F0, relocInfo::none, 0); + __ emit_data(0xF0F0F0F0, relocInfo::none, 0); + __ emit_data(0xF0F0F0F0, relocInfo::none, 0); + return start; + } + address generate_vector_mask_long_double(const char *stub_name, int32_t maskhi, int32_t masklo) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", stub_name); @@ -3981,6 +3996,7 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32("vector_64_bit_mask", Assembler::AVX_512bit, 0xFFFFFFFF, 0xFFFFFFFF, 0, 0); StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask("vector_int_shuffle_mask", 0x03020100); + StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask("vector_byte_shuffle_mask"); StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask("vector_short_shuffle_mask", 0x01000100); StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask_long_double("vector_long_shuffle_mask", 0x00000001, 0x0); StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask("vector_byte_perm_mask"); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp index be8377cde0d706b0519a467948ec0fe4109b6b2e..e1fa12d6509ef6331ded37f9ca6d578b6abf79f3 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -808,6 +808,17 @@ class StubGenerator: public StubCodeGenerator { return start; } + address generate_vector_byte_shuffle_mask(const char *stub_name) { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", stub_name); + address start = __ pc(); + __ emit_data64(0x7070707070707070, relocInfo::none); + __ emit_data64(0x7070707070707070, relocInfo::none); + __ emit_data64(0xF0F0F0F0F0F0F0F0, relocInfo::none); + __ emit_data64(0xF0F0F0F0F0F0F0F0, relocInfo::none); + return start; + } + address generate_fp_mask(const char *stub_name, int64_t mask) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", stub_name); @@ -1471,6 +1482,7 @@ class StubGenerator: public StubCodeGenerator { __ subq(temp1, loop_size[shift]); // Main loop with aligned copy block size of 192 bytes at 32 byte granularity. + __ align(32); __ BIND(L_main_loop); __ copy64_avx(to, from, temp4, xmm1, false, shift, 0); __ copy64_avx(to, from, temp4, xmm1, false, shift, 64); @@ -1537,6 +1549,7 @@ class StubGenerator: public StubCodeGenerator { // Main loop with aligned copy block size of 192 bytes at // 64 byte copy granularity. + __ align(32); __ BIND(L_main_loop_64bytes); __ copy64_avx(to, from, temp4, xmm1, false, shift, 0 , true); __ copy64_avx(to, from, temp4, xmm1, false, shift, 64, true); @@ -1676,6 +1689,7 @@ class StubGenerator: public StubCodeGenerator { __ BIND(L_main_pre_loop); // Main loop with aligned copy block size of 192 bytes at 32 byte granularity. 
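The arraycopy stubs below now additionally require VM_Version::supports_bmi2(), because the masked tail-copy helpers earlier in this patch were rewritten around a single BZHI instruction. A scalar sketch of its semantics and of the mask generation built on it:

    #include <cstdint>

    // BZHI (BMI2): copy src, then zero every bit at position >= n.
    uint64_t bzhi64(uint64_t src, unsigned n) {
      return n >= 64 ? src : (src & ((uint64_t(1) << n) - 1));
    }

    // genmask(len): low `len` bits set, as the new mov64(-1) + bzhiq pair
    // produces, replacing the old two-path shlx/shrx sequences.
    uint64_t genmask(unsigned len) {
      return bzhi64(~uint64_t(0), len);
    }
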
+ __ align(32); __ BIND(L_main_loop); __ copy64_avx(to, from, temp1, xmm1, true, shift, -64); __ copy64_avx(to, from, temp1, xmm1, true, shift, -128); @@ -1708,6 +1722,7 @@ class StubGenerator: public StubCodeGenerator { // Main loop with aligned copy block size of 192 bytes at // 64 byte copy granularity. + __ align(32); __ BIND(L_main_loop_64bytes); __ copy64_avx(to, from, temp1, xmm1, true, shift, -64 , true); __ copy64_avx(to, from, temp1, xmm1, true, shift, -128, true); @@ -1770,7 +1785,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { #if COMPILER2_OR_JVMCI - if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) { + if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { return generate_disjoint_copy_avx3_masked(entry, "jbyte_disjoint_arraycopy_avx3", 0, aligned, false, false); } @@ -1886,7 +1901,7 @@ class StubGenerator: public StubCodeGenerator { address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, address* entry, const char *name) { #if COMPILER2_OR_JVMCI - if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) { + if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { return generate_conjoint_copy_avx3_masked(entry, "jbyte_conjoint_arraycopy_avx3", 0, nooverlap_target, aligned, false, false); } @@ -1997,7 +2012,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { #if COMPILER2_OR_JVMCI - if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) { + if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { return generate_disjoint_copy_avx3_masked(entry, "jshort_disjoint_arraycopy_avx3", 1, aligned, false, false); } @@ -2128,7 +2143,7 @@ class StubGenerator: public StubCodeGenerator { address generate_conjoint_short_copy(bool aligned, address nooverlap_target, address *entry, const char *name) { #if COMPILER2_OR_JVMCI - if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) { + if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { return generate_conjoint_copy_avx3_masked(entry, "jshort_conjoint_arraycopy_avx3", 1, nooverlap_target, aligned, false, false); } @@ -2232,7 +2247,7 @@ class StubGenerator: public StubCodeGenerator { address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, const char *name, bool dest_uninitialized = false) { #if COMPILER2_OR_JVMCI - if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) { + if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { return generate_disjoint_copy_avx3_masked(entry, "jint_disjoint_arraycopy_avx3", 2, aligned, is_oop, dest_uninitialized); } @@ -2343,7 +2358,7 @@ class StubGenerator: public StubCodeGenerator { address *entry, const char *name, bool dest_uninitialized = false) { #if COMPILER2_OR_JVMCI - if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) { + if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { return generate_conjoint_copy_avx3_masked(entry, "jint_conjoint_arraycopy_avx3", 2, nooverlap_target, aligned, is_oop, dest_uninitialized); } @@ -2456,7 +2471,7 @@ class StubGenerator: public StubCodeGenerator { address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, const char *name, bool 
dest_uninitialized = false) { #if COMPILER2_OR_JVMCI - if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) { + if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { return generate_disjoint_copy_avx3_masked(entry, "jlong_disjoint_arraycopy_avx3", 3, aligned, is_oop, dest_uninitialized); } @@ -2566,7 +2581,7 @@ class StubGenerator: public StubCodeGenerator { address nooverlap_target, address *entry, const char *name, bool dest_uninitialized = false) { #if COMPILER2_OR_JVMCI - if (VM_Version::supports_avx512vlbw() && MaxVectorSize >= 32) { + if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { return generate_conjoint_copy_avx3_masked(entry, "jlong_conjoint_arraycopy_avx3", 3, nooverlap_target, aligned, is_oop, dest_uninitialized); } @@ -6828,6 +6843,7 @@ address generate_avx_ghash_processBlocks() { StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32("vector_64_bit_mask", Assembler::AVX_512bit, 0xFFFFFFFF, 0xFFFFFFFF, 0, 0); StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask("vector_int_shuffle_mask", 0x0302010003020100); + StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask("vector_byte_shuffle_mask"); StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask("vector_short_shuffle_mask", 0x0100010001000100); StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask("vector_long_shuffle_mask", 0x0000000100000000); StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask("vector_long_sign_mask", 0x8000000000000000); diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.cpp b/src/hotspot/cpu/x86/stubRoutines_x86.cpp index 45762902db2e8c3a55b881d9ceceeaaf2df19282..6aa4c4eb25631fd56e431c70cbe36ccb6c43a189 100644 --- a/src/hotspot/cpu/x86/stubRoutines_x86.cpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,6 +47,7 @@ address StubRoutines::x86::_vector_short_to_byte_mask = NULL; address StubRoutines::x86::_vector_int_to_byte_mask = NULL; address StubRoutines::x86::_vector_int_to_short_mask = NULL; address StubRoutines::x86::_vector_all_bits_set = NULL; +address StubRoutines::x86::_vector_byte_shuffle_mask = NULL; address StubRoutines::x86::_vector_short_shuffle_mask = NULL; address StubRoutines::x86::_vector_int_shuffle_mask = NULL; address StubRoutines::x86::_vector_long_shuffle_mask = NULL; diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.hpp b/src/hotspot/cpu/x86/stubRoutines_x86.hpp index 84ae8a75b5a8f594ff29af656030ba4891754af3..22e40b2c18116d300d8ed9477c49a48aba55f091 100644 --- a/src/hotspot/cpu/x86/stubRoutines_x86.hpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -149,6 +149,7 @@ class x86 {
 static address _vector_32_bit_mask;
 static address _vector_64_bit_mask;
 static address _vector_int_shuffle_mask;
+ static address _vector_byte_shuffle_mask;
 static address _vector_short_shuffle_mask;
 static address _vector_long_shuffle_mask;
 static address _vector_iota_indices;
@@ -280,6 +281,10 @@ class x86 {
 return _vector_int_shuffle_mask;
 }
+
+ static address vector_byte_shuffle_mask() {
+ return _vector_byte_shuffle_mask;
+ }

 static address vector_short_shuffle_mask() {
 return _vector_short_shuffle_mask;
 }
diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp
index 5d91280e616151310b5e72a866c702f5c54bceb0..ab5e35b547934140d793bbc8cccfa8b5616a2291 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp
@@ -1063,6 +1063,11 @@ public:
 static bool supports_clflushopt() { return ((_features & CPU_FLUSHOPT) != 0); }
 static bool supports_clwb() { return ((_features & CPU_CLWB) != 0); }
+#ifdef __APPLE__
+ // Is the CPU running emulated (for example, macOS Rosetta running x86_64 code on M1 ARM (aarch64))?
+ static bool is_cpu_emulated();
+#endif
+
 // support functions for virtualization detection
 private:
 static void check_virtualizations();
diff --git a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
index e626f95b33ba5b016f5944fc38311cd60fd11863..c6181f2d007ed8880c674cbe1fec535e6aa94972 100644
--- a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
 #if (!defined(PRODUCT) && defined(COMPILER2))
 if (CountCompiledCalls) {
- __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
+ __ incrementq(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
 }
 #endif
@@ -148,6 +148,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
 if (s == NULL) {
 return NULL;
 }
+
 // Count unused bytes in instruction sequences of variable size.
 // We add them to the computed buffer size in order to avoid
 // overflow in subsequently generated stubs.
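The new VM_Version::is_cpu_emulated() above is only declared in this patch. As a hedched sketch of one possible definition (this name and body are illustrative, not shown by the diff): Apple documents the sysctl.proc_translated sysctl for detecting Rosetta 2 translation on macOS.

    #ifdef __APPLE__
    #include <stddef.h>
    #include <sys/sysctl.h>

    static bool is_cpu_emulated_sketch() {
      int translated = 0;
      size_t size = sizeof(translated);
      // Fails (ENOENT) on systems that have never heard of Rosetta.
      if (sysctlbyname("sysctl.proc_translated", &translated, &size, NULL, 0) != 0) {
        return false;
      }
      return translated == 1;
    }
    #endif
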
@@ -163,7 +164,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { #if (!defined(PRODUCT) && defined(COMPILER2)) if (CountCompiledCalls) { - __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); + __ incrementq(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); } #endif // PRODUCT diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad index 3ffc7c8f06ab601ae6c5d572c4a6806d9d6c5e6d..7cf669f0e27cc6e77cdde4f3897bbe11c0601d32 100644 --- a/src/hotspot/cpu/x86/x86.ad +++ b/src/hotspot/cpu/x86/x86.ad @@ -1356,6 +1356,7 @@ Assembler::Width widthForType(BasicType bt) { static address vector_long_sign_mask() { return StubRoutines::x86::vector_long_sign_mask(); } static address vector_all_bits_set() { return StubRoutines::x86::vector_all_bits_set(); } static address vector_int_to_short_mask() { return StubRoutines::x86::vector_int_to_short_mask(); } + static address vector_byte_shufflemask() { return StubRoutines::x86::vector_byte_shuffle_mask(); } static address vector_short_shufflemask() { return StubRoutines::x86::vector_short_shuffle_mask(); } static address vector_int_shufflemask() { return StubRoutines::x86::vector_int_shuffle_mask(); } static address vector_long_shufflemask() { return StubRoutines::x86::vector_long_shuffle_mask(); } @@ -1526,7 +1527,7 @@ const bool Matcher::match_rule_supported(int opcode) { case Op_VectorMaskGen: case Op_LoadVectorMasked: case Op_StoreVectorMasked: - if (UseAVX < 3) { + if (UseAVX < 3 || !VM_Version::supports_bmi2()) { return false; } break; @@ -1693,9 +1694,9 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType return false; // Implementation limitation due to how shuffle is loaded } else if (size_in_bits == 256 && UseAVX < 2) { return false; // Implementation limitation - } else if (bt == T_BYTE && size_in_bits >= 256 && !VM_Version::supports_avx512_vbmi()) { + } else if (bt == T_BYTE && size_in_bits > 256 && !VM_Version::supports_avx512_vbmi()) { return false; // Implementation limitation - } else if (bt == T_SHORT && size_in_bits >= 256 && !VM_Version::supports_avx512bw()) { + } else if (bt == T_SHORT && size_in_bits > 256 && !VM_Version::supports_avx512bw()) { return false; // Implementation limitation } break; @@ -7500,13 +7501,24 @@ instruct rearrangeB(vec dst, vec shuffle) %{ ins_pipe( pipe_slow ); %} -instruct rearrangeB_avx(vec dst, vec src, vec shuffle) %{ +instruct rearrangeB_avx(legVec dst, legVec src, vec shuffle, legVec vtmp1, legVec vtmp2, rRegP scratch) %{ predicate(vector_element_basic_type(n) == T_BYTE && vector_length(n) == 32 && !VM_Version::supports_avx512_vbmi()); match(Set dst (VectorRearrange src shuffle)); - format %{ "vector_rearrange $dst, $shuffle, $src" %} + effect(TEMP dst, TEMP vtmp1, TEMP vtmp2, TEMP scratch); + format %{ "vector_rearrange $dst, $shuffle, $src\t! 
using $vtmp1, $vtmp2, $scratch as TEMP" %} ins_encode %{ - __ vpshufb($dst$$XMMRegister, $shuffle$$XMMRegister, $src$$XMMRegister, Assembler::AVX_256bit); + assert(UseAVX >= 2, "required"); + // Swap src into vtmp1 + __ vperm2i128($vtmp1$$XMMRegister, $src$$XMMRegister, $src$$XMMRegister, 1); + // Shuffle swapped src to get entries from other 128 bit lane + __ vpshufb($vtmp1$$XMMRegister, $vtmp1$$XMMRegister, $shuffle$$XMMRegister, Assembler::AVX_256bit); + // Shuffle original src to get entries from self 128 bit lane + __ vpshufb($dst$$XMMRegister, $src$$XMMRegister, $shuffle$$XMMRegister, Assembler::AVX_256bit); + // Create a blend mask by setting high bits for entries coming from other lane in shuffle + __ vpaddb($vtmp2$$XMMRegister, $shuffle$$XMMRegister, ExternalAddress(vector_byte_shufflemask()), Assembler::AVX_256bit, $scratch$$Register); + // Perform the blend + __ vpblendvb($dst$$XMMRegister, $dst$$XMMRegister, $vtmp1$$XMMRegister, $vtmp2$$XMMRegister, Assembler::AVX_256bit); %} ins_pipe( pipe_slow ); %} @@ -7527,26 +7539,42 @@ instruct rearrangeB_evex(vec dst, vec src, vec shuffle) %{ instruct loadShuffleS(vec dst, vec src, vec vtmp, rRegP scratch) %{ predicate(vector_element_basic_type(n) == T_SHORT && - vector_length(n) <= 8 && !VM_Version::supports_avx512bw()); // NB! aligned with rearrangeS + vector_length(n) <= 16 && !VM_Version::supports_avx512bw()); // NB! aligned with rearrangeS match(Set dst (VectorLoadShuffle src)); effect(TEMP dst, TEMP vtmp, TEMP scratch); format %{ "vector_load_shuffle $dst, $src\t! using $vtmp and $scratch as TEMP" %} ins_encode %{ // Create a byte shuffle mask from short shuffle mask // only byte shuffle instruction available on these platforms + int vlen_in_bytes = vector_length_in_bytes(this); + if (UseAVX == 0) { + assert(vlen_in_bytes <= 16, "required"); + // Multiply each shuffle by two to get byte index + __ pmovzxbw($vtmp$$XMMRegister, $src$$XMMRegister); + __ psllw($vtmp$$XMMRegister, 1); + + // Duplicate to create 2 copies of byte index + __ movdqu($dst$$XMMRegister, $vtmp$$XMMRegister); + __ psllw($dst$$XMMRegister, 8); + __ por($dst$$XMMRegister, $vtmp$$XMMRegister); + + // Add one to get alternate byte index + __ movdqu($vtmp$$XMMRegister, ExternalAddress(vector_short_shufflemask()), $scratch$$Register); + __ paddb($dst$$XMMRegister, $vtmp$$XMMRegister); + } else { + assert(UseAVX > 1 || vlen_in_bytes <= 16, "required"); + int vlen_enc = vector_length_encoding(this); + // Multiply each shuffle by two to get byte index + __ vpmovzxbw($vtmp$$XMMRegister, $src$$XMMRegister, vlen_enc); + __ vpsllw($vtmp$$XMMRegister, $vtmp$$XMMRegister, 1, vlen_enc); - // Multiply each shuffle by two to get byte index - __ pmovzxbw($vtmp$$XMMRegister, $src$$XMMRegister); - __ psllw($vtmp$$XMMRegister, 1); - - // Duplicate to create 2 copies of byte index - __ movdqu($dst$$XMMRegister, $vtmp$$XMMRegister); - __ psllw($dst$$XMMRegister, 8); - __ por($dst$$XMMRegister, $vtmp$$XMMRegister); + // Duplicate to create 2 copies of byte index + __ vpsllw($dst$$XMMRegister, $vtmp$$XMMRegister, 8, vlen_enc); + __ vpor($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, vlen_enc); - // Add one to get alternate byte index - __ movdqu($vtmp$$XMMRegister, ExternalAddress(vector_short_shufflemask()), $scratch$$Register); - __ paddb($dst$$XMMRegister, $vtmp$$XMMRegister); + // Add one to get alternate byte index + __ vpaddb($dst$$XMMRegister, $dst$$XMMRegister, ExternalAddress(vector_short_shufflemask()), vlen_enc, $scratch$$Register); + } %} ins_pipe( pipe_slow ); %} 
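The rearrangeB_avx form above (and the rearrangeS_avx form added just below) uses the new vector_byte_shuffle_mask stub, sixteen 0x70 bytes followed by sixteen 0xF0 bytes, to build the vpblendvb selector: after the vpaddb, bit 7 of each byte is set exactly when the shuffle index points into the other 128-bit lane. A scalar check of that arithmetic:

    #include <cassert>
    #include <cstdint>

    uint8_t blend_bit(int lane, uint8_t idx) {   // lane 0 = low, 1 = high
      uint8_t bias = lane == 0 ? 0x70 : 0xF0;    // bytes of the mask stub
      uint8_t sum = uint8_t(bias + idx);         // vpaddb wraps modulo 256
      return sum & 0x80;                         // vpblendvb keys on bit 7
    }

    int main() {
      assert(blend_bit(0, 5)  == 0);   // low lane, idx 0..15: own lane
      assert(blend_bit(0, 20) != 0);   // low lane, idx 16..31: other lane
      assert(blend_bit(1, 20) == 0);   // high lane, idx 16..31: own lane
      assert(blend_bit(1, 5)  != 0);   // high lane, idx 0..15: other lane
    }
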
@@ -7563,6 +7591,28 @@ instruct rearrangeS(vec dst, vec shuffle) %{ ins_pipe( pipe_slow ); %} +instruct rearrangeS_avx(legVec dst, legVec src, vec shuffle, legVec vtmp1, legVec vtmp2, rRegP scratch) %{ + predicate(vector_element_basic_type(n) == T_SHORT && + vector_length(n) == 16 && !VM_Version::supports_avx512bw()); + match(Set dst (VectorRearrange src shuffle)); + effect(TEMP dst, TEMP vtmp1, TEMP vtmp2, TEMP scratch); + format %{ "vector_rearrange $dst, $shuffle, $src\t! using $vtmp1, $vtmp2, $scratch as TEMP" %} + ins_encode %{ + assert(UseAVX >= 2, "required"); + // Swap src into vtmp1 + __ vperm2i128($vtmp1$$XMMRegister, $src$$XMMRegister, $src$$XMMRegister, 1); + // Shuffle swapped src to get entries from other 128 bit lane + __ vpshufb($vtmp1$$XMMRegister, $vtmp1$$XMMRegister, $shuffle$$XMMRegister, Assembler::AVX_256bit); + // Shuffle original src to get entries from self 128 bit lane + __ vpshufb($dst$$XMMRegister, $src$$XMMRegister, $shuffle$$XMMRegister, Assembler::AVX_256bit); + // Create a blend mask by setting high bits for entries coming from other lane in shuffle + __ vpaddb($vtmp2$$XMMRegister, $shuffle$$XMMRegister, ExternalAddress(vector_byte_shufflemask()), Assembler::AVX_256bit, $scratch$$Register); + // Perform the blend + __ vpblendvb($dst$$XMMRegister, $dst$$XMMRegister, $vtmp1$$XMMRegister, $vtmp2$$XMMRegister, Assembler::AVX_256bit); + %} + ins_pipe( pipe_slow ); +%} + instruct loadShuffleS_evex(vec dst, vec src) %{ predicate(vector_element_basic_type(n) == T_SHORT && VM_Version::supports_avx512bw()); diff --git a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp index f4aaeb6ea1e491ea34bdef1088a2f706001cf1db..efe82a0cce396ff2af91667885d1e65f85e7ac0c 100644 --- a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp +++ b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -119,16 +119,6 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha return generate_empty_runtime_stub("resolve_blob"); } -size_t SharedRuntime::trampoline_size() { - ShouldNotCallThis(); - return 0; -} - -void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) { - ShouldNotCallThis(); - return; -} - int SharedRuntime::c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2, diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp index a976e4f6b940025931e04727595de87efd82d3f8..dc2c25e5568175571fe86767e822d3d9fd6544d1 100644 --- a/src/hotspot/os/aix/os_aix.cpp +++ b/src/hotspot/os/aix/os_aix.cpp @@ -29,7 +29,6 @@ // no precompiled headers #include "jvm.h" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" @@ -63,9 +62,9 @@ #include "runtime/os.hpp" #include "runtime/osThread.hpp" #include "runtime/perfMemory.hpp" +#include "runtime/safefetch.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/statSampler.hpp" -#include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadCritical.hpp" #include "runtime/timer.hpp" @@ -109,7 +108,6 @@ #include #include #include -#include // Missing prototypes for various system APIs. 
extern "C" @@ -3151,64 +3149,6 @@ size_t os::current_stack_size() { return s; } -extern char** environ; - -// Run the specified command in a separate process. Return its exit value, -// or -1 on failure (e.g. can't fork a new process). -// Unlike system(), this function can be called from signal handler. It -// doesn't block SIGINT et al. -int os::fork_and_exec(char* cmd, bool use_vfork_if_available) { - char* argv[4] = { (char*)"sh", (char*)"-c", cmd, NULL}; - - pid_t pid = fork(); - - if (pid < 0) { - // fork failed - return -1; - - } else if (pid == 0) { - // child process - - // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX. - execve("/usr/bin/sh", argv, environ); - - // execve failed - _exit(-1); - - } else { - // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't - // care about the actual exit code, for now. - - int status; - - // Wait for the child process to exit. This returns immediately if - // the child has already exited. */ - while (waitpid(pid, &status, 0) < 0) { - switch (errno) { - case ECHILD: return 0; - case EINTR: break; - default: return -1; - } - } - - if (WIFEXITED(status)) { - // The child exited normally; get its exit code. - return WEXITSTATUS(status); - } else if (WIFSIGNALED(status)) { - // The child exited because of a signal. - // The best value to return is 0x80 + signal number, - // because that is what all Unix shells do, and because - // it allows callers to distinguish between process exit and - // process death by signal. - return 0x80 + WTERMSIG(status); - } else { - // Unknown exit code; pass it through. - return status; - } - } - return -1; -} - // Get the default path to the core file // Returns the length of the string int os::get_core_path(char* buffer, size_t bufferSize) { @@ -3225,12 +3165,6 @@ int os::get_core_path(char* buffer, size_t bufferSize) { return strlen(buffer); } -#ifndef PRODUCT -void TestReserveMemorySpecial_test() { - // No tests available for this platform -} -#endif - bool os::start_debugging(char *buf, int buflen) { int len = (int)strlen(buf); char *p = &buf[len]; diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp index 627b995dfedaa43594931ba351ca1db6fa3a4edf..4488e1c31d8538461f12b27451f9f042c7e1c8e1 100644 --- a/src/hotspot/os/bsd/os_bsd.cpp +++ b/src/hotspot/os/bsd/os_bsd.cpp @@ -24,7 +24,6 @@ // no precompiled headers #include "jvm.h" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" @@ -94,7 +93,6 @@ # include # include # include -# include # include # include @@ -1412,7 +1410,7 @@ void os::print_os_info_brief(outputStream* st) { } void os::print_os_info(outputStream* st) { - st->print("OS:"); + st->print_cr("OS:"); os::Posix::print_uname_info(st); @@ -2611,80 +2609,6 @@ void os::pause() { } } -// Darwin has no "environ" in a dynamic library. -#ifdef __APPLE__ - #include - #define environ (*_NSGetEnviron()) -#else -extern char** environ; -#endif - -// Run the specified command in a separate process. Return its exit value, -// or -1 on failure (e.g. can't fork a new process). -// Unlike system(), this function can be called from signal handler. It -// doesn't block SIGINT et al. -int os::fork_and_exec(char* cmd, bool use_vfork_if_available) { - const char * argv[4] = {"sh", "-c", cmd, NULL}; - - // fork() in BsdThreads/NPTL is not async-safe. It needs to run - // pthread_atfork handlers and reset pthread library. All we need is a - // separate process to execve. 
Make a direct syscall to fork process. - // On IA64 there's no fork syscall, we have to use fork() and hope for - // the best... - pid_t pid = fork(); - - if (pid < 0) { - // fork failed - return -1; - - } else if (pid == 0) { - // child process - - // execve() in BsdThreads will call pthread_kill_other_threads_np() - // first to kill every thread on the thread list. Because this list is - // not reset by fork() (see notes above), execve() will instead kill - // every thread in the parent process. We know this is the only thread - // in the new process, so make a system call directly. - // IA64 should use normal execve() from glibc to match the glibc fork() - // above. - execve("/bin/sh", (char* const*)argv, environ); - - // execve failed - _exit(-1); - - } else { - // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't - // care about the actual exit code, for now. - - int status; - - // Wait for the child process to exit. This returns immediately if - // the child has already exited. */ - while (waitpid(pid, &status, 0) < 0) { - switch (errno) { - case ECHILD: return 0; - case EINTR: break; - default: return -1; - } - } - - if (WIFEXITED(status)) { - // The child exited normally; get its exit code. - return WEXITSTATUS(status); - } else if (WIFSIGNALED(status)) { - // The child exited because of a signal - // The best value to return is 0x80 + signal number, - // because that is what all Unix shells do, and because - // it allows callers to distinguish between process exit and - // process death by signal. - return 0x80 + WTERMSIG(status); - } else { - // Unknown exit code; pass it through - return status; - } - } -} - // Get the kern.corefile setting, or otherwise the default path to the core file // Returns the length of the string int os::get_core_path(char* buffer, size_t bufferSize) { @@ -2719,12 +2643,6 @@ bool os::supports_map_sync() { return false; } -#ifndef PRODUCT -void TestReserveMemorySpecial_test() { - // No tests available for this platform -} -#endif - bool os::start_debugging(char *buf, int buflen) { int len = (int)strlen(buf); char *p = &buf[len]; diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp index 2cd3b95a72b0f34fe110f7c63ac650aac1bc035a..7a113055423aa3837a114580c6ee66005874dae8 100644 --- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp +++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ #include "logging/log.hpp" #include "runtime/init.hpp" #include "runtime/os.hpp" -#include "runtime/stubRoutines.hpp" +#include "runtime/safefetch.inline.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" #include "utilities/growableArray.hpp" diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index 3b9556d688d79a65f130451589301646ec3462df..b0bfa1301315ec32695c5e29b0dd4b23bb576600 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -97,7 +97,6 @@ # include # include # include -# include # include # include # include @@ -3529,11 +3528,19 @@ bool os::Linux::transparent_huge_pages_sanity_check(bool warn, return result; } +int os::Linux::hugetlbfs_page_size_flag(size_t page_size) { + if (page_size != default_large_page_size()) { + return (exact_log2(page_size) << MAP_HUGE_SHIFT); + } + return 0; +} + bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) { bool result = false; - void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE, - MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB, - -1, 0); + + // Include the page size flag to ensure we sanity check the correct page size. + int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size); + void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE, flags, -1, 0); if (p != MAP_FAILED) { // We don't know if this really is a huge page or not. @@ -3564,6 +3571,30 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) { return result; } +bool os::Linux::shm_hugetlbfs_sanity_check(bool warn, size_t page_size) { + // Try to create a large shared memory segment. + int shmid = shmget(IPC_PRIVATE, page_size, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W); + if (shmid == -1) { + // Possible reasons for shmget failure: + // 1. shmmax is too small for the request. + // > check shmmax value: cat /proc/sys/kernel/shmmax + // > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax + // 2. not enough large page memory. + // > check available large pages: cat /proc/meminfo + // > increase amount of large pages: + // sysctl -w vm.nr_hugepages=new_value + // > For more information regarding large pages please refer to: + // https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt + if (warn) { + warning("Large pages using UseSHM are not configured on this system."); + } + return false; + } + // Managed to create a segment, now delete it. + shmctl(shmid, IPC_RMID, NULL); + return true; +} + // From the coredump_filter documentation: // // - (bit 0) anonymous private memory @@ -3748,7 +3779,18 @@ bool os::Linux::setup_large_page_type(size_t page_size) { UseHugeTLBFS = false; } - return UseSHM; + if (UseSHM) { + bool warn_on_failure = !FLAG_IS_DEFAULT(UseSHM); + if (shm_hugetlbfs_sanity_check(warn_on_failure, page_size)) { + return true; + } + UseSHM = false; + } + + if (!FLAG_IS_DEFAULT(UseLargePages)) { + log_warning(pagesize)("UseLargePages disabled, no large pages configured and available on the system."); + } + return false; } void os::large_page_init() { @@ -3888,13 +3930,15 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W); if (shmid == -1) { // Possible reasons for shmget failure: - // 1. shmmax is too small for Java heap. + // 1. shmmax is too small for the request. 
// > check shmmax value: cat /proc/sys/kernel/shmmax - // > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax + // > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax // 2. not enough large page memory. // > check available large pages: cat /proc/meminfo // > increase amount of large pages: - // echo new_value > /proc/sys/vm/nr_hugepages + // sysctl -w vm.nr_hugepages=new_value + // > For more information regarding large pages please refer to: + // https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt // Note 1: different Linux may use different name for this property, // e.g. on Redhat AS-3 it is "hugetlb_pool". // Note 2: it's possible there's enough physical memory available but @@ -3943,10 +3987,9 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB; + // Ensure the correct page size flag is used when needed. + flags |= hugetlbfs_page_size_flag(os::large_page_size()); - if (os::large_page_size() != default_large_page_size()) { - flags |= (exact_log2(os::large_page_size()) << MAP_HUGE_SHIFT); - } char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0); if (addr == MAP_FAILED) { @@ -4016,11 +4059,7 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, } // Commit large-paged area. - flags |= MAP_HUGETLB; - - if (os::large_page_size() != default_large_page_size()) { - flags |= (exact_log2(os::large_page_size()) << MAP_HUGE_SHIFT); - } + flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(os::large_page_size()); result = ::mmap(lp_start, lp_bytes, prot, flags, -1, 0); if (result == MAP_FAILED) { @@ -5179,68 +5218,6 @@ void os::pause() { } } -extern char** environ; - -// Run the specified command in a separate process. Return its exit value, -// or -1 on failure (e.g. can't fork a new process). -// Unlike system(), this function can be called from signal handler. It -// doesn't block SIGINT et al. -int os::fork_and_exec(char* cmd, bool use_vfork_if_available) { - const char * argv[4] = {"sh", "-c", cmd, NULL}; - - pid_t pid ; - - if (use_vfork_if_available) { - pid = vfork(); - } else { - pid = fork(); - } - - if (pid < 0) { - // fork failed - return -1; - - } else if (pid == 0) { - // child process - - execve("/bin/sh", (char* const*)argv, environ); - - // execve failed - _exit(-1); - - } else { - // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't - // care about the actual exit code, for now. - - int status; - - // Wait for the child process to exit. This returns immediately if - // the child has already exited. */ - while (waitpid(pid, &status, 0) < 0) { - switch (errno) { - case ECHILD: return 0; - case EINTR: break; - default: return -1; - } - } - - if (WIFEXITED(status)) { - // The child exited normally; get its exit code. - return WEXITSTATUS(status); - } else if (WIFSIGNALED(status)) { - // The child exited because of a signal - // The best value to return is 0x80 + signal number, - // because that is what all Unix shells do, and because - // it allows callers to distinguish between process exit and - // process death by signal. 
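Returning to the hugetlbfs changes above: the new os::Linux::hugetlbfs_page_size_flag() relies on the kernel convention that an explicit huge page size is passed to mmap() as its log2 shifted up to MAP_HUGE_SHIFT. A standalone sketch of the same encoding follows; the helper name and the 2 MB example are illustrative, and MAP_HUGE_SHIFT may need <linux/mman.h> on older glibc:

    #include <sys/mman.h>
    #include <stddef.h>

    // Mirror of the encoding used by os::Linux::hugetlbfs_page_size_flag():
    // log2(page_size) goes into the mmap flag bits starting at MAP_HUGE_SHIFT,
    // e.g. 21 << MAP_HUGE_SHIFT for 2 MB pages, 30 << MAP_HUGE_SHIFT for 1 GB.
    static int huge_page_size_flag(size_t page_size) {
      int log2 = 0;
      while (((size_t)1 << log2) < page_size) log2++;
      return log2 << MAP_HUGE_SHIFT;
    }

    // Example: explicitly request 2 MB pages instead of the kernel default.
    // void* p = mmap(NULL, len, PROT_READ | PROT_WRITE,
    //                MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
    //                huge_page_size_flag(2 * 1024 * 1024), -1, 0);

This is also why hugetlbfs_sanity_check() above now ORs the flag in: without it, mmap() probes the default huge page size rather than the one actually being configured.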
- return 0x80 + WTERMSIG(status); - } else { - // Unknown exit code; pass it through - return status; - } - } -} - // Get the default path to the core file // Returns the length of the string int os::get_core_path(char* buffer, size_t bufferSize) { @@ -5495,172 +5472,3 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) { st->cr(); } } - -/////////////// Unit tests /////////////// - -#ifndef PRODUCT - -class TestReserveMemorySpecial : AllStatic { - public: - static void small_page_write(void* addr, size_t size) { - size_t page_size = os::vm_page_size(); - - char* end = (char*)addr + size; - for (char* p = (char*)addr; p < end; p += page_size) { - *p = 1; - } - } - - static void test_reserve_memory_special_huge_tlbfs_only(size_t size) { - if (!UseHugeTLBFS) { - return; - } - - char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false); - - if (addr != NULL) { - small_page_write(addr, size); - - os::Linux::release_memory_special_huge_tlbfs(addr, size); - } - } - - static void test_reserve_memory_special_huge_tlbfs_only() { - if (!UseHugeTLBFS) { - return; - } - - size_t lp = os::large_page_size(); - - for (size_t size = lp; size <= lp * 10; size += lp) { - test_reserve_memory_special_huge_tlbfs_only(size); - } - } - - static void test_reserve_memory_special_huge_tlbfs_mixed() { - size_t lp = os::large_page_size(); - size_t ag = os::vm_allocation_granularity(); - - // sizes to test - const size_t sizes[] = { - lp, lp + ag, lp + lp / 2, lp * 2, - lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2, - lp * 10, lp * 10 + lp / 2 - }; - const int num_sizes = sizeof(sizes) / sizeof(size_t); - - // For each size/alignment combination, we test three scenarios: - // 1) with req_addr == NULL - // 2) with a non-null req_addr at which we expect to successfully allocate - // 3) with a non-null req_addr which contains a pre-existing mapping, at which we - // expect the allocation to either fail or to ignore req_addr - - // Pre-allocate two areas; they shall be as large as the largest allocation - // and aligned to the largest alignment we will be testing. - const size_t mapping_size = sizes[num_sizes - 1] * 2; - char* const mapping1 = (char*) ::mmap(NULL, mapping_size, - PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, - -1, 0); - assert(mapping1 != MAP_FAILED, "should work"); - - char* const mapping2 = (char*) ::mmap(NULL, mapping_size, - PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, - -1, 0); - assert(mapping2 != MAP_FAILED, "should work"); - - // Unmap the first mapping, but leave the second mapping intact: the first - // mapping will serve as a value for a "good" req_addr (case 2). The second - // mapping, still intact, as "bad" req_addr (case 3). 
- ::munmap(mapping1, mapping_size); - - // Case 1 - for (int i = 0; i < num_sizes; i++) { - const size_t size = sizes[i]; - for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) { - char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false); - if (p != NULL) { - assert(is_aligned(p, alignment), "must be"); - small_page_write(p, size); - os::Linux::release_memory_special_huge_tlbfs(p, size); - } - } - } - - // Case 2 - for (int i = 0; i < num_sizes; i++) { - const size_t size = sizes[i]; - for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) { - char* const req_addr = align_up(mapping1, alignment); - char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false); - if (p != NULL) { - assert(p == req_addr, "must be"); - small_page_write(p, size); - os::Linux::release_memory_special_huge_tlbfs(p, size); - } - } - } - - // Case 3 - for (int i = 0; i < num_sizes; i++) { - const size_t size = sizes[i]; - for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) { - char* const req_addr = align_up(mapping2, alignment); - char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false); - // as the area around req_addr contains already existing mappings, the API should always - // return NULL (as per contract, it cannot return another address) - assert(p == NULL, "must be"); - } - } - - ::munmap(mapping2, mapping_size); - - } - - static void test_reserve_memory_special_huge_tlbfs() { - if (!UseHugeTLBFS) { - return; - } - - test_reserve_memory_special_huge_tlbfs_only(); - test_reserve_memory_special_huge_tlbfs_mixed(); - } - - static void test_reserve_memory_special_shm(size_t size, size_t alignment) { - if (!UseSHM) { - return; - } - - char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false); - - if (addr != NULL) { - assert(is_aligned(addr, alignment), "Check"); - assert(is_aligned(addr, os::large_page_size()), "Check"); - - small_page_write(addr, size); - - os::Linux::release_memory_special_shm(addr, size); - } - } - - static void test_reserve_memory_special_shm() { - size_t lp = os::large_page_size(); - size_t ag = os::vm_allocation_granularity(); - - for (size_t size = ag; size < lp * 3; size += ag) { - for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) { - test_reserve_memory_special_shm(size, alignment); - } - } - } - - static void test() { - test_reserve_memory_special_huge_tlbfs(); - test_reserve_memory_special_shm(); - } -}; - -void TestReserveMemorySpecial_test() { - TestReserveMemorySpecial::test(); -} - -#endif diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp index 513b12aaf506d77ba4c84147090be6a36a4aff1b..0e9a3add1f2f3d39bf4cee748251b4580c7ff6f1 100644 --- a/src/hotspot/os/linux/os_linux.hpp +++ b/src/hotspot/os/linux/os_linux.hpp @@ -85,6 +85,9 @@ class Linux { static bool setup_large_page_type(size_t page_size); static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size); static bool hugetlbfs_sanity_check(bool warn, size_t page_size); + static bool shm_hugetlbfs_sanity_check(bool warn, size_t page_size); + + static int hugetlbfs_page_size_flag(size_t page_size); static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec); static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec); diff --git a/src/hotspot/os/posix/os_posix.cpp 
b/src/hotspot/os/posix/os_posix.cpp index 7344efa415b79e1af099d8c641c74423a23f57ca..f52f003bd5e1daf5c9f4698ac09d98ccf4fe35c8 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -51,11 +51,17 @@
 #include
 #include
 #include
+#include
 #include
+#include
 #include
 #include
 #include
+#ifdef __APPLE__
+  #include <crt_externs.h>
+#endif
+
 #define ROOT_UID 0

 #ifndef MAP_ANONYMOUS
@@ -1765,3 +1771,75 @@ int os::PlatformMonitor::wait(jlong millis) {
     return OS_OK;
   }
 }
+
+// Darwin has no "environ" in a dynamic library.
+#ifdef __APPLE__
+  #define environ (*_NSGetEnviron())
+#else
+  extern char** environ;
+#endif
+
+char** os::get_environ() { return environ; }
+
+// Run the specified command in a separate process. Return its exit value,
+// or -1 on failure (e.g. can't fork a new process).
+// Notes:
+// - Unlike system(), this function can be called from a signal handler. It
+//   doesn't block SIGINT et al.
+// - This function is unsafe to use in non-error situations, mainly
+//   because the child process will inherit all parent descriptors.
+int os::fork_and_exec(const char* cmd, bool prefer_vfork) {
+  const char * argv[4] = {"sh", "-c", cmd, NULL};
+
+  pid_t pid;
+
+  char** env = os::get_environ();
+
+  // Always use vfork on AIX, since it's safe and helps with analyzing OOM situations.
+  // Otherwise leave it up to the caller.
+  AIX_ONLY(prefer_vfork = true;)
+  pid = prefer_vfork ? ::vfork() : ::fork();
+
+  if (pid < 0) {
+    // fork failed
+    return -1;
+
+  } else if (pid == 0) {
+    // child process
+
+    ::execve("/bin/sh", (char* const*)argv, env);
+
+    // execve failed
+    ::_exit(-1);
+
+  } else {
+    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
+    // care about the actual exit code, for now.
+
+    int status;
+
+    // Wait for the child process to exit. This returns immediately if
+    // the child has already exited.
+    while (::waitpid(pid, &status, 0) < 0) {
+      switch (errno) {
+        case ECHILD: return 0;
+        case EINTR: break;
+        default: return -1;
+      }
+    }
+
+    if (WIFEXITED(status)) {
+      // The child exited normally; get its exit code.
+      return WEXITSTATUS(status);
+    } else if (WIFSIGNALED(status)) {
+      // The child exited because of a signal.
+      // The best value to return is 0x80 + signal number,
+      // because that is what all Unix shells do, and because
+      // it allows callers to distinguish between process exit and
+      // process death by signal.
+ return 0x80 + WTERMSIG(status); + } else { + // Unknown exit code; pass it through + return status; + } + } +} diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp index 5fbcff7f9b08f5b1bb3990a6cba1589eb89f8272..8c1967ec0cc14ec496321296bae06c1cb40e5b8e 100644 --- a/src/hotspot/os/posix/signals_posix.cpp +++ b/src/hotspot/os/posix/signals_posix.cpp @@ -47,6 +47,17 @@ extern sigjmp_buf* get_jmp_buf_for_continuation(); #include + +static const char* get_signal_name(int sig, char* out, size_t outlen); + +// Returns address of a handler associated with the given sigaction +static address get_signal_handler(const struct sigaction* action); + +#define HANDLER_IS(handler, address) ((handler) == CAST_FROM_FN_PTR(void*, (address))) +#define HANDLER_IS_IGN(handler) (HANDLER_IS(handler, SIG_IGN)) +#define HANDLER_IS_DFL(handler) (HANDLER_IS(handler, SIG_DFL)) +#define HANDLER_IS_IGN_OR_DFL(handler) (HANDLER_IS_IGN(handler) || HANDLER_IS_DFL(handler)) + // Various signal related mechanism are laid out in the following order: // // sun.misc.Signal @@ -54,13 +65,25 @@ extern sigjmp_buf* get_jmp_buf_for_continuation(); // signal handling (except suspend/resume) // suspend/resume -// Glibc on Linux uses the SA_RESTORER flag to indicate -// the use of a "signal trampoline". We have no interest -// in this flag and need to ignore it when checking our -// own flag settings. -// Note: SA_RESTORER is not exposed through signal.h so we -// have to hardwire its 0x04000000 value in the mask. -LINUX_ONLY(const int SA_RESTORER_FLAG_MASK = ~0x04000000;) +// Helper function to strip any flags from a sigaction sa_flag +// which are not needed for semantic comparison (see remarks below +// about SA_RESTORER on Linux). +// Also to work around the fact that not all platforms define sa_flags +// as signed int (looking at you, zlinux). +static int get_sanitized_sa_flags(const struct sigaction* sa) { + int f = (int) sa->sa_flags; +#ifdef LINUX + // Glibc on Linux uses the SA_RESTORER flag to indicate + // the use of a "signal trampoline". We have no interest + // in this flag and need to ignore it when checking our + // own flag settings. + // Note: SA_RESTORER is not exposed through signal.h so we + // have to hardcode its 0x04000000 value here. + const int sa_restorer_flag = 0x04000000; + f &= ~sa_restorer_flag; +#endif // LINUX + return f; +} // Todo: provide a os::get_max_process_id() or similar. Number of processes // may have been configured, can be read more accurately from proc fs etc. @@ -76,22 +99,68 @@ extern "C" { typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); } -// For diagnostics to print a message once. see run_periodic_checks -static sigset_t check_signal_done; -static bool check_signals = true; +// At various places we store handler information for each installed handler. +// SavedSignalHandlers is a helper class for those cases, keeping an array of sigaction +// structures. +class SavedSignalHandlers { + // Note: NSIG can be largish, depending on platform, and this array is expected + // to be sparsely populated. To save space the contained structures are + // C-heap allocated. Since they only get added outside of signal handling + // this is no problem. 
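+  //
+  // Intended use, as a sketch (illustration only; vm_handlers and
+  // chained_handlers below are the real instances):
+  //
+  //   struct sigaction act;
+  //   sigaction(SIGSEGV, NULL, &act);   // query the currently installed handler
+  //   handlers.set(SIGSEGV, &act);      // stash a C-heap copy of it
+  //   ...
+  //   const struct sigaction* old = handlers.get(SIGSEGV); // NULL if never set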
+ struct sigaction* _sa[NSIG]; + + bool check_signal_number(int sig) const { + assert(sig > 0 && sig < NSIG, "invalid signal number %d", sig); + return sig > 0 && sig < NSIG; + } + +public: + + SavedSignalHandlers() { + ::memset(_sa, 0, sizeof(_sa)); + } + + ~SavedSignalHandlers() { + for (int i = 0; i < NSIG; i ++) { + FREE_C_HEAP_OBJ(_sa[i]); + } + } + + void set(int sig, const struct sigaction* act) { + if (check_signal_number(sig)) { + assert(_sa[sig] == NULL, "Overwriting signal handler?"); + _sa[sig] = NEW_C_HEAP_OBJ(struct sigaction, mtInternal); + *_sa[sig] = *act; + } + } + + const struct sigaction* get(int sig) const { + if (check_signal_number(sig)) { + return _sa[sig]; + } + return NULL; + } +}; + debug_only(static bool signal_sets_initialized = false); static sigset_t unblocked_sigs, vm_sigs, preinstalled_sigs; -struct sigaction sigact[NSIG]; -// For signal-chaining +// Our own signal handlers should never ever get replaced by a third party one. +// To check that, and to aid with diagnostics, store a copy of the handler setup +// and compare it periodically against reality (see os::run_periodic_checks()). +static bool check_signals = true; +static SavedSignalHandlers vm_handlers; +static bool do_check_signal_periodically[NSIG] = { 0 }; + +// For signal-chaining: +// if chaining is active, chained_handlers contains all handlers which we +// replaced with our own and to which we must delegate. +static SavedSignalHandlers chained_handlers; static bool libjsig_is_loaded = false; typedef struct sigaction *(*get_signal_t)(int); static get_signal_t get_signal_action = NULL; -// For diagnostic -int sigflags[NSIG]; - // suspend/resume support #if defined(__APPLE__) static OSXSemaphore sr_semaphore; @@ -270,8 +339,6 @@ static const struct { { -1, NULL } }; -static const char* get_signal_name(int sig, char* out, size_t outlen); - //////////////////////////////////////////////////////////////////////////////// // sun.misc.Signal support @@ -335,19 +402,6 @@ int os::signal_wait() { //////////////////////////////////////////////////////////////////////////////// // signal chaining support -static struct sigaction* get_preinstalled_handler(int sig) { - if (sigismember(&preinstalled_sigs, sig)) { - return &sigact[sig]; - } - return NULL; -} - -static void save_preinstalled_handler(int sig, struct sigaction& oldAct) { - assert(sig > 0 && sig < NSIG, "vm signal out of expected range"); - sigact[sig] = oldAct; - sigaddset(&preinstalled_sigs, sig); -} - struct sigaction* get_chained_signal_action(int sig) { struct sigaction *actp = NULL; @@ -357,7 +411,7 @@ struct sigaction* get_chained_signal_action(int sig) { } if (actp == NULL) { // Retrieve the preinstalled signal handler from jvm - actp = get_preinstalled_handler(sig); + actp = const_cast(chained_handlers.get(sig)); } return actp; @@ -736,18 +790,6 @@ static void print_sa_flags(outputStream* st, int flags) { st->print("%s", buffer); } -static int get_our_sigflags(int sig) { - assert(sig > 0 && sig < NSIG, "vm signal out of expected range"); - return sigflags[sig]; -} - -static void set_our_sigflags(int sig, int flags) { - assert(sig > 0 && sig < NSIG, "vm signal out of expected range"); - if (sig > 0 && sig < NSIG) { - sigflags[sig] = flags; - } -} - // Implementation may use the same storage for both the sa_sigaction field and the sa_handler field, // so check for "sigAct.sa_flags == SA_SIGINFO" static address get_signal_handler(const struct sigaction* action) { @@ -763,10 +805,32 @@ typedef int (*os_sigaction_t)(int, const struct sigaction 
*, struct sigaction *) static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context); +// Semantically compare two sigaction structures. Return true if they are referring to +// the same handler, using the same flags. +static bool are_handlers_equal(const struct sigaction* sa, + const struct sigaction* expected_sa) { + address this_handler = get_signal_handler(sa); + address expected_handler = get_signal_handler(expected_sa); + const int this_flags = get_sanitized_sa_flags(sa); + const int expected_flags = get_sanitized_sa_flags(expected_sa); + return (this_handler == expected_handler) && + (this_flags == expected_flags); +} + +// If we installed one of our signal handlers for sig, check that the current +// setup matches what we originally installed. static void check_signal_handler(int sig) { char buf[O_BUFLEN]; - address jvmHandler = NULL; + bool mismatch = false; + + if (!do_check_signal_periodically[sig]) { + return; + } + + const struct sigaction* expected_act = vm_handlers.get(sig); + assert(expected_act != NULL, "Sanity"); + // Retrieve current signal setup. struct sigaction act; static os_sigaction_t os_sigaction = NULL; if (os_sigaction == NULL) { @@ -777,65 +841,22 @@ static void check_signal_handler(int sig) { os_sigaction(sig, (struct sigaction*)NULL, &act); - // See comment for SA_RESTORER_FLAG_MASK - LINUX_ONLY(act.sa_flags &= SA_RESTORER_FLAG_MASK;) - - address thisHandler = get_signal_handler(&act); - - switch (sig) { - case SIGSEGV: - case SIGBUS: - case SIGFPE: - case SIGPIPE: - case SIGILL: - case SIGXFSZ: - jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler); - break; - - case SHUTDOWN1_SIGNAL: - case SHUTDOWN2_SIGNAL: - case SHUTDOWN3_SIGNAL: - case BREAK_SIGNAL: - jvmHandler = (address)os::user_handler(); - break; - - default: - if (sig == PosixSignals::SR_signum) { - jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler); - } else { - return; - } - break; - } - - if (thisHandler != jvmHandler) { - tty->print("Warning: %s handler ", os::exception_name(sig, buf, O_BUFLEN)); - tty->print_raw("expected:"); - print_signal_handler_name(tty, jvmHandler, buf, O_BUFLEN); - tty->print_raw(" found:"); - print_signal_handler_name(tty, thisHandler, buf, O_BUFLEN); - // No need to check this sig any longer - sigaddset(&check_signal_done, sig); + // Compare both sigaction structures (intelligently; only the members we care about). + if (!are_handlers_equal(&act, expected_act)) { + tty->print_cr("Warning: %s handler modified!", os::exception_name(sig, buf, sizeof(buf))); + // If we had a mismatch: + // - print all signal handlers. As part of that printout, details will be printed + // about any modified handlers. + // - Disable any further checks for this signal - we do not want to flood stdout. Though + // depending on which signal had been overwritten, we may die very soon anyway. 
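+    // (Illustration: the comparison above must go through get_sanitized_sa_flags(),
+    // because e.g. Linux/glibc reports our handler with the internal SA_RESTORER
+    // bit (0x04000000) added, so a raw
+    //   act.sa_flags == expected_act->sa_flags
+    // test could flag a mismatch even for a perfectly untouched handler.)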
+ os::print_signal_handlers(tty, buf, O_BUFLEN); + do_check_signal_periodically[sig] = false; + tty->print_cr("Consider using jsig library."); // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) { - tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell", + tty->print_cr("Note: Running in non-interactive shell, %s handler is replaced by shell", os::exception_name(sig, buf, O_BUFLEN)); } - } else if (get_our_sigflags(sig) != 0 && (int)act.sa_flags != get_our_sigflags(sig)) { - tty->print("Warning: %s handler flags ", os::exception_name(sig, buf, O_BUFLEN)); - tty->print("expected:"); - print_sa_flags(tty, get_our_sigflags(sig)); - tty->cr(); - tty->print(" found:"); - print_sa_flags(tty, act.sa_flags); - tty->cr(); - // No need to check this sig any longer - sigaddset(&check_signal_done, sig); - } - - // Dump all the signal - if (sigismember(&check_signal_done, sig)) { - os::print_signal_handlers(tty, buf, O_BUFLEN); } } @@ -857,7 +878,7 @@ void* os::signal(int signal_number, void* handler) { return (void *)-1; } - return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler); + return get_signal_handler(&oldSigAct); } void os::signal_raise(int signal_number) { @@ -869,15 +890,8 @@ int os::sigexitnum_pd() { return NSIG; } -static void do_signal_check(int signal) { - if (!sigismember(&check_signal_done, signal)) { - check_signal_handler(signal); - } -} - // This method is a periodic task to check for misbehaving JNI applications // under CheckJNI, we can add any periodic checks here - void os::run_periodic_checks() { if (check_signals == false) return; @@ -886,24 +900,24 @@ void os::run_periodic_checks() { // generation of hs*.log in the event of a crash, debugging // such a case can be very challenging, so we absolutely // check the following for a good measure: - do_signal_check(SIGSEGV); - do_signal_check(SIGILL); - do_signal_check(SIGFPE); - do_signal_check(SIGBUS); - do_signal_check(SIGPIPE); - do_signal_check(SIGXFSZ); - PPC64_ONLY(do_signal_check(SIGTRAP);) + check_signal_handler(SIGSEGV); + check_signal_handler(SIGILL); + check_signal_handler(SIGFPE); + check_signal_handler(SIGBUS); + check_signal_handler(SIGPIPE); + check_signal_handler(SIGXFSZ); + PPC64_ONLY(check_signal_handler(SIGTRAP);) // ReduceSignalUsage allows the user to override these handlers // see comments at the very top and jvm_md.h if (!ReduceSignalUsage) { - do_signal_check(SHUTDOWN1_SIGNAL); - do_signal_check(SHUTDOWN2_SIGNAL); - do_signal_check(SHUTDOWN3_SIGNAL); - do_signal_check(BREAK_SIGNAL); + check_signal_handler(SHUTDOWN1_SIGNAL); + check_signal_handler(SHUTDOWN2_SIGNAL); + check_signal_handler(SHUTDOWN3_SIGNAL); + check_signal_handler(BREAK_SIGNAL); } - do_signal_check(PosixSignals::SR_signum); + check_signal_handler(PosixSignals::SR_signum); } // Helper function for PosixSignals::print_siginfo_...(): @@ -1203,16 +1217,17 @@ void set_signal_handler(int sig) { struct sigaction oldAct; sigaction(sig, (struct sigaction*)NULL, &oldAct); + // Query the current signal handler. Needs to be a separate operation + // from installing a new handler since we need to honor AllowUserSignalHandlers. 
void* oldhand = get_signal_handler(&oldAct); - if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) && - oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) && - oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) { + if (!HANDLER_IS_IGN_OR_DFL(oldhand) && + !HANDLER_IS(oldhand, javaSignalHandler)) { if (AllowUserSignalHandlers) { // Do not overwrite; user takes responsibility to forward to us. return; } else if (UseSignalChaining) { // save the old handler in jvm - save_preinstalled_handler(sig, oldAct); + chained_handlers.set(sig, &oldAct); // libjsig also interposes the sigaction() call below and saves the // old sigaction on it own. } else { @@ -1239,9 +1254,9 @@ void set_signal_handler(int sig) { } #endif - // Save flags, which are set by ours - assert(sig > 0 && sig < NSIG, "vm signal out of expected range"); - sigflags[sig] = sigAct.sa_flags; + // Save handler setup for later checking + vm_handlers.set(sig, &sigAct); + do_check_signal_periodically[sig] = true; int ret = sigaction(sig, &sigAct, &oldAct); assert(ret == 0, "check"); @@ -1346,53 +1361,67 @@ static void print_signal_set_short(outputStream* st, const sigset_t* set) { st->print("%s", buf); } -void PosixSignals::print_signal_handler(outputStream* st, int sig, - char* buf, size_t buflen) { - struct sigaction sa; - sigaction(sig, NULL, &sa); - - // See comment for SA_RESTORER_FLAG_MASK - LINUX_ONLY(sa.sa_flags &= SA_RESTORER_FLAG_MASK;) - - st->print("%10s: ", os::exception_name(sig, buf, buflen)); - - address handler = get_signal_handler(&sa); +static void print_single_signal_handler(outputStream* st, + const struct sigaction* act, + char* buf, size_t buflen) { - if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) { + address handler = get_signal_handler(act); + if (HANDLER_IS_DFL(handler)) { st->print("SIG_DFL"); - } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) { + } else if (HANDLER_IS_IGN(handler)) { st->print("SIG_IGN"); } else { - print_signal_handler_name(st, handler, buf, O_BUFLEN); + print_signal_handler_name(st, handler, buf, buflen); } - st->print(", sa_mask[0]="); - print_signal_set_short(st, &sa.sa_mask); + st->print(", mask="); + print_signal_set_short(st, &(act->sa_mask)); - address rh = VMError::get_resetted_sighandler(sig); - // May be, handler was resetted by VMError? - if (rh != NULL) { - handler = rh; - // See comment for SA_RESTORER_FLAG_MASK - sa.sa_flags = VMError::get_resetted_sigflags(sig) LINUX_ONLY(& SA_RESTORER_FLAG_MASK); - } + st->print(", flags="); + int flags = get_sanitized_sa_flags(act); + print_sa_flags(st, flags); - // Print textual representation of sa_flags. - st->print(", sa_flags="); - print_sa_flags(st, sa.sa_flags); +} - // Check: is it our handler? - if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) || - handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) { - // It is our signal handler - // check for flags, reset system-used one! - if ((int)sa.sa_flags != get_our_sigflags(sig)) { - st->print( - ", flags was changed from " PTR32_FORMAT ", consider using jsig library", - get_our_sigflags(sig)); +// Print established signal handler for this signal. +// - if this signal handler was installed by us and is chained to a pre-established user handler +// it replaced, print that one too. +// - otherwise, if this signal handler was installed by us and replaced another handler to which we +// are not chained (e.g. if chaining is off), print that one too. 
+void PosixSignals::print_signal_handler(outputStream* st, int sig,
+                                        char* buf, size_t buflen) {
+
+  st->print("%10s: ", os::exception_name(sig, buf, buflen));
+
+  struct sigaction current_act;
+  sigaction(sig, NULL, &current_act);
+
+  print_single_signal_handler(st, &current_act, buf, buflen);
+  st->cr();
+
+  // If we expected to see our own hotspot signal handler but found a different one,
+  // print a warning (unless the handler replacing it is our own crash handler, which can
+  // happen if this function is called during error reporting).
+  const struct sigaction* expected_act = vm_handlers.get(sig);
+  if (expected_act != NULL) {
+    const address current_handler = get_signal_handler(&current_act);
+    if (!(HANDLER_IS(current_handler, VMError::crash_handler_address))) {
+      if (!are_handlers_equal(&current_act, expected_act)) {
+        st->print_cr(" *** Handler was modified!");
+        st->print (" *** Expected: ");
+        print_single_signal_handler(st, expected_act, buf, buflen);
+        st->cr();
+      }
     }
   }
-  st->cr();
+
+  // If there is a chained handler waiting behind the current one, print it too.
+  const struct sigaction* chained_act = get_chained_signal_action(sig);
+  if (chained_act != NULL) {
+    st->print(" chained to: ");
+    print_single_signal_handler(st, chained_act, buf, buflen);
+    st->cr();
+  }
 }

 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
@@ -1421,8 +1450,7 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
 bool PosixSignals::is_sig_ignored(int sig) {
   struct sigaction oact;
   sigaction(sig, (struct sigaction*)NULL, &oact);
-  void* ohlr = get_signal_handler(&oact);
-  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
+  if (HANDLER_IS_IGN(get_signal_handler(&oact))) {
     return true;
   } else {
     return false;
@@ -1672,8 +1700,10 @@ int SR_initialize() {
     return -1;
   }

-  // Save signal flag
-  set_our_sigflags(PosixSignals::SR_signum, act.sa_flags);
+  // Save signal setup information for later checking.
+  vm_handlers.set(PosixSignals::SR_signum, &act);
+  do_check_signal_periodically[PosixSignals::SR_signum] = true;
+
   return 0;
 }

diff --git a/src/hotspot/os/posix/vmError_posix.cpp b/src/hotspot/os/posix/vmError_posix.cpp
index 798097ba8243b08ba8f5a2f65c36456b68dfd2ac..709259141a207a8a858de0662f59ce43d2ae1660 100644
--- a/src/hotspot/os/posix/vmError_posix.cpp
+++ b/src/hotspot/os/posix/vmError_posix.cpp
@@ -49,16 +49,6 @@
 #endif

-// handle all synchronous program error signals which may happen during error
-// reporting. They must be unblocked, caught, handled.
-
-static const int SIGNALS[] = { SIGSEGV, SIGBUS, SIGILL, SIGFPE, SIGTRAP }; // add more if needed
-static const int NUM_SIGNALS = sizeof(SIGNALS) / sizeof(int);
-
-// Space for our "saved" signal flags and handlers
-static int resettedSigflags[NUM_SIGNALS];
-static address resettedSighandler[NUM_SIGNALS];
-
 // Needed for cancelable steps.
 static volatile pthread_t reporter_thread_id;

@@ -74,34 +64,6 @@ void VMError::interrupt_reporting_thread() {
   ::pthread_kill(reporter_thread_id, SIGILL);
 }

-static void save_signal(int idx, int sig)
-{
-  struct sigaction sa;
-  sigaction(sig, NULL, &sa);
-  resettedSigflags[idx] = sa.sa_flags;
-  resettedSighandler[idx] = (sa.sa_flags & SA_SIGINFO)
-                          ?
CAST_FROM_FN_PTR(address, sa.sa_sigaction) - : CAST_FROM_FN_PTR(address, sa.sa_handler); -} - -int VMError::get_resetted_sigflags(int sig) { - for (int i = 0; i < NUM_SIGNALS; i++) { - if (SIGNALS[i] == sig) { - return resettedSigflags[i]; - } - } - return -1; -} - -address VMError::get_resetted_sighandler(int sig) { - for (int i = 0; i < NUM_SIGNALS; i++) { - if (SIGNALS[i] == sig) { - return resettedSighandler[i]; - } - } - return NULL; -} - static void crash_handler(int sig, siginfo_t* info, void* ucVoid) { PosixSignals::unblock_error_signals(); @@ -133,10 +95,15 @@ static void crash_handler(int sig, siginfo_t* info, void* ucVoid) { VMError::report_and_die(NULL, sig, pc, info, ucVoid); } +const void* VMError::crash_handler_address = CAST_FROM_FN_PTR(void *, crash_handler); + void VMError::install_secondary_signal_handler() { - for (int i = 0; i < NUM_SIGNALS; i++) { - save_signal(i, SIGNALS[i]); - os::signal(SIGNALS[i], CAST_FROM_FN_PTR(void *, crash_handler)); + static const int signals_to_handle[] = { + SIGSEGV, SIGBUS, SIGILL, SIGFPE, SIGTRAP, + 0 // end + }; + for (int i = 0; signals_to_handle[i] != 0; i++) { + os::signal(signals_to_handle[i], CAST_FROM_FN_PTR(void *, crash_handler)); } } diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index 53dd2d99f157c7405933b39d43a43e402e4096c1..22590c2e7ca6899ce406cf6fe4a647c60622a03e 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -27,7 +27,6 @@ // no precompiled headers #include "jvm.h" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" @@ -58,10 +57,10 @@ #include "runtime/orderAccess.hpp" #include "runtime/osThread.hpp" #include "runtime/perfMemory.hpp" +#include "runtime/safefetch.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/statSampler.hpp" -#include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadCritical.hpp" #include "runtime/timer.hpp" @@ -267,6 +266,8 @@ bool os::unsetenv(const char* name) { return (SetEnvironmentVariable(name, NULL) == TRUE); } +char** os::get_environ() { return _environ; } + // No setuid programs under Windows. bool os::have_special_privileges() { return false; @@ -1701,9 +1702,9 @@ void os::print_os_info(outputStream* st) { char buffer[1024]; st->print("HostName: "); if (get_host_name(buffer, sizeof(buffer))) { - st->print("%s ", buffer); + st->print_cr(buffer); } else { - st->print("N/A "); + st->print_cr("N/A"); } #endif st->print_cr("OS:"); @@ -4482,8 +4483,18 @@ bool os::same_files(const char* file1, const char* file2) { return true; } - HANDLE handle1 = create_read_only_file_handle(file1); - HANDLE handle2 = create_read_only_file_handle(file2); + char* native_file1 = os::strdup_check_oom(file1); + native_file1 = os::native_path(native_file1); + char* native_file2 = os::strdup_check_oom(file2); + native_file2 = os::native_path(native_file2); + if (strcmp(native_file1, native_file2) == 0) { + os::free(native_file1); + os::free(native_file2); + return true; + } + + HANDLE handle1 = create_read_only_file_handle(native_file1); + HANDLE handle2 = create_read_only_file_handle(native_file2); bool result = false; // if we could open both paths... 
@@ -4510,6 +4521,9 @@ bool os::same_files(const char* file1, const char* file2) { ::CloseHandle(handle2); } + os::free(native_file1); + os::free(native_file2); + return result; } @@ -5500,7 +5514,7 @@ int os::PlatformMonitor::wait(jlong millis) { // Run the specified command in a separate process. Return its exit value, // or -1 on failure (e.g. can't create a new process). -int os::fork_and_exec(char* cmd, bool use_vfork_if_available) { +int os::fork_and_exec(const char* cmd, bool dummy /* ignored */) { STARTUPINFO si; PROCESS_INFORMATION pi; DWORD exit_code; @@ -5778,58 +5792,6 @@ char* os::build_agent_function_name(const char *sym_name, const char *lib_name, return agent_entry_name; } -#ifndef PRODUCT - -// test the code path in reserve_memory_special() that tries to allocate memory in a single -// contiguous memory block at a particular address. -// The test first tries to find a good approximate address to allocate at by using the same -// method to allocate some memory at any address. The test then tries to allocate memory in -// the vicinity (not directly after it to avoid possible by-chance use of that location) -// This is of course only some dodgy assumption, there is no guarantee that the vicinity of -// the previously allocated memory is available for allocation. The only actual failure -// that is reported is when the test tries to allocate at a particular location but gets a -// different valid one. A NULL return value at this point is not considered an error but may -// be legitimate. -void TestReserveMemorySpecial_test() { - if (!UseLargePages) { - return; - } - // save current value of globals - bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; - bool old_use_numa_interleaving = UseNUMAInterleaving; - - // set globals to make sure we hit the correct code path - UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; - - // do an allocation at an address selected by the OS to get a good one. - const size_t large_allocation_size = os::large_page_size() * 4; - char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); - if (result == NULL) { - } else { - os::release_memory_special(result, large_allocation_size); - - // allocate another page within the recently allocated memory area which seems to be a good location. At least - // we managed to get it once. - const size_t expected_allocation_size = os::large_page_size(); - char* expected_location = result + os::large_page_size(); - char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); - if (actual_location == NULL) { - } else { - // release memory - os::release_memory_special(actual_location, expected_allocation_size); - // only now check, after releasing any memory to avoid any leaks. - assert(actual_location == expected_location, - "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", - expected_location, expected_allocation_size, actual_location); - } - } - - // restore globals - UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; - UseNUMAInterleaving = old_use_numa_interleaving; -} -#endif // PRODUCT - /* All the defined signal names for Windows. 
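With the per-platform copies above removed, os::fork_and_exec() now has a single POSIX implementation (see os_posix.cpp earlier in this patch) plus the CreateProcess-based Windows one, whose unused second parameter is explicitly ignored. A hedged usage sketch follows; the helper is illustrative, not code from this patch, and presumes HotSpot's tty output stream:

    // Illustrative only: run an external diagnostic command the way an error
    // handler might. Unlike system(), os::fork_and_exec() can be called from
    // a signal handler and does not block SIGINT et al.
    static void run_diagnostic_command(const char* cmd) {
      // Second argument: prefer vfork() over fork() where the platform cares
      // (POSIX only; the Windows implementation ignores it).
      int status = os::fork_and_exec(cmd, false);
      if (status == -1) {
        tty->print_cr("Could not run \"%s\"", cmd);
      } else if (status >= 0x80) {
        // Shell convention encoded by the POSIX implementation above.
        tty->print_cr("\"%s\" died on signal %d", cmd, status - 0x80);
      } else {
        tty->print_cr("\"%s\" exited with status %d", cmd, status);
      }
    }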
diff --git a/src/hotspot/os/windows/vmError_windows.cpp b/src/hotspot/os/windows/vmError_windows.cpp index 3c899e54245e5859bc6654068d7952d6c7108035..1a0a947a749df9f2bc28a8da4d97c5f7aa7e9d07 100644 --- a/src/hotspot/os/windows/vmError_windows.cpp +++ b/src/hotspot/os/windows/vmError_windows.cpp @@ -29,14 +29,6 @@ #include "runtime/thread.hpp" #include "utilities/vmError.hpp" -int VMError::get_resetted_sigflags(int sig) { - return -1; -} - -address VMError::get_resetted_sighandler(int sig) { - return NULL; -} - LONG WINAPI crash_handler(struct _EXCEPTION_POINTERS* exceptionInfo) { DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; VMError::report_and_die(NULL, exception_code, NULL, exceptionInfo->ExceptionRecord, diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp index 07f1f8be755fd10eb2e0fac588120d2387275364..d3482e32f2144482cd536e514e6be112798f49de 100644 --- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp +++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp @@ -27,7 +27,6 @@ #include "jvm.h" #include "assembler_ppc.hpp" #include "asm/assembler.inline.hpp" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp index d1462df5fce3e52947051dcbc3156356c738c7bc..91ca9564a3e07777454d2f54b7b48e6fcec82939 100644 --- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp +++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp @@ -25,7 +25,6 @@ // no precompiled headers #include "jvm.h" #include "asm/macroAssembler.hpp" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" @@ -64,7 +63,6 @@ # include # include # include -# include # include # include # include @@ -391,16 +389,6 @@ enum { bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, ucontext_t* uc, JavaThread* thread) { - -/* - NOTE: does not seem to work on bsd. - if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) { - // can't decode this kind of signal - info = NULL; - } else { - assert(sig == info->si_signo, "bad siginfo"); - } -*/ // decide if this trap can be handled by a stub address stub = NULL; @@ -466,7 +454,10 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info, #ifdef AMD64 if (sig == SIGFPE && - (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) { + (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV + // Workaround for macOS ARM incorrectly reporting FPE_FLTINV for "div by 0" + // instead of the expected FPE_FLTDIV when running x86_64 binary under Rosetta emulation + MACOS_ONLY(|| (VM_Version::is_cpu_emulated() && info->si_code == FPE_FLTINV)))) { stub = SharedRuntime:: continuation_for_implicit_exception(thread, diff --git a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp index 3d7dc7e4b5a8bee5d3e50671976692615a47bf73..4564ddc5b9248f57e6328c94b744f588fa7c9198 100644 --- a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp +++ b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -67,12 +67,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava)
     return false;
   }

-  if (MetaspaceShared::is_in_trampoline_frame(addr)) {
-    // In the middle of a trampoline call. Bail out for safety.
-    // This happens rarely so shouldn't affect profiling.
-    return false;
-  }
-
   frame ret_frame(ret_sp, ret_fp, addr);
   if (!ret_frame.safe_for_sender(this)) {
 #if COMPILER2_OR_JVMCI
diff --git a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.hpp b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.hpp
index 76a18fb6cdaebc2b90a151e00db083be33df351f..6f4e42e14ff861b08aecdb957857e708d4d1e6a7 100644
--- a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.hpp
+++ b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.hpp
@@ -37,10 +37,6 @@
     return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
   }

-  static ByteSize saved_rbp_address_offset() {
-    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset();
-  }
-
   bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava);

diff --git a/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp
index 3dade34f9c6f51a83a7fa1a75a94fa19422e1be9..05cb7ef99c3376af3b479032ac535a4885560cc6 100644
--- a/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp
+++ b/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp
@@ -25,3 +25,24 @@
 #include "precompiled.hpp"
 #include "runtime/os.hpp"
 #include "runtime/vm_version.hpp"
+
+#ifdef __APPLE__
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+bool VM_Version::is_cpu_emulated() {
+  int ret = 0;
+  size_t size = sizeof(ret);
+  // Is this process being run in Rosetta (i.e. emulation) mode on macOS?
+  if (sysctlbyname("sysctl.proc_translated", &ret, &size, NULL, 0) == -1) {
+    // errno == ENOENT is a valid response, but anything else is a real error
+    if (errno != ENOENT) {
+      warning("unable to lookup sysctl.proc_translated");
+    }
+  }
+  return (ret == 1);
+}
+
+#endif
+
diff --git a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
index 8bca69035b7c9a7a20eda18a9c57204bb75eacc1..18125f1c16ce56611cadb30b0c6c106d899b36e7 100644
--- a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
+++ b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
@@ -31,7 +31,6 @@
 // no precompiled headers
 #include "jvm.h"
 #include "assembler_zero.inline.hpp"
-#include "classfile/classLoader.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
diff --git a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S
new file mode 100644
index 0000000000000000000000000000000000000000..f5d2c2b69c2226123fc868dfe8cd4c26c7b32d1e
--- /dev/null
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S
@@ -0,0 +1,150 @@
+// Copyright (c) 2021, Red Hat Inc. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+// +// You should have received a copy of the GNU General Public License version +// 2 along with this work; if not, write to the Free Software Foundation, +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +// +// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +// or visit www.oracle.com if you need additional information or have any +// questions. + + + + .text + + .globl aarch64_atomic_fetch_add_8_default_impl + .align 5 +aarch64_atomic_fetch_add_8_default_impl: + prfm pstl1strm, [x0] +0: ldaxr x2, [x0] + add x8, x2, x1 + stlxr w9, x8, [x0] + cbnz w9, 0b + dmb ish + mov x0, x2 + ret + + .globl aarch64_atomic_fetch_add_4_default_impl + .align 5 +aarch64_atomic_fetch_add_4_default_impl: + prfm pstl1strm, [x0] +0: ldaxr w2, [x0] + add w8, w2, w1 + stlxr w9, w8, [x0] + cbnz w9, 0b + dmb ish + mov w0, w2 + ret + + .globl aarch64_atomic_xchg_4_default_impl + .align 5 +aarch64_atomic_xchg_4_default_impl: + prfm pstl1strm, [x0] +0: ldaxr w2, [x0] + stlxr w8, w1, [x0] + cbnz w8, 0b + dmb ish + mov w0, w2 + ret + + .globl aarch64_atomic_xchg_8_default_impl + .align 5 +aarch64_atomic_xchg_8_default_impl: + prfm pstl1strm, [x0] +0: ldaxr x2, [x0] + stlxr w8, x1, [x0] + cbnz w8, 0b + dmb ish + mov x0, x2 + ret + + .globl aarch64_atomic_cmpxchg_1_default_impl + .align 5 +aarch64_atomic_cmpxchg_1_default_impl: + dmb ish + prfm pstl1strm, [x0] +0: ldxrb w3, [x0] + eor w8, w3, w1 + tst x8, #0xff + b.ne 1f + stxrb w8, w2, [x0] + cbnz w8, 0b +1: mov w0, w3 + dmb ish + ret + + .globl aarch64_atomic_cmpxchg_4_default_impl + .align 5 +aarch64_atomic_cmpxchg_4_default_impl: + dmb ish + prfm pstl1strm, [x0] +0: ldxr w3, [x0] + cmp w3, w1 + b.ne 1f + stxr w8, w2, [x0] + cbnz w8, 0b +1: mov w0, w3 + dmb ish + ret + + .globl aarch64_atomic_cmpxchg_8_default_impl + .align 5 +aarch64_atomic_cmpxchg_8_default_impl: + dmb ish + prfm pstl1strm, [x0] +0: ldxr x3, [x0] + cmp x3, x1 + b.ne 1f + stxr w8, x2, [x0] + cbnz w8, 0b +1: mov x0, x3 + dmb ish + ret + + .globl aarch64_atomic_cmpxchg_1_relaxed_default_impl + .align 5 +aarch64_atomic_cmpxchg_1_relaxed_default_impl: + prfm pstl1strm, [x0] +0: ldxrb w3, [x0] + eor w8, w3, w1 + tst x8, #0xff + b.ne 1f + stxrb w8, w2, [x0] + cbnz w8, 0b +1: mov w0, w3 + ret + + .globl aarch64_atomic_cmpxchg_4_relaxed_default_impl + .align 5 +aarch64_atomic_cmpxchg_4_relaxed_default_impl: + prfm pstl1strm, [x0] +0: ldxr w3, [x0] + cmp w3, w1 + b.ne 1f + stxr w8, w2, [x0] + cbnz w8, 0b +1: mov w0, w3 + ret + + .globl aarch64_atomic_cmpxchg_8_relaxed_default_impl + .align 5 +aarch64_atomic_cmpxchg_8_relaxed_default_impl: + prfm pstl1strm, [x0] +0: ldxr x3, [x0] + cmp x3, x1 + b.ne 1f + stxr w8, x2, [x0] + cbnz w8, 0b +1: mov x0, x3 + ret diff --git a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp index 8e275a4173e4455f8f707ec4141e04982458cee3..77e860ed5ec85202c4460e4faa0220a781bb426b 100644 --- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp +++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. + * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,59 +26,154 @@
 #ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
 #define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
 
+#include "atomic_aarch64.hpp"
 #include "runtime/vm_version.hpp"
 
 // Implementation of class atomic
+
 // Note that memory_order_conservative requires a full barrier after atomic stores.
 // See https://patchwork.kernel.org/patch/3575821/
 
+// Call one of the stubs from C++. This uses the C calling convention,
+// but this asm definition is used in order only to clobber the
+// registers we use. If we called the stubs via an ABI call we'd have
+// to save X0 - X18 and most of the vectors.
+//
+// This really ought to be a template definition, but see GCC Bug
+// 33661, template methods forget explicit local register asm
+// vars. The problem is that register specifiers attached to local
+// variables are ignored in any template function.
+inline uint64_t bare_atomic_fastcall(address stub, volatile void *ptr, uint64_t arg1, uint64_t arg2 = 0) {
+  register uint64_t reg0 __asm__("x0") = (uint64_t)ptr;
+  register uint64_t reg1 __asm__("x1") = arg1;
+  register uint64_t reg2 __asm__("x2") = arg2;
+  register uint64_t reg3 __asm__("x3") = (uint64_t)stub;
+  register uint64_t result __asm__("x0");
+  asm volatile(// "stp x29, x30, [sp, #-16]!;"
+               " blr %1;"
+               // " ldp x29, x30, [sp], #16 // regs %0, %1, %2, %3, %4"
+               : "=r"(result), "+r"(reg3), "+r"(reg2)
+               : "r"(reg1), "0"(reg0) : "x8", "x9", "x30", "cc", "memory");
+  return result;
+}
+
+template <typename F, typename D, typename T1>
+inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1) {
+  return (D)bare_atomic_fastcall(CAST_FROM_FN_PTR(address, stub),
+                                 dest, (uint64_t)arg1);
+}
+
+template <typename F, typename D, typename T1, typename T2>
+inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1, T2 arg2) {
+  return (D)bare_atomic_fastcall(CAST_FROM_FN_PTR(address, stub),
+                                 dest, (uint64_t)arg1, (uint64_t)arg2);
+}
+
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
   template<typename D, typename I>
-  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
-    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
-    FULL_MEM_BARRIER;
-    return res;
-  }
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
 
   template<typename D, typename I>
-  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
-    return add_and_fetch(dest, add_value, order) - add_value;
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    D value = fetch_and_add(dest, add_value, order) + add_value;
+    return value;
   }
 };
 
-template<size_t byte_size>
+template<>
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
+                                               atomic_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  D old_value
+    = atomic_fastcall(aarch64_atomic_fetch_add_4_impl, dest, add_value);
+  return old_value;
+}
+
+template<>
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
+                                               atomic_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D old_value
+    = atomic_fastcall(aarch64_atomic_fetch_add_8_impl, dest, add_value);
+  return old_value;
+}
+
+template<>
 template<typename T>
-inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
-                                                     T exchange_value,
-                                                     atomic_memory_order order) const {
-  STATIC_ASSERT(byte_size == sizeof(T));
-  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
-  FULL_MEM_BARRIER;
-  return res;
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+                                             T exchange_value,
+                                             atomic_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  T old_value = atomic_fastcall(aarch64_atomic_xchg_4_impl, dest, exchange_value);
+  return old_value;
 }
 
-// __attribute__((unused)) on dest is to get rid of spurious GCC warnings.
-template<size_t byte_size>
+template<>
 template<typename T>
-inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attribute__((unused)),
-                                                        T compare_value,
-                                                        T exchange_value,
-                                                        atomic_memory_order order) const {
-  STATIC_ASSERT(byte_size == sizeof(T));
-  if (order == memory_order_relaxed) {
-    T value = compare_value;
-    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
-                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
-    return value;
-  } else {
-    T value = compare_value;
-    FULL_MEM_BARRIER;
-    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
-                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
-    FULL_MEM_BARRIER;
-    return value;
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
+                                             atomic_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  T old_value = atomic_fastcall(aarch64_atomic_xchg_8_impl, dest, exchange_value);
+  return old_value;
+}
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
+                                                T compare_value,
+                                                T exchange_value,
+                                                atomic_memory_order order) const {
+  STATIC_ASSERT(1 == sizeof(T));
+  aarch64_atomic_stub_t stub;
+  switch (order) {
+  case memory_order_relaxed:
+    stub = aarch64_atomic_cmpxchg_1_relaxed_impl; break;
+  default:
+    stub = aarch64_atomic_cmpxchg_1_impl; break;
+  }
+
+  return atomic_fastcall(stub, dest, compare_value, exchange_value);
+}
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
+                                                T compare_value,
+                                                T exchange_value,
+                                                atomic_memory_order order) const {
+  STATIC_ASSERT(4 == sizeof(T));
+  aarch64_atomic_stub_t stub;
+  switch (order) {
+  case memory_order_relaxed:
+    stub = aarch64_atomic_cmpxchg_4_relaxed_impl; break;
+  default:
+    stub = aarch64_atomic_cmpxchg_4_impl; break;
+  }
+
+  return atomic_fastcall(stub, dest, compare_value, exchange_value);
+}
+
+template<>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
+                                                T compare_value,
+                                                T exchange_value,
+                                                atomic_memory_order order) const {
+  STATIC_ASSERT(8 == sizeof(T));
+  aarch64_atomic_stub_t stub;
+  switch (order) {
+  case memory_order_relaxed:
+    stub = aarch64_atomic_cmpxchg_8_relaxed_impl; break;
+  default:
+    stub = aarch64_atomic_cmpxchg_8_impl; break;
   }
+
+  return atomic_fastcall(stub, dest, compare_value, exchange_value);
 }
 
 template<size_t byte_size>
diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
index a785369a8ddcae90e1931bf5bcd51b6321bdecf4..13702ba1c2364c68a712dc831477a5dad3a2bf1d 100644
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
@@ -26,7 +26,6 @@
 // no precompiled headers
 #include "jvm.h"
 #include "asm/macroAssembler.hpp"
-#include "classfile/classLoader.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
diff --git a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp
index 799d2cf87d18a1e43cef1784a1c47eeb12ea130c..702d6f6dcd5faeee8d0d8f0bc2060b4a3fa3f167 100644
--- a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2020, Oracle
and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -68,12 +68,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) return false; } - if (MetaspaceShared::is_in_trampoline_frame(addr)) { - // In the middle of a trampoline call. Bail out for safety. - // This happens rarely so shouldn't affect profiling. - return false; - } - frame ret_frame(ret_sp, ret_fp, addr); if (!ret_frame.safe_for_sender(this)) { #ifdef COMPILER2 diff --git a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.hpp index 8c52a6a99454cb215ea9ef523440510e7697b27c..5a1f273c548660815abf1ce1d513472212d7e01d 100644 --- a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.hpp +++ b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.hpp @@ -39,10 +39,6 @@ return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset(); } - static ByteSize saved_fp_address_offset() { - return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_fp_address_offset(); - } - bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp index 12eb0f543cefee84db20b9bb3e2edef8b8fca4ce..064c1fba16b6eecf7e4067ab923631e621eac566 100644 --- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp +++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp @@ -25,7 +25,6 @@ // no precompiled headers #include "jvm.h" #include "assembler_arm.inline.hpp" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" diff --git a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp index 6fd8a2fbde56e4cea9892995bf097f416b350c71..66cb5e7f8451328dc2f88fa615cac7e4e199d7d9 100644 --- a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp +++ b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -101,12 +101,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) return false; } - if (MetaspaceShared::is_in_trampoline_frame(addr)) { - // In the middle of a trampoline call. Bail out for safety. - // This happens rarely so shouldn't affect profiling. 
- return false; - } - frame ret_frame(ret_sp, ret_fp, addr); if (!ret_frame.safe_for_sender(this)) { #ifdef COMPILER2 diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp index 0e390ab3f2809ddfb512f72af710176258450e9b..5edb303b68b0bcd2f8dc350cfdff39d5eca898d5 100644 --- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp +++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp @@ -27,7 +27,6 @@ #include "jvm.h" #include "assembler_ppc.hpp" #include "asm/assembler.inline.hpp" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" diff --git a/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp index f853f7a439a3ca8e8c0b06be9f44564a422a0900..9f779456640211c636aa03ae1ab09f46278b5217 100644 --- a/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp +++ b/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -24,6 +24,7 @@ */ #include "precompiled.hpp" +#include "memory/metaspace.hpp" #include "runtime/frame.inline.hpp" #include "runtime/thread.hpp" diff --git a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp index cd370d2f7a3186f995a368ea74a9d63969372be0..53b017d221725486a1b41944092e99d30565aa49 100644 --- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp +++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp @@ -28,7 +28,6 @@ // no precompiled headers #include "jvm.h" #include "asm/assembler.inline.hpp" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/icBuffer.hpp" #include "code/nativeInst.hpp" diff --git a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp index aebadb25518b6000a16359617860fe00a130667f..eeaf2f47fc607faa828afe9238a725de2cf88f6f 100644 --- a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp +++ b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -24,6 +24,7 @@ */ #include "precompiled.hpp" +#include "memory/metaspace.hpp" #include "runtime/frame.inline.hpp" #include "runtime/thread.hpp" diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp index ed603aa641e28bf493390d3031408ed7edeab0ab..dcd2f566a16d62279e2daa2a59e7097f6fffadee 100644 --- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp +++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp @@ -25,7 +25,6 @@ // no precompiled headers #include "jvm.h" #include "asm/macroAssembler.hpp" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" diff --git a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp index b72127907b20854668832be9dc819b0d66098d5a..b030abe5b2be61990cfb8ce5b0ec6ef10ec7997a 100644 --- a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp +++ b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,12 +68,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) return false; } - if (MetaspaceShared::is_in_trampoline_frame(addr)) { - // In the middle of a trampoline call. Bail out for safety. - // This happens rarely so shouldn't affect profiling. - return false; - } - frame ret_frame(ret_sp, ret_fp, addr); if (!ret_frame.safe_for_sender(this)) { #if COMPILER2_OR_JVMCI diff --git a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.hpp index 1b02aadc52570815ceb8bb870bea27a868c01020..574edc79831be1ad0f5cff45f83639cd42cb04fb 100644 --- a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.hpp +++ b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.hpp @@ -37,10 +37,6 @@ return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset(); } - static ByteSize saved_rbp_address_offset() { - return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset(); - } - bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); diff --git a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp index e38b7d351aec3f895f9e2bfec13b82d88d094f75..ac57da78a25913645175cd54156c9d88aecb6dd4 100644 --- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp +++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp @@ -26,7 +26,6 @@ // no precompiled headers #include "jvm.h" #include "assembler_zero.inline.hpp" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" diff --git a/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp b/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp index 6422eb406e05a81aed0f77d5a09fc9e81cc9ff99..94a96fc05afeca12df311dda3e31a7d6cd1434c6 100644 --- a/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp +++ b/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp @@ -25,7 +25,6 @@ #include "precompiled.hpp" #include "jvm.h" #include "asm/macroAssembler.hpp" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" diff --git 
a/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp b/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp index d235b4850e8afaa7e357cf2376cd6d1ec1277bf9..677e810b78cbec0069be713a743f6093ba82d85a 100644 --- a/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp +++ b/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.cpp @@ -69,12 +69,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) return false; } - if (MetaspaceShared::is_in_trampoline_frame(ret_frame.pc())) { - // In the middle of a trampoline call. Bail out for safety. - // This happens rarely so shouldn't affect profiling. - return false; - } - if (!ret_frame.safe_for_sender(jt)) { #if COMPILER2_OR_JVMCI // C2 and JVMCI use ebp as a general register see if NULL fp helps diff --git a/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.hpp b/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.hpp index 64d7a65e62a64451ca2c222537bc7b7f0df07637..bcf43c8b088296c06d8c4d1ac70739dca1b1d8d5 100644 --- a/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.hpp +++ b/src/hotspot/os_cpu/windows_aarch64/thread_windows_aarch64.hpp @@ -38,10 +38,6 @@ return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset(); } - static ByteSize saved_fp_address_offset() { - return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_fp_address_offset(); - } - bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp index 2e18762cf634d66acd52c4be0daf429100107b92..764780db4e1c3c629f3752bf1006219ec7976a60 100644 --- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp +++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp @@ -25,7 +25,6 @@ // no precompiled headers #include "jvm.h" #include "asm/macroAssembler.hpp" -#include "classfile/classLoader.hpp" #include "classfile/vmSymbols.hpp" #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" diff --git a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp index 048574596f4e2bf0204e16273e44b19fbc54c5f8..8cf064e0613267a08c10fc03b89c8740a430b96e 100644 --- a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp +++ b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,12 +64,6 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) return false; } - if (MetaspaceShared::is_in_trampoline_frame(ret_frame.pc())) { - // In the middle of a trampoline call. Bail out for safety. - // This happens rarely so shouldn't affect profiling. 
- return false; - } - if (!ret_frame.safe_for_sender(this)) { #if COMPILER2_OR_JVMCI // C2 and JVMCI use ebp as a general register see if NULL fp helps diff --git a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.hpp b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.hpp index 9cb8bc89682fe798b554f77435f23ecdcd4e779f..21577346246628da9c78dd87122bf9aa5631334f 100644 --- a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.hpp +++ b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.hpp @@ -44,10 +44,6 @@ return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset(); } - static ByteSize saved_rbp_address_offset() { - return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset(); - } - bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); diff --git a/src/hotspot/share/aot/aotCodeHeap.cpp b/src/hotspot/share/aot/aotCodeHeap.cpp index a84138d64c8a8e9c2afd34e9973652d10e97232c..462665002ec3445acc89844b04185a8fcb197d8b 100644 --- a/src/hotspot/share/aot/aotCodeHeap.cpp +++ b/src/hotspot/share/aot/aotCodeHeap.cpp @@ -112,8 +112,7 @@ Klass* AOTCodeHeap::lookup_klass(const char* name, int len, const Method* method log_debug(aot, class, resolve)("Probe failed for AOT class %s", name); return NULL; } - Klass* k = SystemDictionary::find_instance_or_array_klass(sym, loader, protection_domain, thread); - assert(!thread->has_pending_exception(), "should not throw"); + Klass* k = SystemDictionary::find_instance_or_array_klass(sym, loader, protection_domain); if (k != NULL) { log_info(aot, class, resolve)("%s %s (lookup)", caller->method_holder()->external_name(), k->external_name()); diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp index 7558c9c85726201e738b7b8b612a4200b1330255..7d76c8ae751dee352be16a6bc7847cdd3ab23829 100644 --- a/src/hotspot/share/asm/codeBuffer.cpp +++ b/src/hotspot/share/asm/codeBuffer.cpp @@ -27,6 +27,7 @@ #include "code/oopRecorder.inline.hpp" #include "compiler/disassembler.hpp" #include "logging/log.hpp" +#include "oops/klass.inline.hpp" #include "oops/methodData.hpp" #include "oops/oop.inline.hpp" #include "runtime/icache.hpp" diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp index dcd2d50b327af35b456e49a4f73da4794c2fdb5e..08e4ac2264af4790b132fa77400f8c7cdad78b50 100644 --- a/src/hotspot/share/c1/c1_LIR.hpp +++ b/src/hotspot/share/c1/c1_LIR.hpp @@ -231,8 +231,8 @@ class LIR_OprDesc: public CompilationResourceObj { , is_xmm_bits = 1 , last_use_bits = 1 , is_fpu_stack_offset_bits = 1 // used in assertion checking on x86 for FPU stack slot allocation - , non_data_bits = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits + - is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits + , non_data_bits = pointer_bits + kind_bits + type_bits + size_bits + destroys_bits + virtual_bits + + is_xmm_bits + last_use_bits + is_fpu_stack_offset_bits , data_bits = BitsPerInt - non_data_bits , reg_bits = data_bits / 2 // for two registers in one value encoding }; @@ -649,6 +649,11 @@ class LIR_OprFact: public AllStatic { #endif // X86 static LIR_Opr virtual_register(int index, BasicType type) { + if (index > LIR_OprDesc::vreg_max) { + // Running out of virtual registers. Caller should bailout. 
+      return illegalOpr;
+    }
+
     LIR_Opr res;
     switch (type) {
       case T_OBJECT: // fall through
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp
index a78dc845e5962c4346592c90efca107867d1334d..d1fc710252871a1078f7742f48715cabb4d9ef6c 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -1049,20 +1049,21 @@ void LIRGenerator::move_to_phi(ValueStack* cur_state) {
 
 LIR_Opr LIRGenerator::new_register(BasicType type) {
-  int vreg = _virtual_register_number;
-  // add a little fudge factor for the bailout, since the bailout is
-  // only checked periodically. This gives a few extra registers to
-  // hand out before we really run out, which helps us keep from
-  // tripping over assertions.
-  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
-    bailout("out of virtual registers");
-    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
-      // wrap it around
+  int vreg_num = _virtual_register_number;
+  // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
+  // a few extra registers before we really run out, which helps avoid tripping over assertions.
+  if (vreg_num + 20 >= LIR_OprDesc::vreg_max) {
+    bailout("out of virtual registers in LIR generator");
+    if (vreg_num + 2 >= LIR_OprDesc::vreg_max) {
+      // Wrap it around and continue until bailout really happens to avoid hitting assertions.
       _virtual_register_number = LIR_OprDesc::vreg_base;
+      vreg_num = LIR_OprDesc::vreg_base;
     }
   }
   _virtual_register_number += 1;
-  return LIR_OprFact::virtual_register(vreg, type);
+  LIR_Opr vreg = LIR_OprFact::virtual_register(vreg_num, type);
+  assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
+  return vreg;
 }
diff --git a/src/hotspot/share/c1/c1_LinearScan.cpp b/src/hotspot/share/c1/c1_LinearScan.cpp
index 1d49366cc1aa6fa0c709afd2836468fff92c69ce..ccc02b7e85e81d55abb5af1719b7dca3dba969a5 100644
--- a/src/hotspot/share/c1/c1_LinearScan.cpp
+++ b/src/hotspot/share/c1/c1_LinearScan.cpp
@@ -3928,8 +3928,8 @@ void MoveResolver::insert_move(Interval* from_interval, Interval* to_interval) {
   assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
   assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");
 
-  LIR_Opr from_opr = LIR_OprFact::virtual_register(from_interval->reg_num(), from_interval->type());
-  LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());
+  LIR_Opr from_opr = get_virtual_register(from_interval);
+  LIR_Opr to_opr = get_virtual_register(to_interval);
 
   if (!_multiple_reads_allowed) {
     // the last_use flag is an optimization for FPU stack allocation. When the same
@@ -3947,12 +3947,27 @@
 void MoveResolver::insert_move(LIR_Opr from_opr, Interval* to_interval) {
   assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
   assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");
 
-  LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());
+  LIR_Opr to_opr = get_virtual_register(to_interval);
   _insertion_buffer.move(_insert_idx, from_opr, to_opr);
 
   TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: inserted move from constant "); from_opr->print(); tty->print_cr(" to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
 }
 
+LIR_Opr MoveResolver::get_virtual_register(Interval* interval) {
+  // Add a little fudge factor for the bailout since the bailout is only checked periodically. This allows us to hand out
+  // a few extra registers before we really run out, which helps avoid tripping over assertions.
+  int reg_num = interval->reg_num();
+  if (reg_num + 20 >= LIR_OprDesc::vreg_max) {
+    _allocator->bailout("out of virtual registers in linear scan");
+    if (reg_num + 2 >= LIR_OprDesc::vreg_max) {
+      // Wrap it around and continue until bailout really happens to avoid hitting assertions.
+      reg_num = LIR_OprDesc::vreg_base;
+    }
+  }
+  LIR_Opr vreg = LIR_OprFact::virtual_register(reg_num, interval->type());
+  assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");
+  return vreg;
+}
 
 void MoveResolver::resolve_mappings() {
   TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: resolving mappings for Block B%d, index %d", _insert_list->block() != NULL ? _insert_list->block()->block_id() : -1, _insert_idx));
diff --git a/src/hotspot/share/c1/c1_LinearScan.hpp b/src/hotspot/share/c1/c1_LinearScan.hpp
index 0249453d9c10237660b930730c1d534fcb4c93ff..761de2c3dc73ecc3691157892dddd969ea3e6c36 100644
--- a/src/hotspot/share/c1/c1_LinearScan.hpp
+++ b/src/hotspot/share/c1/c1_LinearScan.hpp
@@ -436,6 +436,7 @@ class MoveResolver: public StackObj {
   void append_insertion_buffer();
   void insert_move(Interval* from_interval, Interval* to_interval);
   void insert_move(LIR_Opr from_opr, Interval* to_interval);
+  LIR_Opr get_virtual_register(Interval* interval);
 
   DEBUG_ONLY(void verify_before_resolve();)
   void resolve_mappings();
diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp
index 3df467216ba934542619b6e6abe81549d21ac0ad..9ffba8d9fd6d0f09ee35e2694d31ebddb2b9493a 100644
--- a/src/hotspot/share/ci/ciEnv.cpp
+++ b/src/hotspot/share/ci/ciEnv.cpp
@@ -304,10 +304,10 @@ ciInstance* ciEnv::get_or_create_exception(jobject& handle, Symbol* name) {
   VM_ENTRY_MARK;
   if (handle == NULL) {
     // Cf. universe.cpp, creation of Universe::_null_ptr_exception_instance.
- Klass* k = SystemDictionary::find(name, Handle(), Handle(), THREAD); + InstanceKlass* ik = SystemDictionary::find_instance_klass(name, Handle(), Handle()); jobject objh = NULL; - if (!HAS_PENDING_EXCEPTION && k != NULL) { - oop obj = InstanceKlass::cast(k)->allocate_instance(THREAD); + if (ik != NULL) { + oop obj = ik->allocate_instance(THREAD); if (!HAS_PENDING_EXCEPTION) objh = JNIHandles::make_global(Handle(THREAD, obj)); } @@ -445,11 +445,9 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass, MutexLocker ml(Compile_lock); Klass* kls; if (!require_local) { - kls = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, - CHECK_AND_CLEAR_(fail_type)); + kls = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, THREAD); } else { - kls = SystemDictionary::find_instance_or_array_klass(sym, loader, domain, - CHECK_AND_CLEAR_(fail_type)); + kls = SystemDictionary::find_instance_or_array_klass(sym, loader, domain); } found_klass = kls; } @@ -957,7 +955,7 @@ void ciEnv::register_method(ciMethod* target, bool has_unsafe_access, bool has_wide_vectors, RTMState rtm_state, - const GrowableArrayView& native_invokers) { + const GrowableArrayView& native_invokers) { VM_ENTRY_MARK; nmethod* nm = NULL; { diff --git a/src/hotspot/share/ci/ciEnv.hpp b/src/hotspot/share/ci/ciEnv.hpp index 5baf280764e12a90c678c4e410761865af25405f..ab1c359659d9bfd8e0dc2c824e47166a4ca1dad5 100644 --- a/src/hotspot/share/ci/ciEnv.hpp +++ b/src/hotspot/share/ci/ciEnv.hpp @@ -380,7 +380,7 @@ public: bool has_unsafe_access, bool has_wide_vectors, RTMState rtm_state = NoRTM, - const GrowableArrayView& native_invokers = GrowableArrayView::EMPTY); + const GrowableArrayView& native_invokers = GrowableArrayView::EMPTY); // Access to certain well known ciObjects. diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp index 8baa34d19e631835bfe9919d84228695d303a8b4..e9a42e451cca1c6ca8e05cedd7f3c50f3c425285 100644 --- a/src/hotspot/share/ci/ciInstanceKlass.cpp +++ b/src/hotspot/share/ci/ciInstanceKlass.cpp @@ -727,7 +727,7 @@ void ciInstanceKlass::dump_replay_data(outputStream* out) { // Try to record related loaded classes Klass* sub = ik->subklass(); while (sub != NULL) { - if (sub->is_instance_klass()) { + if (sub->is_instance_klass() && !sub->is_hidden() && !InstanceKlass::cast(sub)->is_unsafe_anonymous()) { out->print_cr("instanceKlass %s", sub->name()->as_quoted_ascii()); } sub = sub->next_sibling(); diff --git a/src/hotspot/share/ci/ciMethodData.cpp b/src/hotspot/share/ci/ciMethodData.cpp index 8e177980a7a4edd66c2a4241fb864e751335904f..8f61ce744dcecc51e32c19c5e9066d338a9409a9 100644 --- a/src/hotspot/share/ci/ciMethodData.cpp +++ b/src/hotspot/share/ci/ciMethodData.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 #include "compiler/compiler_globals.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/deoptimization.hpp"
 #include "utilities/copy.hpp"
diff --git a/src/hotspot/share/classfile/classFileStream.cpp b/src/hotspot/share/classfile/classFileStream.cpp
index 6a625e5350c15d315851475caf9b5aaf84a0b323..b80f873c4908afc8752339795aeb10518c7151d3 100644
--- a/src/hotspot/share/classfile/classFileStream.cpp
+++ b/src/hotspot/share/classfile/classFileStream.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,9 @@ ClassFileStream::ClassFileStream(const u1* buffer,
   _current(buffer),
   _source(source),
   _need_verify(verify_stream),
-  _from_boot_loader_modules_image(from_boot_loader_modules_image) {}
+  _from_boot_loader_modules_image(from_boot_loader_modules_image) {
+  assert(buffer != NULL, "caller should throw NPE");
+}
 
 const u1* ClassFileStream::clone_buffer() const {
   u1* const new_buffer_start = NEW_RESOURCE_ARRAY(u1, length());
diff --git a/src/hotspot/share/classfile/classListParser.cpp b/src/hotspot/share/classfile/classListParser.cpp
index e5e8f2cd243cf9f6ea8a07c742fd8748ab56ab09..b32801504eed5446f93ef0d52e29dece8b7ef061 100644
--- a/src/hotspot/share/classfile/classListParser.cpp
+++ b/src/hotspot/share/classfile/classListParser.cpp
@@ -43,6 +43,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/constantPool.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
@@ -50,11 +51,10 @@
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
 
+volatile Thread* ClassListParser::_parsing_thread = NULL;
 ClassListParser* ClassListParser::_instance = NULL;
 
 ClassListParser::ClassListParser(const char* file) {
-  assert(_instance == NULL, "must be singleton");
-  _instance = this;
   _classlist_file = file;
   _file = NULL;
   // Use os::open() because neither fopen() nor os::fopen()
@@ -73,12 +73,22 @@ ClassListParser::ClassListParser(const char* file) {
   _line_no = 0;
   _interfaces = new (ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
   _indy_items = new (ResourceObj::C_HEAP, mtClass) GrowableArray<const char*>(9, mtClass);
+
+  // _instance should only be accessed by the thread that created _instance.
+  assert(_instance == NULL, "must be singleton");
+  _instance = this;
+  Atomic::store(&_parsing_thread, Thread::current());
+}
+
+bool ClassListParser::is_parsing_thread() {
+  return Atomic::load(&_parsing_thread) == Thread::current();
 }
 
 ClassListParser::~ClassListParser() {
   if (_file) {
     fclose(_file);
   }
+  Atomic::store(&_parsing_thread, (Thread*)NULL);
   _instance = NULL;
 }
diff --git a/src/hotspot/share/classfile/classListParser.hpp b/src/hotspot/share/classfile/classListParser.hpp
index f8598a500219bbb52283f0031588948a3fc9b869..ed7116ce90e4dcbc71a5504855ed66a659c553f5 100644
--- a/src/hotspot/share/classfile/classListParser.hpp
+++ b/src/hotspot/share/classfile/classListParser.hpp
@@ -33,6 +33,8 @@
 #define LAMBDA_PROXY_TAG "@lambda-proxy"
 #define LAMBDA_FORM_TAG "@lambda-form-invoker"
 
+class Thread;
+
 class ID2KlassTable : public KVHashtable<int, InstanceKlass*, mtInternal> {
 public:
   ID2KlassTable() : KVHashtable<int, InstanceKlass*, mtInternal>(1987) {}
@@ -81,6 +83,7 @@ class ClassListParser : public StackObj {
     _line_buf_size = _max_allowed_line_len + _line_buf_extra
   };
 
+  static volatile Thread* _parsing_thread; // the thread that created _instance
   static ClassListParser* _instance; // the singleton.
   const char* _classlist_file;
   FILE* _file;
@@ -119,9 +122,13 @@ public:
   ClassListParser(const char* file);
   ~ClassListParser();
 
+  static bool is_parsing_thread();
   static ClassListParser* instance() {
+    assert(is_parsing_thread(), "call this only in the thread that created ClassListParser::_instance");
+    assert(_instance != NULL, "must be");
     return _instance;
   }
+
   bool parse_one_line();
   void split_tokens_by_whitespace(int offset);
   int split_at_tag_from_line();
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index d80fe19bd357459797014dd0d956eec0ebba3c60..47eaecead777044967c6ac7bf5cd32344c5bfe9b 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -287,7 +287,7 @@ ClassPathZipEntry::~ClassPathZipEntry() {
 
 u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
   // enable call to C land
-  JavaThread* thread = JavaThread::current();
+  JavaThread* thread = THREAD->as_Java_thread();
   ThreadToNativeFromVM ttn(thread);
   // check whether zip archive contains name
   jint name_len;
@@ -501,7 +501,7 @@ void ClassLoader::trace_class_path(const char* msg, const char* name) {
   }
 }
 
-void ClassLoader::setup_bootstrap_search_path() {
+void ClassLoader::setup_bootstrap_search_path(TRAPS) {
   const char* sys_class_path = Arguments::get_sysclasspath();
   assert(sys_class_path != NULL, "System boot class path must not be NULL");
   if (PrintSharedArchiveAndExit) {
@@ -510,11 +510,11 @@
   } else {
     trace_class_path("bootstrap loader class path=", sys_class_path);
   }
-  setup_boot_search_path(sys_class_path);
+  setup_bootstrap_search_path_impl(sys_class_path, CHECK);
 }
 
 #if INCLUDE_CDS
-void ClassLoader::setup_app_search_path(const char *class_path) {
+void ClassLoader::setup_app_search_path(const char *class_path, TRAPS) {
   Arguments::assert_is_dumping_archive();
 
   ResourceMark rm;
@@ -522,7 +522,7 @@
   while (cp_stream.has_next()) {
     const char* path = cp_stream.get_next();
-    update_class_path_entry_list(path, false, false, false);
+    update_class_path_entry_list(path, false, false, false, CHECK);
   }
 }
 
@@ -542,7 +542,7 @@ void ClassLoader::add_to_module_path_entries(const char* path,
 }
 
 // Add a module path to the _module_path_entries list.
-void ClassLoader::update_module_path_entry_list(const char *path, TRAPS) { +void ClassLoader::setup_module_search_path(const char* path, TRAPS) { Arguments::assert_is_dumping_archive(); struct stat st; if (os::stat(path, &st) != 0) { @@ -562,10 +562,6 @@ void ClassLoader::update_module_path_entry_list(const char *path, TRAPS) { return; } -void ClassLoader::setup_module_search_path(const char* path, TRAPS) { - update_module_path_entry_list(path, THREAD); -} - #endif // INCLUDE_CDS void ClassLoader::close_jrt_image() { @@ -632,8 +628,7 @@ bool ClassLoader::is_in_patch_mod_entries(Symbol* module_name) { } // Set up the _jrt_entry if present and boot append path -void ClassLoader::setup_boot_search_path(const char *class_path) { - EXCEPTION_MARK; +void ClassLoader::setup_bootstrap_search_path_impl(const char *class_path, TRAPS) { ResourceMark rm(THREAD); ClasspathStream cp_stream(class_path); bool set_base_piece = true; @@ -675,7 +670,7 @@ void ClassLoader::setup_boot_search_path(const char *class_path) { } else { // Every entry on the system boot class path after the initial base piece, // which is set by os::set_boot_path(), is considered an appended entry. - update_class_path_entry_list(path, false, true, false); + update_class_path_entry_list(path, false, true, false, CHECK); } } } @@ -717,19 +712,27 @@ void ClassLoader::add_to_exploded_build_list(Symbol* module_sym, TRAPS) { } } +jzfile* ClassLoader::open_zip_file(const char* canonical_path, char** error_msg, JavaThread* thread) { + // enable call to C land + ThreadToNativeFromVM ttn(thread); + HandleMark hm(thread); + load_zip_library_if_needed(); + return (*ZipOpen)(canonical_path, error_msg); +} + ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const struct stat* st, bool throw_exception, bool is_boot_append, bool from_class_path_attr, TRAPS) { - JavaThread* thread = JavaThread::current(); + JavaThread* thread = THREAD->as_Java_thread(); ClassPathEntry* new_entry = NULL; if ((st->st_mode & S_IFMT) == S_IFREG) { ResourceMark rm(thread); // Regular file, should be a zip or jimage file // Canonicalized filename - char* canonical_path = NEW_RESOURCE_ARRAY_IN_THREAD(thread, char, JVM_MAXPATHLEN); - if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) { + const char* canonical_path = get_canonical_path(path, thread); + if (canonical_path == NULL) { // This matches the classic VM if (throw_exception) { THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL); @@ -743,14 +746,7 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str new_entry = new ClassPathImageEntry(jimage, canonical_path); } else { char* error_msg = NULL; - jzfile* zip; - { - // enable call to C land - ThreadToNativeFromVM ttn(thread); - HandleMark hm(thread); - load_zip_library_if_needed(); - zip = (*ZipOpen)(canonical_path, &error_msg); - } + jzfile* zip = open_zip_file(canonical_path, &error_msg, thread); if (zip != NULL && error_msg == NULL) { new_entry = new ClassPathZipEntry(zip, path, is_boot_append, from_class_path_attr); } else { @@ -789,18 +785,12 @@ ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path, bo struct stat st; if (os::stat(path, &st) == 0) { if ((st.st_mode & S_IFMT) == S_IFREG) { - char canonical_path[JVM_MAXPATHLEN]; - if (get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) { + JavaThread* thread = JavaThread::current(); + ResourceMark rm(thread); + const char* canonical_path = get_canonical_path(path, thread); + if (canonical_path != NULL) { char* 
error_msg = NULL; - jzfile* zip; - { - // enable call to C land - JavaThread* thread = JavaThread::current(); - ThreadToNativeFromVM ttn(thread); - HandleMark hm(thread); - load_zip_library_if_needed(); - zip = (*ZipOpen)(canonical_path, &error_msg); - } + jzfile* zip = open_zip_file(canonical_path, &error_msg, thread); if (zip != NULL && error_msg == NULL) { // create using canonical path return new ClassPathZipEntry(zip, canonical_path, is_boot_append, false); @@ -847,7 +837,8 @@ void ClassLoader::add_to_boot_append_entries(ClassPathEntry *new_entry) { // jdk/internal/loader/ClassLoaders$AppClassLoader instance. void ClassLoader::add_to_app_classpath_entries(const char* path, ClassPathEntry* entry, - bool check_for_duplicates) { + bool check_for_duplicates, + TRAPS) { #if INCLUDE_CDS assert(entry != NULL, "ClassPathEntry should not be NULL"); ClassPathEntry* e = _app_classpath_entries; @@ -871,7 +862,7 @@ void ClassLoader::add_to_app_classpath_entries(const char* path, } if (entry->is_jar_file()) { - ClassLoaderExt::process_jar_manifest(entry, check_for_duplicates); + ClassLoaderExt::process_jar_manifest(entry, check_for_duplicates, CHECK); } #endif } @@ -881,13 +872,12 @@ bool ClassLoader::update_class_path_entry_list(const char *path, bool check_for_duplicates, bool is_boot_append, bool from_class_path_attr, - bool throw_exception) { + TRAPS) { struct stat st; if (os::stat(path, &st) == 0) { // File or directory found ClassPathEntry* new_entry = NULL; - Thread* THREAD = Thread::current(); - new_entry = create_class_path_entry(path, &st, throw_exception, is_boot_append, from_class_path_attr, CHECK_(false)); + new_entry = create_class_path_entry(path, &st, /*throw_exception=*/true, is_boot_append, from_class_path_attr, CHECK_false); if (new_entry == NULL) { return false; } @@ -897,7 +887,7 @@ bool ClassLoader::update_class_path_entry_list(const char *path, if (is_boot_append) { add_to_boot_append_entries(new_entry); } else { - add_to_app_classpath_entries(path, new_entry, check_for_duplicates); + add_to_app_classpath_entries(path, new_entry, check_for_duplicates, CHECK_false); } return true; } else { @@ -1286,7 +1276,7 @@ InstanceKlass* ClassLoader::load_class(Symbol* name, bool search_append_only, TR return NULL; } - result->set_classpath_index(classpath_index, THREAD); + result->set_classpath_index(classpath_index); return result; } @@ -1339,25 +1329,22 @@ void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream PackageEntry* pkg_entry = ik->package(); if (FileMapInfo::get_number_of_shared_paths() > 0) { - char* canonical_path_table_entry = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN); - - // save the path from the file: protocol or the module name from the jrt: protocol - // if no protocol prefix is found, path is the same as stream->source() + // Save the path from the file: protocol or the module name from the jrt: protocol + // if no protocol prefix is found, path is the same as stream->source(). This path + // must be valid since the class has been successfully parsed. char* path = skip_uri_protocol(src); - char* canonical_class_src_path = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JVM_MAXPATHLEN); - bool success = get_canonical_path(path, canonical_class_src_path, JVM_MAXPATHLEN); - // The path is from the ClassFileStream. Since a ClassFileStream has been created successfully in functions - // such as ClassLoader::load_class(), its source path must be valid. 
- assert(success, "must be valid path"); + assert(path != NULL, "sanity"); for (int i = 0; i < FileMapInfo::get_number_of_shared_paths(); i++) { SharedClassPathEntry* ent = FileMapInfo::shared_path(i); - success = get_canonical_path(ent->name(), canonical_path_table_entry, JVM_MAXPATHLEN); // A shared path has been validated during its creation in ClassLoader::create_class_path_entry(), // it must be valid here. - assert(success, "must be valid path"); + assert(ent->name() != NULL, "sanity"); // If the path (from the class stream source) is the same as the shared // class or module path, then we have a match. - if (strcmp(canonical_path_table_entry, canonical_class_src_path) == 0) { + // src may come from the App/Platform class loaders, which would canonicalize + // the file name. We cannot use strcmp to check for equality against ent->name(). + // We must use os::same_files (which is faster than canonicalizing ent->name()). + if (os::same_files(ent->name(), path)) { // NULL pkg_entry and pkg_entry in an unnamed module implies the class // is from the -cp or boot loader append path which consists of -Xbootclasspath/a // and jvmti appended entries. @@ -1421,7 +1408,7 @@ void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream ik->name()->utf8_length()); assert(file_name != NULL, "invariant"); - ClassLoaderExt::record_result(classpath_index, ik, THREAD); + ClassLoaderExt::record_result(classpath_index, ik, CHECK); } #endif // INCLUDE_CDS @@ -1430,9 +1417,7 @@ void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream // this list has been created, it must not change order (see class PackageInfo) // it can be appended to and is by jvmti. -void ClassLoader::initialize() { - EXCEPTION_MARK; - +void ClassLoader::initialize(TRAPS) { if (UsePerfData) { // jvmstat performance counters NEWPERFTICKCOUNTER(_perf_accumulated_time, SUN_CLS, "time"); @@ -1464,7 +1449,7 @@ void ClassLoader::initialize() { // lookup java library entry points load_java_library(); // jimage library entry points are loaded below, in lookup_vm_options - setup_bootstrap_search_path(); + setup_bootstrap_search_path(CHECK); } char* lookup_vm_resource(JImageFile *jimage, const char *jimage_version, const char *path) { @@ -1501,16 +1486,16 @@ char* ClassLoader::lookup_vm_options() { } #if INCLUDE_CDS -void ClassLoader::initialize_shared_path() { +void ClassLoader::initialize_shared_path(TRAPS) { if (Arguments::is_dumping_archive()) { - ClassLoaderExt::setup_search_paths(); + ClassLoaderExt::setup_search_paths(CHECK); } } void ClassLoader::initialize_module_path(TRAPS) { if (Arguments::is_dumping_archive()) { - ClassLoaderExt::setup_module_paths(THREAD); - FileMapInfo::allocate_shared_path_table(); + ClassLoaderExt::setup_module_paths(CHECK); + FileMapInfo::allocate_shared_path_table(CHECK); } } @@ -1566,7 +1551,11 @@ int ClassLoader::compute_Object_vtable() { void classLoader_init1() { - ClassLoader::initialize(); + EXCEPTION_MARK; + ClassLoader::initialize(THREAD); + if (HAS_PENDING_EXCEPTION) { + vm_exit_during_initialization("ClassLoader::initialize() failed unexpectedly"); + } } // Complete the ClassPathEntry setup for the boot loader @@ -1599,18 +1588,18 @@ void ClassLoader::classLoader_init2(TRAPS) { } } -bool ClassLoader::get_canonical_path(const char* orig, char* out, int len) { - assert(orig != NULL && out != NULL && len > 0, "bad arguments"); - JavaThread* THREAD = JavaThread::current(); - ResourceMark rm(THREAD); - +char* ClassLoader::get_canonical_path(const char* orig, 
Thread* thread) { + assert(orig != NULL, "bad arguments"); + // caller needs to allocate ResourceMark for the following output buffer + char* canonical_path = NEW_RESOURCE_ARRAY_IN_THREAD(thread, char, JVM_MAXPATHLEN); + ResourceMark rm(thread); // os::native_path writes into orig_copy - char* orig_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, strlen(orig)+1); + char* orig_copy = NEW_RESOURCE_ARRAY_IN_THREAD(thread, char, strlen(orig)+1); strcpy(orig_copy, orig); - if ((CanonicalizeEntry)(os::native_path(orig_copy), out, len) < 0) { - return false; + if ((CanonicalizeEntry)(os::native_path(orig_copy), canonical_path, JVM_MAXPATHLEN) < 0) { + return NULL; } - return true; + return canonical_path; } void ClassLoader::create_javabase() { diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp index 8df5a50b7354aa2413f20bd0bc0b4480477f5363..d46fbe14bfe734deec3356d692fee240faafbf9c 100644 --- a/src/hotspot/share/classfile/classLoader.hpp +++ b/src/hotspot/share/classfile/classLoader.hpp @@ -222,11 +222,12 @@ class ClassLoader: AllStatic { CDS_ONLY(static ClassPathEntry* _last_app_classpath_entry;) CDS_ONLY(static ClassPathEntry* _module_path_entries;) CDS_ONLY(static ClassPathEntry* _last_module_path_entry;) - CDS_ONLY(static void setup_app_search_path(const char* class_path);) + CDS_ONLY(static void setup_app_search_path(const char* class_path, TRAPS);) CDS_ONLY(static void setup_module_search_path(const char* path, TRAPS);) static void add_to_app_classpath_entries(const char* path, ClassPathEntry* entry, - bool check_for_duplicates); + bool check_for_duplicates, + TRAPS); CDS_ONLY(static void add_to_module_path_entries(const char* path, ClassPathEntry* entry);) public: @@ -240,8 +241,8 @@ class ClassLoader: AllStatic { // - setup the boot loader's system class path // - setup the boot loader's patch mod entries, if present // - create the ModuleEntry for java.base - static void setup_bootstrap_search_path(); - static void setup_boot_search_path(const char *class_path); + static void setup_bootstrap_search_path(TRAPS); + static void setup_bootstrap_search_path_impl(const char *class_path, TRAPS); static void setup_patch_mod_entries(); static void create_javabase(); @@ -254,6 +255,7 @@ class ClassLoader: AllStatic { static int _libzip_loaded; // used to sync loading zip. static void release_load_zip_library(); static inline void load_zip_library_if_needed(); + static jzfile* open_zip_file(const char* canonical_path, char** error_msg, JavaThread* thread); public: static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st, @@ -263,7 +265,7 @@ class ClassLoader: AllStatic { // Canonicalizes path names, so strcmp will work properly. 
This is mainly // to avoid confusing the zip library - static bool get_canonical_path(const char* orig, char* out, int len); + static char* get_canonical_path(const char* orig, Thread* thread); static const char* file_name_for_class_name(const char* class_name, int class_name_len); static PackageEntry* get_package_entry(Symbol* pkg_name, ClassLoaderData* loader_data); @@ -272,8 +274,7 @@ class ClassLoader: AllStatic { bool check_for_duplicates, bool is_boot_append, bool from_class_path_attr, - bool throw_exception=true); - CDS_ONLY(static void update_module_path_entry_list(const char *path, TRAPS);) + TRAPS); static void print_bootclasspath(); // Timing @@ -335,9 +336,9 @@ class ClassLoader: AllStatic { static objArrayOop get_system_packages(TRAPS); // Initialization - static void initialize(); + static void initialize(TRAPS); static void classLoader_init2(TRAPS); - CDS_ONLY(static void initialize_shared_path();) + CDS_ONLY(static void initialize_shared_path(TRAPS);) CDS_ONLY(static void initialize_module_path(TRAPS);) static int compute_Object_vtable(); diff --git a/src/hotspot/share/classfile/classLoaderData.hpp b/src/hotspot/share/classfile/classLoaderData.hpp index bfad9a5cab91d7c50669d7cb81447b41eb97e745..d2c994ec1be3a405fca0824cf09d337e6392f810 100644 --- a/src/hotspot/share/classfile/classLoaderData.hpp +++ b/src/hotspot/share/classfile/classLoaderData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,8 +26,6 @@ #define SHARE_CLASSFILE_CLASSLOADERDATA_HPP #include "memory/allocation.hpp" -#include "memory/memRegion.hpp" -#include "memory/metaspace.hpp" #include "oops/oopHandle.hpp" #include "oops/weakHandle.hpp" #include "runtime/atomic.hpp" diff --git a/src/hotspot/share/classfile/classLoaderExt.cpp b/src/hotspot/share/classfile/classLoaderExt.cpp index bd39fc071e5df65432ecbda49d17bdb02456c323..f9003f3436841db80661129dac3524b1a38802de 100644 --- a/src/hotspot/share/classfile/classLoaderExt.cpp +++ b/src/hotspot/share/classfile/classLoaderExt.cpp @@ -65,7 +65,7 @@ void ClassLoaderExt::append_boot_classpath(ClassPathEntry* new_entry) { ClassLoader::add_to_boot_append_entries(new_entry); } -void ClassLoaderExt::setup_app_search_path() { +void ClassLoaderExt::setup_app_search_path(TRAPS) { Arguments::assert_is_dumping_archive(); _app_class_paths_start_index = ClassLoader::num_boot_classpath_entries(); char* app_class_path = os::strdup(Arguments::get_appclasspath()); @@ -77,7 +77,7 @@ void ClassLoaderExt::setup_app_search_path() { trace_class_path("app loader class path (skipped)=", app_class_path); } else { trace_class_path("app loader class path=", app_class_path); - ClassLoader::setup_app_search_path(app_class_path); + ClassLoader::setup_app_search_path(app_class_path, CHECK); } } @@ -88,7 +88,7 @@ void ClassLoaderExt::process_module_table(ModuleEntryTable* met, TRAPS) { char* path = m->location()->as_C_string(); if (strncmp(path, "file:", 5) == 0) { path = ClassLoader::skip_uri_protocol(path); - ClassLoader::setup_module_search_path(path, THREAD); + ClassLoader::setup_module_search_path(path, CHECK); } m = m->next(); } @@ -100,7 +100,7 @@ void ClassLoaderExt::setup_module_paths(TRAPS) { ClassLoader::num_app_classpath_entries(); Handle system_class_loader (THREAD, SystemDictionary::java_system_loader()); 
ModuleEntryTable* met = Modules::get_module_entry_table(system_class_loader); - process_module_table(met, THREAD); + process_module_table(met, CHECK); } char* ClassLoaderExt::read_manifest(ClassPathEntry* entry, jint *manifest_size, bool clean_text, TRAPS) { @@ -164,8 +164,7 @@ char* ClassLoaderExt::get_class_path_attr(const char* jar_path, char* manifest, } void ClassLoaderExt::process_jar_manifest(ClassPathEntry* entry, - bool check_for_duplicates) { - Thread* THREAD = Thread::current(); + bool check_for_duplicates, TRAPS) { ResourceMark rm(THREAD); jint manifest_size; char* manifest = read_manifest(entry, &manifest_size, CHECK); @@ -213,7 +212,8 @@ void ClassLoaderExt::process_jar_manifest(ClassPathEntry* entry, char* libname = NEW_RESOURCE_ARRAY(char, libname_len + 1); int n = os::snprintf(libname, libname_len + 1, "%.*s%s", dir_len, dir_name, file_start); assert((size_t)n == libname_len, "Unexpected number of characters in string"); - if (ClassLoader::update_class_path_entry_list(libname, true, false, true /* from_class_path_attr */)) { + bool status = ClassLoader::update_class_path_entry_list(libname, true, false, true /* from_class_path_attr */, CHECK); + if (status) { trace_class_path("library = ", libname); } else { trace_class_path("library (non-existent) = ", libname); @@ -226,8 +226,8 @@ void ClassLoaderExt::process_jar_manifest(ClassPathEntry* entry, } } -void ClassLoaderExt::setup_search_paths() { - ClassLoaderExt::setup_app_search_path(); +void ClassLoaderExt::setup_search_paths(TRAPS) { + ClassLoaderExt::setup_app_search_path(CHECK); } void ClassLoaderExt::record_result(const s2 classpath_index, diff --git a/src/hotspot/share/classfile/classLoaderExt.hpp b/src/hotspot/share/classfile/classLoaderExt.hpp index cca5bed24f5d94285efac25da0d16a83e565a069..983741bc87b204032a834d36baf100f6cbdffdf2 100644 --- a/src/hotspot/share/classfile/classLoaderExt.hpp +++ b/src/hotspot/share/classfile/classLoaderExt.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ private: }; static char* get_class_path_attr(const char* jar_path, char* manifest, jint manifest_size); - static void setup_app_search_path(); // Only when -Xshare:dump + static void setup_app_search_path(TRAPS); // Only when -Xshare:dump static void process_module_table(ModuleEntryTable* met, TRAPS); // index of first app JAR in shared classpath entry table static jshort _app_class_paths_start_index; @@ -61,12 +61,12 @@ private: static ClassPathEntry* find_classpath_entry_from_cache(const char* path, TRAPS); public: - static void process_jar_manifest(ClassPathEntry* entry, bool check_for_duplicates); + static void process_jar_manifest(ClassPathEntry* entry, bool check_for_duplicates, TRAPS); // Called by JVMTI code to add boot classpath static void append_boot_classpath(ClassPathEntry* new_entry); - static void setup_search_paths(); + static void setup_search_paths(TRAPS); static void setup_module_paths(TRAPS); static char* read_manifest(ClassPathEntry* entry, jint *manifest_size, TRAPS) { diff --git a/src/hotspot/share/classfile/classLoaderStats.hpp b/src/hotspot/share/classfile/classLoaderStats.hpp index c9f602721d43b498979019c48458c0c76ec81579..ce3f2331730b6f0436dcded49fe41bae20b7317b 100644 --- a/src/hotspot/share/classfile/classLoaderStats.hpp +++ b/src/hotspot/share/classfile/classLoaderStats.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "oops/klass.hpp" #include "oops/oop.hpp" #include "oops/oopsHierarchy.hpp" -#include "runtime/vmOperations.hpp" +#include "runtime/vmOperation.hpp" #include "services/diagnosticCommand.hpp" #include "utilities/resourceHash.hpp" diff --git a/src/hotspot/share/classfile/compactHashtable.cpp b/src/hotspot/share/classfile/compactHashtable.cpp index 9780f0bfbf30d9c2e1794439e6651239116c9f77..808de7c7fb266b6513cf0652691adb76d2281867 100644 --- a/src/hotspot/share/classfile/compactHashtable.cpp +++ b/src/hotspot/share/classfile/compactHashtable.cpp @@ -27,10 +27,9 @@ #include "classfile/compactHashtable.hpp" #include "classfile/javaClasses.hpp" #include "logging/logMessage.hpp" -#include "memory/dynamicArchive.hpp" +#include "memory/archiveBuilder.hpp" #include "memory/heapShared.inline.hpp" #include "memory/metadataFactory.hpp" -#include "memory/metaspaceShared.hpp" #include "runtime/arguments.hpp" #include "runtime/globals.hpp" #include "runtime/vmThread.hpp" @@ -74,11 +73,11 @@ CompactHashtableWriter::~CompactHashtableWriter() { size_t CompactHashtableWriter::estimate_size(int num_entries) { int num_buckets = calculate_num_buckets(num_entries); - size_t bucket_bytes = MetaspaceShared::ro_array_bytesize<u4>(num_buckets + 1); + size_t bucket_bytes = ArchiveBuilder::ro_array_bytesize<u4>(num_buckets + 1); // In worst case, we have no VALUE_ONLY_BUCKET_TYPE, so each entry takes 2 slots int entries_space = 2 * num_entries; - size_t entry_bytes = MetaspaceShared::ro_array_bytesize<u4>(entries_space); + size_t entry_bytes = ArchiveBuilder::ro_array_bytesize<u4>(entries_space); return bucket_bytes + entry_bytes @@ -109,8 +108,8 @@ void CompactHashtableWriter::allocate_table() { "Too many entries."); } - _compact_buckets = MetaspaceShared::new_ro_array<u4>(_num_buckets + 1); - _compact_entries = 
MetaspaceShared::new_ro_array<u4>(entries_space); + _compact_buckets = ArchiveBuilder::new_ro_array<u4>(_num_buckets + 1); + _compact_entries = ArchiveBuilder::new_ro_array<u4>(entries_space); _stats->bucket_count = _num_buckets; _stats->bucket_bytes = align_up(_compact_buckets->size() * BytesPerWord, diff --git a/src/hotspot/share/classfile/compactHashtable.hpp b/src/hotspot/share/classfile/compactHashtable.hpp index 4b27058b59d3a6fca9c24707287d414d784fc9a8..7eedb48b08b21d929803fb3d29acc535733b2411 100644 --- a/src/hotspot/share/classfile/compactHashtable.hpp +++ b/src/hotspot/share/classfile/compactHashtable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "oops/array.hpp" #include "oops/symbol.hpp" +#include "runtime/globals.hpp" #include "utilities/growableArray.hpp" @@ -48,6 +49,10 @@ public: int hashentry_bytes; int bucket_count; int bucket_bytes; + + CompactHashtableStats() : + hashentry_count(0), hashentry_bytes(0), + bucket_count(0), bucket_bytes(0) {} }; #if INCLUDE_CDS diff --git a/src/hotspot/share/classfile/javaAssertions.hpp b/src/hotspot/share/classfile/javaAssertions.hpp index b2fa038b24a2111d7b19aa481d659e2781551988..58d03eacd481d861720337caaef2556ce439a506 100644 --- a/src/hotspot/share/classfile/javaAssertions.hpp +++ b/src/hotspot/share/classfile/javaAssertions.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "oops/objArrayOop.hpp" #include "oops/typeArrayOop.hpp" +#include "runtime/handles.hpp" #include "utilities/exceptions.hpp" #include "utilities/ostream.hpp" diff --git a/src/hotspot/share/classfile/klassFactory.cpp b/src/hotspot/share/classfile/klassFactory.cpp index a02d01020b50293579d9af734f56c0cadba0b8c4..629f5f8c0c103d593d61fb65ea0ee052914e2d1f 100644 --- a/src/hotspot/share/classfile/klassFactory.cpp +++ b/src/hotspot/share/classfile/klassFactory.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. +* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
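Note on the TRAPS parameters added in the classLoaderExt.hpp hunk above: threading TRAPS through setup_search_paths and process_jar_manifest lets these paths report a pending Java exception to the caller instead of failing internally. A minimal sketch of the convention, using simplified stand-ins for the real macros in HotSpot's utilities/exceptions.hpp (all names below are illustrative, not the JDK definitions):

    #include <cstdio>

    struct Thread { const char* pending_exception = nullptr; };

    #define TRAPS   Thread* __the_thread__
    #define THREAD  __the_thread__
    #define HAS_PENDING_EXCEPTION (THREAD->pending_exception != nullptr)
    // CHECK is spliced into the argument list: it closes the call with THREAD,
    // then returns early if the callee left an exception pending.
    #define CHECK   THREAD); if (HAS_PENDING_EXCEPTION) return; (void)(0

    void setup_search_paths_like(TRAPS) {   // analogous to setup_search_paths(TRAPS)
      THREAD->pending_exception = "java.lang.OutOfMemoryError";
    }

    void caller(TRAPS) {
      setup_search_paths_like(CHECK);       // expands to ...(THREAD); if (...) return; ...
      std::puts("only reached when no exception is pending");
    }

    int main() {
      Thread t;
      caller(&t);
      std::printf("pending: %s\n", t.pending_exception ? t.pending_exception : "none");
      return 0;
    }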
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,6 @@ #include "classfile/classLoadInfo.hpp" #include "classfile/klassFactory.hpp" #include "memory/filemap.hpp" -#include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "prims/jvmtiEnvBase.hpp" #include "prims/jvmtiRedefineClasses.hpp" @@ -98,7 +97,7 @@ InstanceKlass* KlassFactory::check_shared_class_file_load_hook( } if (class_loader.is_null()) { - new_ik->set_classpath_index(path_index, THREAD); + new_ik->set_classpath_index(path_index); } return new_ik; @@ -206,10 +205,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream, const ClassInstanceInfo* cl_inst_info = cl_info.class_hidden_info_ptr(); InstanceKlass* result = parser.create_instance_klass(old_stream != stream, *cl_inst_info, CHECK_NULL); - - if (result == NULL) { - return NULL; - } + assert(result != NULL, "result cannot be null with no pending exception"); if (cached_class_file != NULL) { // JVMTI: we have an InstanceKlass now, tell it about the cached bytes diff --git a/src/hotspot/share/classfile/loaderConstraints.cpp b/src/hotspot/share/classfile/loaderConstraints.cpp index d0bf7052821513f27c4ec533304b7ae9c325b36a..3caaddb3cb2956489f0fdb19eee407f4fd9e04b6 100644 --- a/src/hotspot/share/classfile/loaderConstraints.cpp +++ b/src/hotspot/share/classfile/loaderConstraints.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "classfile/loaderConstraints.hpp" #include "logging/log.hpp" #include "memory/resourceArea.hpp" +#include "oops/klass.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/safepoint.hpp" diff --git a/src/hotspot/share/classfile/moduleEntry.cpp b/src/hotspot/share/classfile/moduleEntry.cpp index 08c97c9e7a7f96ba3c50c5ddc0e30db2b55e2718..7d9c9001b40f9b9613d532eec1315839a42e2849 100644 --- a/src/hotspot/share/classfile/moduleEntry.cpp +++ b/src/hotspot/share/classfile/moduleEntry.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,6 @@ #include "memory/archiveUtils.hpp" #include "memory/filemap.hpp" #include "memory/heapShared.hpp" -#include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oopHandle.inline.hpp" @@ -368,6 +367,11 @@ ModuleEntryTable::~ModuleEntryTable() { assert(new_entry_free_list() == NULL, "entry present on ModuleEntryTable's free list"); } +void ModuleEntry::set_loader_data(ClassLoaderData* cld) { + assert(!cld->has_class_mirror_holder(), "Unexpected has_class_mirror_holder cld"); + _loader_data = cld; +} + #if INCLUDE_CDS_JAVA_HEAP typedef ResourceHashtable< const ModuleEntry*, @@ -380,7 +384,7 @@ static ArchivedModuleEntries* _archive_modules_entries = NULL; ModuleEntry* ModuleEntry::allocate_archived_entry() const { assert(is_named(), "unnamed packages/modules are not archived"); - ModuleEntry* archived_entry = (ModuleEntry*)MetaspaceShared::read_write_space_alloc(sizeof(ModuleEntry)); + ModuleEntry* archived_entry = (ModuleEntry*)ArchiveBuilder::rw_region_alloc(sizeof(ModuleEntry)); memcpy((void*)archived_entry, (void*)this, sizeof(ModuleEntry)); if (_archive_modules_entries == NULL) { @@ -405,7 +409,7 @@ Array<ModuleEntry*>* ModuleEntry::write_growable_array(GrowableArray<ModuleEntry*>* array) { Array<ModuleEntry*>* archived_array = NULL; int length = (array == NULL) ? 0 : array->length(); if (length > 0) { - archived_array = MetaspaceShared::new_ro_array<ModuleEntry*>(length); + archived_array = ArchiveBuilder::new_ro_array<ModuleEntry*>(length); for (int i = 0; i < length; i++) { ModuleEntry* archived_entry = get_archived_entry(array->at(i)); archived_array->at_put(i, archived_entry); @@ -513,7 +517,7 @@ void ModuleEntryTable::iterate_symbols(MetaspaceClosure* closure) { } Array<ModuleEntry*>* ModuleEntryTable::allocate_archived_entries() { - Array<ModuleEntry*>* archived_modules = MetaspaceShared::new_rw_array<ModuleEntry*>(number_of_entries()); + Array<ModuleEntry*>* archived_modules = ArchiveBuilder::new_rw_array<ModuleEntry*>(number_of_entries()); int n = 0; for (int i = 0; i < table_size(); ++i) { for (ModuleEntry* m = bucket(i); m != NULL; m = m->next()) { diff --git a/src/hotspot/share/classfile/moduleEntry.hpp b/src/hotspot/share/classfile/moduleEntry.hpp index c86904bc823a4e8c7bc15dd26babfbc6a24202e3..959fcac74e3e855ac2a827d88ac7796d8b7a1b8f 100644 --- a/src/hotspot/share/classfile/moduleEntry.hpp +++ b/src/hotspot/share/classfile/moduleEntry.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,6 @@ #define SHARE_CLASSFILE_MODULEENTRY_HPP #include "jni.h" -#include "classfile/classLoaderData.hpp" #include "oops/oopHandle.hpp" #include "oops/symbol.hpp" #include "runtime/mutexLocker.hpp" @@ -46,6 +45,7 @@ #define JAVA_BASE_NAME_LEN 9 template <class T> class Array; +class ClassLoaderData; class MetaspaceClosure; class ModuleClosure; @@ -112,11 +112,7 @@ public: void set_shared_protection_domain(ClassLoaderData *loader_data, Handle pd); ClassLoaderData* loader_data() const { return _loader_data; } - - void set_loader_data(ClassLoaderData* cld) { - assert(!cld->has_class_mirror_holder(), "Unexpected has_class_mirror_holder cld"); - _loader_data = cld; - } + void set_loader_data(ClassLoaderData* cld); Symbol* version() const { return _version; } void set_version(Symbol* version); diff --git a/src/hotspot/share/classfile/modules.cpp b/src/hotspot/share/classfile/modules.cpp index e8b107e34ded76b86036cb907f9f123c60f320a9..f5b5fe8ada59aa338f5c73229a9df43940f534a7 100644 --- a/src/hotspot/share/classfile/modules.cpp +++ b/src/hotspot/share/classfile/modules.cpp @@ -100,13 +100,12 @@ static PackageEntryTable* get_package_entry_table(Handle h_loader) { return loader_cld->packages(); } -static ModuleEntry* get_module_entry(jobject module, TRAPS) { - oop m = JNIHandles::resolve_non_null(module); - if (!java_lang_Module::is_instance(m)) { +static ModuleEntry* get_module_entry(Handle module, TRAPS) { + if (!java_lang_Module::is_instance(module())) { THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), "module is not an instance of type java.lang.Module"); } - return java_lang_Module::module_entry(m); + return java_lang_Module::module_entry(module()); } @@ -272,23 +271,22 @@ void throw_dup_pkg_exception(const char* module_name, PackageEntry* package, TRA } } -void Modules::define_module(jobject module, jboolean is_open, jstring version, +void Modules::define_module(Handle module, jboolean is_open, jstring version, jstring location, jobjectArray packages, TRAPS) { check_cds_restrictions(CHECK); ResourceMark rm(THREAD); - if (module == NULL) { + if (module.is_null()) { THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Null module object"); } - Handle module_handle(THREAD, JNIHandles::resolve_non_null(module)); - if (!java_lang_Module::is_instance(module_handle())) { + if (!java_lang_Module::is_instance(module())) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "module is not an instance of type java.lang.Module"); } int module_name_len; - char* module_name = get_module_name(module_handle(), module_name_len, CHECK); + char* module_name = get_module_name(module(), module_name_len, CHECK); if (module_name == NULL) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Module name cannot be null"); @@ -301,11 +299,11 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version, // Special handling of java.base definition if (strcmp(module_name, JAVA_BASE_NAME) == 0) { assert(is_open == JNI_FALSE, "java.base module cannot be open"); - define_javabase_module(module_handle, version, location, packages_h, num_packages, CHECK); + define_javabase_module(module, version, location, packages_h, num_packages, CHECK); return; } - oop loader = java_lang_Module::loader(module_handle()); + oop loader = java_lang_Module::loader(module()); // Make sure loader is not the jdk.internal.reflect.DelegatingClassLoader. 
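Note on the moduleEntry.hpp hunk above: swapping the classLoaderData.hpp include for a forward declaration is why set_loader_data loses its inline body. A member that dereferences ClassLoaderData needs the full class definition, which is now visible only in the .cpp file. A compilable sketch of the pattern (names are illustrative, not the HotSpot types):

    // --- header: only a forward declaration is visible ---
    class ClassLoaderData;                     // size and members unknown here

    class ModuleEntryLike {
      ClassLoaderData* _loader_data = nullptr;
    public:
      ClassLoaderData* loader_data() const { return _loader_data; }  // pointer copy: fine inline
      void set_loader_data(ClassLoaderData* cld);  // dereferences cld, so defined out of line
    };

    // --- .cpp: the full definition is in scope here ---
    #include <cassert>
    class ClassLoaderData {
    public:
      bool has_class_mirror_holder() const { return false; }
    };

    void ModuleEntryLike::set_loader_data(ClassLoaderData* cld) {
      assert(!cld->has_class_mirror_holder() && "unexpected has_class_mirror_holder cld");
      _loader_data = cld;
    }

    int main() {
      ClassLoaderData cld;
      ModuleEntryLike m;
      m.set_loader_data(&cld);
      return m.loader_data() == &cld ? 0 : 1;
    }

The payoff is a lighter include graph: headers that need only a ModuleEntry pointer no longer pull in all of classLoaderData.hpp.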
if (loader != java_lang_ClassLoader::non_reflection_class_loader(loader)) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), @@ -402,7 +400,7 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version, if (!dupl_modules && existing_pkg == NULL) { if (module_table->lookup_only(module_symbol) == NULL) { // Create the entry for this module in the class loader's module entry table. - ModuleEntry* module_entry = module_table->locked_create_entry(module_handle, + ModuleEntry* module_entry = module_table->locked_create_entry(module, (is_open == JNI_TRUE), module_symbol, version_symbol, location_symbol, loader_data); assert(module_entry != NULL, "module_entry creation failed"); @@ -419,7 +417,7 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version, } // Store pointer to ModuleEntry record in java.lang.Module object. - java_lang_Module::set_module_entry(module_handle(), module_entry); + java_lang_Module::set_module_entry(module(), module_entry); } else { dupl_modules = true; } @@ -476,7 +474,7 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version, } #if INCLUDE_CDS_JAVA_HEAP -void Modules::define_archived_modules(jobject platform_loader, jobject system_loader, TRAPS) { +void Modules::define_archived_modules(Handle h_platform_loader, Handle h_system_loader, TRAPS) { assert(UseSharedSpaces && MetaspaceShared::use_full_module_graph(), "must be"); // We don't want the classes used by the archived full module graph to be redefined by JVMTI. @@ -490,19 +488,17 @@ void Modules::define_archived_modules(jobject platform_loader, jobject system_lo // Patch any previously loaded class's module field with java.base's java.lang.Module. ModuleEntryTable::patch_javabase_entries(java_base_module); - if (platform_loader == NULL) { + if (h_platform_loader.is_null()) { THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Null platform loader object"); } - if (system_loader == NULL) { + if (h_system_loader.is_null()) { THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Null system loader object"); } - Handle h_platform_loader(THREAD, JNIHandles::resolve_non_null(platform_loader)); ClassLoaderData* platform_loader_data = SystemDictionary::register_loader(h_platform_loader); ClassLoaderDataShared::restore_java_platform_loader_from_archive(platform_loader_data); - Handle h_system_loader(THREAD, JNIHandles::resolve_non_null(system_loader)); ClassLoaderData* system_loader_data = SystemDictionary::register_loader(h_system_loader); ClassLoaderDataShared::restore_java_system_loader_from_archive(system_loader_data); } @@ -515,27 +511,26 @@ void Modules::check_cds_restrictions(TRAPS) { } #endif // INCLUDE_CDS_JAVA_HEAP -void Modules::set_bootloader_unnamed_module(jobject module, TRAPS) { +void Modules::set_bootloader_unnamed_module(Handle module, TRAPS) { ResourceMark rm(THREAD); - if (module == NULL) { + if (module.is_null()) { THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Null module object"); } - Handle module_handle(THREAD, JNIHandles::resolve(module)); - if (!java_lang_Module::is_instance(module_handle())) { + if (!java_lang_Module::is_instance(module())) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "module is not an instance of type java.lang.Module"); } // Ensure that this is an unnamed module - oop name = java_lang_Module::name(module_handle()); + oop name = java_lang_Module::name(module()); if (name != NULL) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "boot loader's unnamed module's 
java.lang.Module has a name"); } // Validate java_base's loader is the boot loader. - oop loader = java_lang_Module::loader(module_handle()); + oop loader = java_lang_Module::loader(module()); if (loader != NULL) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Class loader must be the boot class loader"); @@ -547,19 +542,19 @@ void Modules::set_bootloader_unnamed_module(jobject module, TRAPS) { ClassLoaderData* boot_loader_data = ClassLoaderData::the_null_class_loader_data(); ModuleEntry* unnamed_module = boot_loader_data->unnamed_module(); assert(unnamed_module != NULL, "boot loader's unnamed ModuleEntry not defined"); - unnamed_module->set_module(boot_loader_data->add_handle(module_handle)); + unnamed_module->set_module(boot_loader_data->add_handle(module)); // Store pointer to the ModuleEntry in the unnamed module's java.lang.Module object. - java_lang_Module::set_module_entry(module_handle(), unnamed_module); + java_lang_Module::set_module_entry(module(), unnamed_module); } -void Modules::add_module_exports(jobject from_module, jstring package_name, jobject to_module, TRAPS) { +void Modules::add_module_exports(Handle from_module, jstring package_name, Handle to_module, TRAPS) { check_cds_restrictions(CHECK); if (package_name == NULL) { THROW_MSG(vmSymbols::java_lang_NullPointerException(), "package is null"); } - if (from_module == NULL) { + if (from_module.is_null()) { THROW_MSG(vmSymbols::java_lang_NullPointerException(), "from_module is null"); } @@ -573,7 +568,7 @@ void Modules::add_module_exports(jobject from_module, jstring package_name, jobj if (!from_module_entry->is_named() || from_module_entry->is_open()) return; ModuleEntry* to_module_entry; - if (to_module == NULL) { + if (to_module.is_null()) { to_module_entry = NULL; // It's an unqualified export. } else { to_module_entry = get_module_entry(to_module, CHECK); @@ -619,19 +614,19 @@ void Modules::add_module_exports(jobject from_module, jstring package_name, jobj } -void Modules::add_module_exports_qualified(jobject from_module, jstring package, - jobject to_module, TRAPS) { +void Modules::add_module_exports_qualified(Handle from_module, jstring package, + Handle to_module, TRAPS) { check_cds_restrictions(CHECK); - if (to_module == NULL) { + if (to_module.is_null()) { THROW_MSG(vmSymbols::java_lang_NullPointerException(), "to_module is null"); } add_module_exports(from_module, package, to_module, CHECK); } -void Modules::add_reads_module(jobject from_module, jobject to_module, TRAPS) { +void Modules::add_reads_module(Handle from_module, Handle to_module, TRAPS) { check_cds_restrictions(CHECK); - if (from_module == NULL) { + if (from_module.is_null()) { THROW_MSG(vmSymbols::java_lang_NullPointerException(), "from_module is null"); } @@ -643,7 +638,7 @@ void Modules::add_reads_module(jobject from_module, jobject to_module, TRAPS) { } ModuleEntry* to_module_entry; - if (to_module != NULL) { + if (!to_module.is_null()) { to_module_entry = get_module_entry(to_module, CHECK); if (to_module_entry == NULL) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), @@ -735,9 +730,9 @@ jobject Modules::get_named_module(Handle h_loader, const char* package_name, TRA } // Export package in module to all unnamed modules. 
-void Modules::add_module_exports_to_all_unnamed(jobject module, jstring package_name, TRAPS) { +void Modules::add_module_exports_to_all_unnamed(Handle module, jstring package_name, TRAPS) { check_cds_restrictions(CHECK); - if (module == NULL) { + if (module.is_null()) { THROW_MSG(vmSymbols::java_lang_NullPointerException(), "module is null"); } diff --git a/src/hotspot/share/classfile/modules.hpp b/src/hotspot/share/classfile/modules.hpp index b76aa8b4aed47896100198da6c3a3c271ace7c42..461d7b514eeb35e15f93711651772c299c13dcb8 100644 --- a/src/hotspot/share/classfile/modules.hpp +++ b/src/hotspot/share/classfile/modules.hpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. +* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,10 +50,10 @@ public: // * A package already exists in another module for this class loader // * Module is an unnamed module // NullPointerExceptions are thrown if module is null. - static void define_module(jobject module, jboolean is_open, jstring version, + static void define_module(Handle module, jboolean is_open, jstring version, jstring location, jobjectArray packages, TRAPS); - static void define_archived_modules(jobject platform_loader, jobject system_loader, + static void define_archived_modules(Handle h_platform_loader, Handle h_system_loader, TRAPS) NOT_CDS_JAVA_HEAP_RETURN; // Provides the java.lang.Module for the unnamed module defined @@ -64,7 +64,7 @@ public: // * Module is not a subclass of java.lang.Module // * Module's class loader is not the boot loader // NullPointerExceptions are thrown if module is null. - static void set_bootloader_unnamed_module(jobject module, TRAPS); + static void set_bootloader_unnamed_module(Handle module, TRAPS); // This either does a qualified export of package in module from_module to module // to_module or, if to_module is null, does an unqualified export of package. @@ -76,7 +76,7 @@ public: // * Package is not syntactically correct // * Package is not defined for from_module's class loader // * Package is not in module from_module. - static void add_module_exports(jobject from_module, jstring package, jobject to_module, TRAPS); + static void add_module_exports(Handle from_module, jstring package, Handle to_module, TRAPS); // This does a qualified export of package in module from_module to module // to_module. Any "." in the package name will be converted to "/" @@ -87,7 +87,7 @@ public: // * Package is not syntactically correct // * Package is not defined for from_module's class loader // * Package is not in module from_module. - static void add_module_exports_qualified(jobject from_module, jstring package, jobject to_module, TRAPS); + static void add_module_exports_qualified(Handle from_module, jstring package, Handle to_module, TRAPS); // add_reads_module adds module to_module to the list of modules that from_module // can read. If from_module is the same as to_module then this is a no-op. @@ -95,7 +95,7 @@ public: // from_module can read all current and future unnamed modules). // An IllegalArgumentException is thrown if from_module is null or either (non-null) // module does not exist. - static void add_reads_module(jobject from_module, jobject to_module, TRAPS); + static void add_reads_module(Handle from_module, Handle to_module, TRAPS); // Return the java.lang.Module object for this class object. 
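The recurring change through the modules.cpp and modules.hpp hunks above is the same refactoring: jobject parameters become Handle. The opaque JNI reference is resolved once at the VM entry point, and everything below works on a GC-safe Handle, so the per-callee JNIHandles::resolve_non_null calls and local module_handle temporaries disappear. A toy model of the shape of that refactoring, with simplified stand-ins for HotSpot's real oop and Handle types:

    #include <cassert>
    #include <cstdio>

    struct oopDesc { int dummy; };
    using oop = oopDesc*;        // direct object pointer
    using jobject = void*;       // opaque indirect JNI reference

    oop resolve_non_null(jobject ref) {       // stand-in for JNIHandles::resolve_non_null
      assert(ref != nullptr);
      return static_cast<oop>(ref);
    }

    class Handle {                            // stand-in for HotSpot's Handle
      oop _obj;
    public:
      explicit Handle(oop o = nullptr) : _obj(o) {}
      oop operator()() const { return _obj; } // mirrors the module() accessor syntax
      bool is_null() const { return _obj == nullptr; }
    };

    // After the refactoring, callees take Handle and test is_null() instead of
    // resolving the jobject themselves.
    void define_module(Handle module) {
      if (module.is_null()) { std::puts("would throw NullPointerException"); return; }
      std::puts("module defined");
    }

    int main() {
      oopDesc m{};
      jobject raw = &m;                              // what arrives across JNI
      define_module(Handle(resolve_non_null(raw)));  // resolve once, at the boundary
      return 0;
    }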
static jobject get_module(jclass clazz, TRAPS); @@ -112,7 +112,7 @@ public: // If either module or package is null then NullPointerException is thrown. // If module or package is bad, or module is unnamed, or package is not in // module then IllegalArgumentException is thrown. - static void add_module_exports_to_all_unnamed(jobject module, jstring package, TRAPS); + static void add_module_exports_to_all_unnamed(Handle module, jstring package, TRAPS); // Return TRUE iff package is defined by loader static bool is_package_defined(Symbol* package_name, Handle h_loader, TRAPS); diff --git a/src/hotspot/share/classfile/packageEntry.cpp b/src/hotspot/share/classfile/packageEntry.cpp index 8e5c826dbad3033612c05f77b71a2664e2b7d846..b32a6a511021b575df8907b46d0a923e64f3bb91 100644 --- a/src/hotspot/share/classfile/packageEntry.cpp +++ b/src/hotspot/share/classfile/packageEntry.cpp @@ -23,13 +23,13 @@ */ #include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" #include "classfile/moduleEntry.hpp" #include "classfile/packageEntry.hpp" #include "classfile/vmSymbols.hpp" #include "logging/log.hpp" #include "memory/archiveBuilder.hpp" #include "memory/archiveUtils.hpp" -#include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "oops/array.hpp" #include "oops/symbol.hpp" @@ -208,7 +208,7 @@ static ArchivedPackageEntries* _archived_packages_entries = NULL; PackageEntry* PackageEntry::allocate_archived_entry() const { assert(!in_unnamed_module(), "unnamed packages/modules are not archived"); - PackageEntry* archived_entry = (PackageEntry*)MetaspaceShared::read_write_space_alloc(sizeof(PackageEntry)); + PackageEntry* archived_entry = (PackageEntry*)ArchiveBuilder::rw_region_alloc(sizeof(PackageEntry)); memcpy((void*)archived_entry, (void*)this, sizeof(PackageEntry)); if (_archived_packages_entries == NULL) { @@ -278,7 +278,7 @@ Array* PackageEntryTable::allocate_archived_entries() { } } - Array* archived_packages = MetaspaceShared::new_rw_array(n); + Array* archived_packages = ArchiveBuilder::new_rw_array(n); for (n = 0, i = 0; i < table_size(); ++i) { for (PackageEntry* p = bucket(i); p != NULL; p = p->next()) { if (p->module()->name() != NULL) { diff --git a/src/hotspot/share/classfile/placeholders.cpp b/src/hotspot/share/classfile/placeholders.cpp index 4f2b8d60b60bc090ec2bf225d2556918727fb507..551b26e3099d3dc39ed06c6371831e49d0021186 100644 --- a/src/hotspot/share/classfile/placeholders.cpp +++ b/src/hotspot/share/classfile/placeholders.cpp @@ -104,7 +104,7 @@ void PlaceholderEntry::set_threadQ(SeenThread* seenthread, PlaceholderTable::cla // Doubly-linked list of Threads per action for class/classloader pair // Class circularity support: links in thread before loading superclass -// bootstrapsearchpath support: links in a thread before load_instance_class +// bootstrap loader support: links in a thread before load_instance_class // definers: use as queue of define requestors, including owner of // define token. 
Appends for debugging of requestor order void PlaceholderEntry::add_seen_thread(Thread* thread, PlaceholderTable::classloadAction action) { @@ -112,6 +112,9 @@ void PlaceholderEntry::add_seen_thread(Thread* thread, PlaceholderTable::classlo SeenThread* threadEntry = new SeenThread(thread); SeenThread* seen = actionToQueue(action); + assert(action != PlaceholderTable::LOAD_INSTANCE || seen == NULL, + "Only one LOAD_INSTANCE allowed at a time"); + if (seen == NULL) { set_threadQ(threadEntry, action); return; diff --git a/src/hotspot/share/classfile/placeholders.hpp b/src/hotspot/share/classfile/placeholders.hpp index abb0dc18ffae0788cd1395f369de4f969acb0451..d85ac9adfdc843fb7a5549746a4794abbf6ff6d2 100644 --- a/src/hotspot/share/classfile/placeholders.hpp +++ b/src/hotspot/share/classfile/placeholders.hpp @@ -120,8 +120,8 @@ class PlaceholderEntry : public HashtableEntry { InstanceKlass* _instanceKlass; // InstanceKlass from successful define SeenThread* _superThreadQ; // doubly-linked queue of Threads loading a superclass for this class SeenThread* _loadInstanceThreadQ; // loadInstance thread - // can be multiple threads if classloader object lock broken by application - // or if classloader supports parallel classloading + // This can't be multiple threads since class loading waits for + // this token to be removed. SeenThread* _defineThreadQ; // queue of Threads trying to define this class // including _definer diff --git a/src/hotspot/share/classfile/resolutionErrors.cpp b/src/hotspot/share/classfile/resolutionErrors.cpp index d248a5e55ec0f69996c71acb9e11c2c7809e1e81..47eca140606cfb94cfae66420247b9444f7102aa 100644 --- a/src/hotspot/share/classfile/resolutionErrors.cpp +++ b/src/hotspot/share/classfile/resolutionErrors.cpp @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" +#include "oops/klass.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/safepoint.hpp" diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp index 6c2f61a26c7873bdd0bcce1f5f762c3cfe03abf5..8fe73d6759cadf24983b2094d1994994978df2ba 100644 --- a/src/hotspot/share/classfile/stringTable.cpp +++ b/src/hotspot/share/classfile/stringTable.cpp @@ -34,7 +34,7 @@ #include "logging/log.hpp" #include "logging/logStream.hpp" #include "memory/allocation.inline.hpp" -#include "memory/filemap.hpp" +#include "memory/archiveBuilder.hpp" #include "memory/heapShared.inline.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" @@ -760,7 +760,7 @@ void StringTable::write_to_archive(const DumpedInternedStrings* dumped_interned_ assert(HeapShared::is_heap_object_archiving_allowed(), "must be"); _shared_table.reset(); - CompactHashtableWriter writer(_items_count, &MetaspaceShared::stats()->string); + CompactHashtableWriter writer(_items_count, ArchiveBuilder::string_stats()); // Copy the interned strings into the "string space" within the java heap CopyToArchive copier(&writer); diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp index 776a74a27060c0fdd681059a20bd4d92a27c10ce..26f3c6ad379a3740c0f96de6a3d6be2482814ab0 100644 --- a/src/hotspot/share/classfile/symbolTable.cpp +++ b/src/hotspot/share/classfile/symbolTable.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/altHashing.hpp" +#include "classfile/classLoaderData.hpp" #include "classfile/compactHashtable.hpp" #include 
"classfile/javaClasses.hpp" #include "classfile/symbolTable.hpp" @@ -31,7 +32,6 @@ #include "memory/archiveBuilder.hpp" #include "memory/dynamicArchive.hpp" #include "memory/metaspaceClosure.hpp" -#include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" @@ -601,8 +601,7 @@ size_t SymbolTable::estimate_size_for_archive() { } void SymbolTable::write_to_archive(GrowableArray* symbols) { - CompactHashtableWriter writer(int(_items_count), - &MetaspaceShared::stats()->symbol); + CompactHashtableWriter writer(int(_items_count), ArchiveBuilder::symbol_stats()); copy_shared_symbol_table(symbols, &writer); if (!DynamicDumpSharedSpaces) { _shared_table.reset(); diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp index 2ede5d1aacb2f3b95f3fd352398e1321fd804fd9..13644a298e0e885564dba1a3e9aeb723a19ae415 100644 --- a/src/hotspot/share/classfile/systemDictionary.cpp +++ b/src/hotspot/share/classfile/systemDictionary.cpp @@ -185,7 +185,7 @@ bool SystemDictionary::is_platform_class_loader(oop class_loader) { return (class_loader->klass() == vmClasses::jdk_internal_loader_ClassLoaders_PlatformClassLoader_klass()); } -Handle SystemDictionary::compute_loader_lock_object(Handle class_loader) { +Handle SystemDictionary::get_loader_lock_or_null(Handle class_loader) { // If class_loader is NULL or parallelCapable, the JVM doesn't acquire a lock while loading. if (is_parallelCapable(class_loader)) { return Handle(); @@ -369,8 +369,8 @@ InstanceKlass* SystemDictionary::resolve_super_or_fail(Symbol* class_name, #if INCLUDE_CDS if (DumpSharedSpaces) { // Special processing for handling UNREGISTERED shared classes. - InstanceKlass* k = SystemDictionaryShared::dump_time_resolve_super_or_fail(class_name, - super_name, class_loader, protection_domain, is_superclass, CHECK_NULL); + InstanceKlass* k = SystemDictionaryShared::lookup_super_for_unregistered_class(class_name, + super_name, is_superclass); if (k) { return k; } @@ -570,12 +570,9 @@ void SystemDictionary::double_lock_wait(Thread* thread, Handle lockObject) { // super class loading here. // This also is critical in cases where the original thread gets stalled // even in non-circularity situations. -// Note: must call resolve_super_or_fail even if null super - -// to force placeholder entry creation for this class for circularity detection -// Caller must check for pending exception // Returns non-null Klass* if other thread has completed load -// and we are done, -// If return null Klass* and no pending exception, the caller must load the class +// and we are done. If this returns a null Klass* and no pending exception, +// the caller must load the class. InstanceKlass* SystemDictionary::handle_parallel_super_load( Symbol* name, Symbol* superclassname, Handle class_loader, Handle protection_domain, Handle lockObject, TRAPS) { @@ -584,14 +581,7 @@ InstanceKlass* SystemDictionary::handle_parallel_super_load( Dictionary* dictionary = loader_data->dictionary(); unsigned int name_hash = dictionary->compute_hash(name); - // superk is not used, resolve_super called for circularity check only - // This code is reached in two situations. One if this thread - // is loading the same class twice (e.g. ClassCircularity, or - // java.lang.instrument). - // The second is if another thread started the resolve_super first - // and has not yet finished. - // In both cases the original caller will clean up the placeholder - // entry on error. 
+ // superk is not used; resolve_super_or_fail is called for circularity check only. Klass* superk = SystemDictionary::resolve_super_or_fail(name, superclassname, class_loader, @@ -603,7 +593,6 @@ InstanceKlass* SystemDictionary::handle_parallel_super_load( // Serial class loaders and bootstrap classloader do wait for superclass loads if (!class_loader.is_null() && is_parallelCapable(class_loader)) { MutexLocker mu(THREAD, SystemDictionary_lock); - // Check if classloading completed while we were loading superclass or waiting return dictionary->find_class(name_hash, name); } @@ -703,7 +692,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, // the define. // ParallelCapable Classloaders and the bootstrap classloader // do not acquire lock here. - Handle lockObject = compute_loader_lock_object(class_loader); + Handle lockObject = get_loader_lock_or_null(class_loader); ObjectLocker ol(lockObject, THREAD); // Check again (after locking) if the class already exists in SystemDictionary @@ -759,10 +748,8 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, // but only allows a single thread to load a class/classloader pair. // The LOAD_INSTANCE placeholder is the mechanism for mutual exclusion. // case 2. parallelCapable user level classloaders - // These class loaders don't lock the object until load_instance_class is - // called after this placeholder is added. - // Allow parallel classloading of a class/classloader pair where mutual - // exclusion is provided by this lock in the class loader Java code. + // These class loaders lock a per-class object lock when ClassLoader.loadClass() + // is called. A LOAD_INSTANCE placeholder isn't used for mutual exclusion. // case 3. traditional classloaders that rely on the classloader object lock // There should be no need for need for LOAD_INSTANCE, except: // case 4. traditional class loaders that break the classloader object lock @@ -771,65 +758,64 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, // and that lock is still held when calling classloader's loadClass. // For these classloaders, we ensure that the first requestor // completes the load and other requestors wait for completion. - { + if (class_loader.is_null() || !is_parallelCapable(class_loader)) { MutexLocker mu(THREAD, SystemDictionary_lock); - if (class_loader.is_null() || !is_parallelCapable(class_loader)) { - PlaceholderEntry* oldprobe = placeholders()->get_entry(name_hash, name, loader_data); - if (oldprobe != NULL) { - // only need check_seen_thread once, not on each loop - // 6341374 java/lang/Instrument with -Xcomp - if (oldprobe->check_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE)) { - throw_circularity_error = true; - } else { - // case 3: traditional: should never see load_in_progress. - while (!class_has_been_loaded && oldprobe != NULL && oldprobe->instance_load_in_progress()) { - - // case 1: bootstrap classloader: prevent futile classloading, - // wait on first requestor - if (class_loader.is_null()) { - SystemDictionary_lock->wait(); - } else { + PlaceholderEntry* oldprobe = placeholders()->get_entry(name_hash, name, loader_data); + if (oldprobe != NULL) { + // only need check_seen_thread once, not on each loop + // 6341374 java/lang/Instrument with -Xcomp + if (oldprobe->check_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE)) { + throw_circularity_error = true; + } else { + // case 3: traditional: should never see load_in_progress. 
+ while (!class_has_been_loaded && oldprobe != NULL && oldprobe->instance_load_in_progress()) { + + // case 1: bootstrap classloader: prevent futile classloading, + // wait on first requestor + if (class_loader.is_null()) { + SystemDictionary_lock->wait(); + } else { // case 4: traditional with broken classloader lock. wait on first // requestor. - double_lock_wait(THREAD, lockObject); - } - // Check if classloading completed while we were waiting - InstanceKlass* check = dictionary->find_class(name_hash, name); - if (check != NULL) { - // Klass is already loaded, so just return it - loaded_class = check; - class_has_been_loaded = true; - } - // check if other thread failed to load and cleaned up - oldprobe = placeholders()->get_entry(name_hash, name, loader_data); + double_lock_wait(THREAD, lockObject); + } + // Check if classloading completed while we were waiting + InstanceKlass* check = dictionary->find_class(name_hash, name); + if (check != NULL) { + // Klass is already loaded, so just return it + loaded_class = check; + class_has_been_loaded = true; } + // check if other thread failed to load and cleaned up + oldprobe = placeholders()->get_entry(name_hash, name, loader_data); } } } - // All cases: add LOAD_INSTANCE while holding the SystemDictionary_lock + // Add LOAD_INSTANCE while holding the SystemDictionary_lock if (!throw_circularity_error && !class_has_been_loaded) { - PlaceholderEntry* newprobe = placeholders()->find_and_add(name_hash, name, loader_data, - PlaceholderTable::LOAD_INSTANCE, NULL, THREAD); - load_instance_added = true; - // For class loaders that do not acquire the classloader object lock, - // if they did not catch another thread holding LOAD_INSTANCE, - // need a check analogous to the acquire ObjectLocker/find_class - // i.e. now that we hold the LOAD_INSTANCE token on loading this class/CL - // one final check if the load has already completed - // class loaders holding the ObjectLock shouldn't find the class here + // For the bootclass loader, if the thread did not catch another thread holding + // the LOAD_INSTANCE token, we need to check whether it completed loading + // while holding the SD_lock. InstanceKlass* check = dictionary->find_class(name_hash, name); if (check != NULL) { // Klass is already loaded, so return it after checking/adding protection domain loaded_class = check; class_has_been_loaded = true; + } else { + // Now we've got the LOAD_INSTANCE token. Threads will wait on loading to complete for this thread. + PlaceholderEntry* newprobe = placeholders()->find_and_add(name_hash, name, loader_data, + PlaceholderTable::LOAD_INSTANCE, + NULL, + THREAD); + load_instance_added = true; } } } // must throw error outside of owning lock if (throw_circularity_error) { - assert(!HAS_PENDING_EXCEPTION && load_instance_added == false,"circularity error cleanup"); + assert(!HAS_PENDING_EXCEPTION && !load_instance_added, "circularity error cleanup"); ResourceMark rm(THREAD); THROW_MSG_NULL(vmSymbols::java_lang_ClassCircularityError(), name->as_C_string()); } @@ -913,16 +899,15 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, // _dictionary->bucket(index) is read here, so the caller will not see // the new entry. 
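The restructured block above reads most easily as a protocol: under SystemDictionary_lock a thread either finds the class already in the dictionary, finds another thread's LOAD_INSTANCE placeholder and waits, or claims the token itself and performs the load. A toy model of that first-requestor-loads protocol (simplified stand-ins; the real code additionally handles circularity errors and loaders with broken object locks):

    #include <condition_variable>
    #include <map>
    #include <mutex>
    #include <set>
    #include <string>

    struct ToyDictionary {
      std::mutex lock;                      // stand-in for SystemDictionary_lock
      std::condition_variable cv;
      std::map<std::string, int> loaded;    // name -> loaded "klass"
      std::set<std::string> load_instance;  // stand-in for LOAD_INSTANCE placeholders

      int do_load(const std::string&) { return 42; }   // hypothetical class loading

      int resolve(const std::string& name) {
        std::unique_lock<std::mutex> lk(lock);
        for (;;) {
          auto it = loaded.find(name);
          if (it != loaded.end()) return it->second;    // someone else finished it
          if (load_instance.insert(name).second) break; // we now hold the token
          cv.wait(lk);                                  // first requestor is loading
        }
        lk.unlock();
        int k = do_load(name);              // the actual load happens outside the lock
        lk.lock();
        loaded.emplace(name, k);
        load_instance.erase(name);          // drop the token and wake waiters
        cv.notify_all();
        return k;
      }
    };

    int main() {
      ToyDictionary d;                      // single-threaded here; the protocol pays
      return d.resolve("p/Foo") == d.resolve("p/Foo") ? 0 : 1;  // off under contention
    }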
-Klass* SystemDictionary::find(Symbol* class_name, - Handle class_loader, - Handle protection_domain, - TRAPS) { +InstanceKlass* SystemDictionary::find_instance_klass(Symbol* class_name, + Handle class_loader, + Handle protection_domain) { // The result of this call should be consistent with the result // of the call to resolve_instance_class_or_null(). // See evaluation 6790209 and 4474172 for more details. - class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader())); - ClassLoaderData* loader_data = ClassLoaderData::class_loader_data_or_null(class_loader()); + oop class_loader_oop = java_lang_ClassLoader::non_reflection_class_loader(class_loader()); + ClassLoaderData* loader_data = ClassLoaderData::class_loader_data_or_null(class_loader_oop); if (loader_data == NULL) { // If the ClassLoaderData has not been setup, @@ -932,16 +917,14 @@ Klass* SystemDictionary::find(Symbol* class_name, Dictionary* dictionary = loader_data->dictionary(); unsigned int name_hash = dictionary->compute_hash(class_name); - return dictionary->find(name_hash, class_name, - protection_domain); + return dictionary->find(name_hash, class_name, protection_domain); } // Look for a loaded instance or array klass by name. Do not do any loading. // return NULL in case of error. Klass* SystemDictionary::find_instance_or_array_klass(Symbol* class_name, Handle class_loader, - Handle protection_domain, - TRAPS) { + Handle protection_domain) { Klass* k = NULL; assert(class_name != NULL, "class name must be non NULL"); @@ -955,13 +938,13 @@ Klass* SystemDictionary::find_instance_or_array_klass(Symbol* class_name, if (t != T_OBJECT) { k = Universe::typeArrayKlassObj(t); } else { - k = SystemDictionary::find(ss.as_symbol(), class_loader, protection_domain, THREAD); + k = SystemDictionary::find_instance_klass(ss.as_symbol(), class_loader, protection_domain); } if (k != NULL) { k = k->array_klass_or_null(ndims); } } else { - k = find(class_name, class_loader, protection_domain, THREAD); + k = find_instance_klass(class_name, class_loader, protection_domain); } return k; } @@ -1007,8 +990,9 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name, loader_data, cl_info, CHECK_NULL); + assert(k != NULL, "no klass created"); - if ((cl_info.is_hidden() || is_unsafe_anon_class) && k != NULL) { + if (cl_info.is_hidden() || is_unsafe_anon_class) { // Hidden classes that are not strong and unsafe anonymous classes must update // ClassLoaderData holder so that they can be unloaded when the mirror is no // longer referenced. @@ -1052,7 +1036,8 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name, // JVM_DefineClass). // Note: class_name can be NULL. In that case we do not know the name of // the class until we have parsed the stream. - +// This function either returns an InstanceKlass or throws an exception. It does +// not return NULL without a pending exception. InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name, Handle class_loader, Handle protection_domain, @@ -1065,7 +1050,7 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name, // Classloaders that support parallelism, e.g. bootstrap classloader, // do not acquire lock here - Handle lockObject = compute_loader_lock_object(class_loader); + Handle lockObject = get_loader_lock_or_null(class_loader); ObjectLocker ol(lockObject, THREAD); // Parse the stream and create a klass. 
@@ -1085,9 +1070,6 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name, #endif if (k == NULL) { - if (st->buffer() == NULL) { - return NULL; - } ClassLoadInfo cl_info(protection_domain); k = KlassFactory::create_from_stream(st, class_name, loader_data, cl_info, CHECK_NULL); } @@ -1233,7 +1215,7 @@ bool SystemDictionary::check_shared_class_super_type(InstanceKlass* klass, Insta if (!super_type->is_shared_unregistered_class() && super_type->class_loader_data() != NULL) { // Check if the super class is loaded by the current class_loader Symbol* name = super_type->name(); - Klass* check = find(name, class_loader, protection_domain, CHECK_0); + InstanceKlass* check = find_instance_klass(name, class_loader, protection_domain); if (check == super_type) { return true; } @@ -1356,7 +1338,7 @@ InstanceKlass* SystemDictionary::load_shared_class(InstanceKlass* ik, ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader()); { HandleMark hm(THREAD); - Handle lockObject = compute_loader_lock_object(class_loader); + Handle lockObject = get_loader_lock_or_null(class_loader); ObjectLocker ol(lockObject, THREAD); // prohibited package check assumes all classes loaded from archive call // restore_unshareable_info which calls ik->set_package() @@ -1374,7 +1356,7 @@ void SystemDictionary::load_shared_class_misc(InstanceKlass* ik, ClassLoaderData // package was loaded. if (loader_data->is_the_null_class_loader_data()) { int path_index = ik->shared_classpath_index(); - ik->set_classpath_index(path_index, THREAD); + ik->set_classpath_index(path_index); } // notify a class loaded from shared object @@ -1558,7 +1540,7 @@ void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_load // hole with systemDictionary updates and check_constraints if (!is_parallelCapable(class_loader)) { assert(ObjectSynchronizer::current_thread_holds_lock(THREAD->as_Java_thread(), - compute_loader_lock_object(class_loader)), + get_loader_lock_or_null(class_loader)), "define called without lock"); } @@ -1812,17 +1794,6 @@ void SystemDictionary::initialize(TRAPS) { } } -#ifdef ASSERT -// Verify that this placeholder exists since this class is in the middle of loading. -void verify_placeholder(Symbol* class_name, ClassLoaderData* loader_data) { - // Only parallel capable class loaders use placeholder table for define class. - assert_locked_or_safepoint(SystemDictionary_lock); - unsigned int name_hash = placeholders()->compute_hash(class_name); - Symbol* ph_check = placeholders()->find_entry(name_hash, class_name, loader_data); - assert(ph_check != NULL, "This placeholder should exist"); -} -#endif // ASSERT - // Constraints on class loaders. The details of the algorithm can be // found in the OOPSLA'98 paper "Dynamic Class Loading in the Java // Virtual Machine" by Sheng Liang and Gilad Bracha. The basic idea is @@ -1862,8 +1833,6 @@ void SystemDictionary::check_constraints(unsigned int name_hash, } } - DEBUG_ONLY(if (is_parallelCapable(class_loader)) verify_placeholder(name, loader_data)); - if (throwException == false) { if (constraints()->check_or_update(k, class_loader, name) == false) { throwException = true; @@ -1918,13 +1887,13 @@ void SystemDictionary::update_dictionary(unsigned int hash, // loader constraints might know about a class that isn't fully loaded // yet and these will be ignored. 
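The new comment earlier in this hunk pins down the resolve_from_stream contract: it returns an InstanceKlass or throws, never NULL with no pending exception, which is what makes the deleted st->buffer() NULL path dead code. A tiny sketch of why that contract turns the NULL check into an assert (stand-in types; the real code uses the CHECK_NULL macro):

    #include <cassert>

    struct Thread { const char* pending_exception = nullptr; };

    int* create_from_stream(Thread* t, bool malformed) {
      if (malformed) { t->pending_exception = "ClassFormatError"; return nullptr; }
      static int klass = 1;
      return &klass;
    }

    int* resolve_from_stream(Thread* t, bool malformed) {
      int* k = create_from_stream(t, malformed);
      if (t->pending_exception != nullptr) return nullptr;  // CHECK_NULL-style early return
      // Past the early return, a NULL result would be a VM bug, not a user error:
      assert(k != nullptr && "cannot be null with no pending exception");
      return k;
    }

    int main() {
      Thread t;
      return resolve_from_stream(&t, /*malformed=*/false) != nullptr ? 0 : 1;
    }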
Klass* SystemDictionary::find_constrained_instance_or_array_klass( - Symbol* class_name, Handle class_loader, TRAPS) { + Symbol* class_name, Handle class_loader, Thread* THREAD) { // First see if it has been loaded directly. // Force the protection domain to be null. (This removes protection checks.) Handle no_protection_domain; Klass* klass = find_instance_or_array_klass(class_name, class_loader, - no_protection_domain, CHECK_NULL); + no_protection_domain); if (klass != NULL) return klass; diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp index 2ef0e9b9b5b2a1360ea4566030d65d9e4ceff793..cdc26435c0a51200d66a13b01ea5fe1d1d6b5e33 100644 --- a/src/hotspot/share/classfile/systemDictionary.hpp +++ b/src/hotspot/share/classfile/systemDictionary.hpp @@ -139,15 +139,14 @@ class SystemDictionary : AllStatic { TRAPS); // Lookup an already loaded class. If not found NULL is returned. - static Klass* find(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS); + static InstanceKlass* find_instance_klass(Symbol* class_name, Handle class_loader, Handle protection_domain); // Lookup an already loaded instance or array class. // Do not make any queries to class loaders; consult only the cache. // If not found NULL is returned. static Klass* find_instance_or_array_klass(Symbol* class_name, Handle class_loader, - Handle protection_domain, - TRAPS); + Handle protection_domain); // Lookup an instance or array class that has already been loaded // either into the given class loader, or else into another class @@ -172,7 +171,7 @@ class SystemDictionary : AllStatic { // to local linkage and access checks. static Klass* find_constrained_instance_or_array_klass(Symbol* class_name, Handle class_loader, - TRAPS); + Thread* THREAD); static void classes_do(MetaspaceClosure* it); // Iterate over all methods in all klasses @@ -387,7 +386,7 @@ protected: static InstanceKlass* load_shared_boot_class(Symbol* class_name, PackageEntry* pkg_entry, TRAPS); - static Handle compute_loader_lock_object(Handle class_loader); + static Handle get_loader_lock_or_null(Handle class_loader); static InstanceKlass* find_or_define_instance_class(Symbol* class_name, Handle class_loader, InstanceKlass* k, TRAPS); diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp index f45f70275297a04b986fa2fbedf53cf346463f9f..719e6a9b79678a4522fa237011ee1c80f0329356 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -1013,7 +1013,7 @@ InstanceKlass* SystemDictionaryShared::find_or_load_shared_class( // Note: currently, find_or_load_shared_class is called only from // JVM_FindLoadedClass and used for PlatformClassLoader and AppClassLoader, // which are parallel-capable loaders, so a lock here is NOT taken. - assert(compute_loader_lock_object(class_loader) == NULL, "ObjectLocker not required"); + assert(get_loader_lock_or_null(class_loader) == NULL, "ObjectLocker not required"); { MutexLocker mu(THREAD, SystemDictionary_lock); InstanceKlass* check = dictionary->find_class(d_hash, name); @@ -1196,18 +1196,23 @@ bool SystemDictionaryShared::add_unregistered_class(InstanceKlass* k, TRAPS) { return created; } -// This function is called to resolve the super/interfaces of shared classes for -// non-built-in loaders. 
E.g., SharedClass in the below example +// This function is called to lookup the super/interfaces of shared classes for +// unregistered loaders. E.g., SharedClass in the below example // where "super:" (and optionally "interface:") have been specified. // // java/lang/Object id: 0 -// Interface id: 2 super: 0 source: cust.jar +// Interface id: 2 super: 0 source: cust.jar // SharedClass id: 4 super: 0 interfaces: 2 source: cust.jar -InstanceKlass* SystemDictionaryShared::dump_time_resolve_super_or_fail( - Symbol* class_name, Symbol* super_name, Handle class_loader, - Handle protection_domain, bool is_superclass, TRAPS) { +InstanceKlass* SystemDictionaryShared::lookup_super_for_unregistered_class( + Symbol* class_name, Symbol* super_name, bool is_superclass) { - assert(DumpSharedSpaces, "only when dumping"); + assert(DumpSharedSpaces, "only when static dumping"); + + if (!ClassListParser::is_parsing_thread()) { + // Unregistered classes can be created only by ClassListParser::_parsing_thread. + + return NULL; + } ClassListParser* parser = ClassListParser::instance(); if (parser == NULL) { @@ -1600,7 +1605,9 @@ void SystemDictionaryShared::add_lambda_proxy_class(InstanceKlass* caller_ik, InstanceKlass* nest_host = caller_ik->nest_host(THREAD); DumpTimeSharedClassInfo* info = _dumptime_table->get(lambda_ik); - if (info != NULL && !lambda_ik->is_non_strong_hidden() && is_builtin(lambda_ik) && is_builtin(caller_ik)) { + if (info != NULL && !lambda_ik->is_non_strong_hidden() && is_builtin(lambda_ik) && is_builtin(caller_ik) + // Don't include the lambda proxy if its nest host is not in the "linked" state. + && nest_host->is_linked()) { // Set _is_archived_lambda_proxy in DumpTimeSharedClassInfo so that the lambda_ik // won't be excluded during dumping of shared archive. See ExcludeDumpTimeSharedClasses. 
info->_is_archived_lambda_proxy = true; @@ -1670,7 +1677,7 @@ InstanceKlass* SystemDictionaryShared::prepare_shared_lambda_proxy_class(Instanc InstanceKlass* caller_ik, TRAPS) { Handle class_loader(THREAD, caller_ik->class_loader()); Handle protection_domain; - PackageEntry* pkg_entry = get_package_entry_from_class(caller_ik, class_loader); + PackageEntry* pkg_entry = caller_ik->package(); if (caller_ik->class_loader() != NULL) { protection_domain = SystemDictionaryShared::init_security_info(class_loader, caller_ik, pkg_entry, CHECK_NULL); } @@ -2038,7 +2045,7 @@ public: log_info(cds,dynamic)("Archiving hidden %s", info._proxy_klasses->at(0)->external_name()); size_t byte_size = sizeof(RunTimeLambdaProxyClassInfo); RunTimeLambdaProxyClassInfo* runtime_info = - (RunTimeLambdaProxyClassInfo*)MetaspaceShared::read_only_space_alloc(byte_size); + (RunTimeLambdaProxyClassInfo*)ArchiveBuilder::ro_region_alloc(byte_size); runtime_info->init(key, info); unsigned int hash = runtime_info->hash(); u4 delta = _builder->any_to_offset_u4((void*)runtime_info); @@ -2086,7 +2093,7 @@ public: if (!info.is_excluded() && info.is_builtin() == _is_builtin) { size_t byte_size = RunTimeSharedClassInfo::byte_size(info._klass, info.num_verifier_constraints(), info.num_loader_constraints()); RunTimeSharedClassInfo* record; - record = (RunTimeSharedClassInfo*)MetaspaceShared::read_only_space_alloc(byte_size); + record = (RunTimeSharedClassInfo*)ArchiveBuilder::ro_region_alloc(byte_size); record->init(info); unsigned int hash; diff --git a/src/hotspot/share/classfile/systemDictionaryShared.hpp b/src/hotspot/share/classfile/systemDictionaryShared.hpp index db02009b8807e26841d4518731da097b53ff67fe..6a7939e4c8506897353d4b2efa738435963268cf 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.hpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp @@ -25,6 +25,7 @@ #ifndef SHARE_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP #define SHARE_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP +#include "classfile/classLoaderData.hpp" #include "classfile/packageEntry.hpp" #include "classfile/systemDictionary.hpp" #include "memory/filemap.hpp" @@ -246,12 +247,8 @@ public: static bool is_sharing_possible(ClassLoaderData* loader_data); static bool add_unregistered_class(InstanceKlass* k, TRAPS); - static InstanceKlass* dump_time_resolve_super_or_fail(Symbol* class_name, - Symbol* super_name, - Handle class_loader, - Handle protection_domain, - bool is_superclass, - TRAPS); + static InstanceKlass* lookup_super_for_unregistered_class(Symbol* class_name, + Symbol* super_name, bool is_superclass); static void init_dumptime_info(InstanceKlass* k) NOT_CDS_RETURN; static void remove_dumptime_info(InstanceKlass* k) NOT_CDS_RETURN; diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp index 03c716b2362e79fc34d5706a02a83fde5ad6f516..4ff2d4793bb76df9f13d66e246f5990ef6eb33c9 100644 --- a/src/hotspot/share/classfile/verifier.cpp +++ b/src/hotspot/share/classfile/verifier.cpp @@ -3149,7 +3149,7 @@ void ClassVerifier::verify_return_value( if (return_type == VerificationType::bogus_type()) { verify_error(ErrorContext::bad_type(bci, current_frame->stack_top_ctx(), TypeOrigin::signature(return_type)), - "Method expects a return value"); + "Method does not expect a return value"); return; } bool match = return_type.is_assignable_from(type, this, false, CHECK_VERIFY(this)); diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp index 
848cc98a4c2cb29bf87f3c8759a4ef56d964ee52..f326ccc738df7eabd63c97bcdff8bdb1524351ac 100644 --- a/src/hotspot/share/code/compiledIC.cpp +++ b/src/hotspot/share/code/compiledIC.cpp @@ -35,6 +35,7 @@ #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include "oops/klass.inline.hpp" #include "oops/method.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp index 0d6ffe789c58e50ac083157dbbb44dde56292510..74c38880742bc2ad830d605ecaa9f6fd231b19e6 100644 --- a/src/hotspot/share/code/compiledMethod.cpp +++ b/src/hotspot/share/code/compiledMethod.cpp @@ -36,6 +36,8 @@ #include "logging/log.hpp" #include "logging/logTag.hpp" #include "memory/resourceArea.hpp" +#include "oops/compiledICHolder.inline.hpp" +#include "oops/klass.inline.hpp" #include "oops/methodData.hpp" #include "oops/method.inline.hpp" #include "prims/methodHandles.hpp" diff --git a/src/hotspot/share/code/debugInfo.cpp b/src/hotspot/share/code/debugInfo.cpp index aeed7f4d937d6ab2e488ba88445fd2488bb2fcc4..47d6f7b9300da203a9906c420bdcf6b00d338a90 100644 --- a/src/hotspot/share/code/debugInfo.cpp +++ b/src/hotspot/share/code/debugInfo.cpp @@ -159,11 +159,11 @@ void ObjectValue::read_object(DebugInfoReadStream* stream) { } void ObjectValue::write_on(DebugInfoWriteStream* stream) { - if (_visited) { + if (is_visited()) { stream->write_int(OBJECT_ID_CODE); stream->write_int(_id); } else { - _visited = true; + set_visited(true); stream->write_int(is_auto_box() ? AUTO_BOX_OBJECT_CODE : OBJECT_CODE); stream->write_int(_id); _klass->write_on(stream); diff --git a/src/hotspot/share/code/debugInfo.hpp b/src/hotspot/share/code/debugInfo.hpp index bfba1523b083fad128d972799c4bcf6bfc95e17e..3f213783a218afc6b24effa529fde4e9e0630d15 100644 --- a/src/hotspot/share/code/debugInfo.hpp +++ b/src/hotspot/share/code/debugInfo.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
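The ObjectValue::write_on hunk above, together with the one-line set_visited fix that follows, implements a standard back-reference encoding: the first time a value is written it is marked visited and fully serialized; every later write emits only an id reference. A minimal standalone version of the scheme (the stream type and codes are illustrative):

    #include <cstdio>
    #include <vector>

    enum { OBJECT_CODE = 1, OBJECT_ID_CODE = 2 };

    struct Stream { std::vector<int> out; void write_int(int v) { out.push_back(v); } };

    struct Value {
      int id;
      bool visited = false;
      void write_on(Stream& s) {
        if (visited) {                // already serialized: emit a back-reference
          s.write_int(OBJECT_ID_CODE);
          s.write_int(id);
        } else {
          visited = true;             // the bug fixed below: the visit must be recorded
          s.write_int(OBJECT_CODE);
          s.write_int(id);
          // ... full payload would follow here ...
        }
      }
    };

    int main() {
      Stream s; Value v{7};
      v.write_on(s); v.write_on(s);   // second write is just a reference
      std::printf("%zu ints written\n", s.out.size());
      return 0;
    }

With the old set_visited(bool) body forcing _visited to false, repeated writes would re-serialize the full record instead of emitting the id reference, which is exactly what the debugInfo.hpp change below corrects.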
* * This code is free software; you can redistribute it and/or modify it @@ -149,7 +149,7 @@ class ObjectValue: public ScopeValue { bool is_visited() const { return _visited; } void set_value(oop value); - void set_visited(bool visited) { _visited = false; } + void set_visited(bool visited) { _visited = visited; } // Serialization of debugging information void read_object(DebugInfoReadStream* stream); diff --git a/src/hotspot/share/code/dependencies.cpp b/src/hotspot/share/code/dependencies.cpp index 862e673fc5db915fda03c04c5a3a9f8089643ffd..6c077d3f092e00c026c580efd6158339a918bcc8 100644 --- a/src/hotspot/share/code/dependencies.cpp +++ b/src/hotspot/share/code/dependencies.cpp @@ -99,32 +99,12 @@ void Dependencies::assert_abstract_with_unique_concrete_subtype(ciKlass* ctxk, c assert_common_2(abstract_with_unique_concrete_subtype, ctxk, conck); } -void Dependencies::assert_abstract_with_no_concrete_subtype(ciKlass* ctxk) { - check_ctxk_abstract(ctxk); - assert_common_1(abstract_with_no_concrete_subtype, ctxk); -} - -void Dependencies::assert_concrete_with_no_concrete_subtype(ciKlass* ctxk) { - check_ctxk_concrete(ctxk); - assert_common_1(concrete_with_no_concrete_subtype, ctxk); -} - void Dependencies::assert_unique_concrete_method(ciKlass* ctxk, ciMethod* uniqm) { check_ctxk(ctxk); check_unique_method(ctxk, uniqm); assert_common_2(unique_concrete_method, ctxk, uniqm); } -void Dependencies::assert_abstract_with_exclusive_concrete_subtypes(ciKlass* ctxk, ciKlass* k1, ciKlass* k2) { - check_ctxk(ctxk); - assert_common_3(abstract_with_exclusive_concrete_subtypes_2, ctxk, k1, k2); -} - -void Dependencies::assert_exclusive_concrete_methods(ciKlass* ctxk, ciMethod* m1, ciMethod* m2) { - check_ctxk(ctxk); - assert_common_3(exclusive_concrete_methods_2, ctxk, m1, m2); -} - void Dependencies::assert_has_no_finalizable_subclasses(ciKlass* ctxk) { check_ctxk(ctxk); assert_common_1(no_finalizable_subclasses, ctxk); @@ -266,47 +246,6 @@ void Dependencies::assert_common_2(DepType dept, deps->append(x1); } -void Dependencies::assert_common_3(DepType dept, - ciKlass* ctxk, ciBaseObject* x, ciBaseObject* x2) { - assert(dep_context_arg(dept) == 0, "sanity"); - assert(dep_args(dept) == 3, "sanity"); - log_dependency(dept, ctxk, x, x2); - GrowableArray<ciBaseObject*>* deps = _deps[dept]; - - // try to normalize an unordered pair: - bool swap = false; - switch (dept) { - case abstract_with_exclusive_concrete_subtypes_2: - swap = (x->ident() > x2->ident() && x->as_metadata()->as_klass() != ctxk); - break; - case exclusive_concrete_methods_2: - swap = (x->ident() > x2->ident() && x->as_metadata()->as_method()->holder() != ctxk); - break; - default: - break; - } - if (swap) { ciBaseObject* t = x; x = x2; x2 = t; } - - // see if the same (or a similar) dep is already recorded - if (note_dep_seen(dept, x) && note_dep_seen(dept, x2)) { - // look in this bucket for redundant assertions - const int stride = 3; - for (int i = deps->length(); (i -= stride) >= 0; ) { - ciBaseObject* y = deps->at(i+1); - ciBaseObject* y2 = deps->at(i+2); - if (x == y && x2 == y2) { // same subjects; check the context - if (maybe_merge_ctxk(deps, i+0, ctxk)) { - return; - } - } - } - } - // append the assertion in the correct bucket: - deps->append(ctxk); - deps->append(x); - deps->append(x2); -} - #if INCLUDE_JVMCI bool Dependencies::maybe_merge_ctxk(GrowableArray<DepValue>* deps, int ctxk_i, DepValue ctxk2_dv) { @@ -473,10 +412,7 @@ size_t Dependencies::estimate_size_in_bytes() { ciKlass* Dependencies::ctxk_encoded_as_null(DepType dept, ciBaseObject* x) { 
 switch (dept) {
- case abstract_with_exclusive_concrete_subtypes_2:
- return x->as_metadata()->as_klass();
 case unique_concrete_method:
- case exclusive_concrete_methods_2:
 return x->as_metadata()->as_method()->holder();
 default:
 return NULL; // let NULL be NULL
@@ -486,11 +422,7 @@ ciKlass* Dependencies::ctxk_encoded_as_null(DepType dept, ciBaseObject* x) {
 Klass* Dependencies::ctxk_encoded_as_null(DepType dept, Metadata* x) {
 assert(must_be_in_vm(), "raw oops here");
 switch (dept) {
- case abstract_with_exclusive_concrete_subtypes_2:
- assert(x->is_klass(), "sanity");
- return (Klass*) x;
 case unique_concrete_method:
- case exclusive_concrete_methods_2:
 assert(x->is_method(), "sanity");
 return ((Method*)x)->method_holder();
 default:
@@ -593,11 +525,7 @@ const char* Dependencies::_dep_name[TYPE_LIMIT] = {
 "evol_method",
 "leaf_type",
 "abstract_with_unique_concrete_subtype",
- "abstract_with_no_concrete_subtype",
- "concrete_with_no_concrete_subtype",
 "unique_concrete_method",
- "abstract_with_exclusive_concrete_subtypes_2",
- "exclusive_concrete_methods_2",
 "no_finalizable_subclasses",
 "call_site_target_value"
 };
@@ -607,11 +535,7 @@ int Dependencies::_dep_args[TYPE_LIMIT] = {
 1, // evol_method m
 1, // leaf_type ctxk
 2, // abstract_with_unique_concrete_subtype ctxk, k
- 1, // abstract_with_no_concrete_subtype ctxk
- 1, // concrete_with_no_concrete_subtype ctxk
 2, // unique_concrete_method ctxk, m
- 3, // unique_concrete_subtypes_2 ctxk, k1, k2
- 3, // unique_concrete_methods_2 ctxk, m1, m2
 1, // no_finalizable_subclasses ctxk
 2 // call_site_target_value call_site, method_handle
 };
@@ -1198,17 +1122,18 @@ class ClassHierarchyWalker {
 } else if (!k->is_instance_klass()) {
 return false; // no methods to find in an array type
 } else {
+ InstanceKlass* ik = InstanceKlass::cast(k);
 // Search class hierarchy first, skipping private implementations
 // as they never override any inherited methods
- Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
- if (!Dependencies::is_concrete_method(m, k)) {
+ Method* m = ik->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
+ if (!Dependencies::is_concrete_method(m, ik)) {
 // Check for re-abstraction of method
- if (!k->is_interface() && m != NULL && m->is_abstract()) {
+ if (!ik->is_interface() && m != NULL && m->is_abstract()) {
 // Found a matching abstract method 'm' in the class hierarchy.
 // This is fine iff 'k' is an abstract class and all concrete subtypes
 // of 'k' override 'm' and are participants in the current search.
 ClassHierarchyWalker wf(_participants, _num_participants);
- Klass* w = wf.find_witness_subtype(k);
+ Klass* w = wf.find_witness_subtype(ik);
 if (w != NULL) {
 Method* wm = InstanceKlass::cast(w)->find_instance_method(_name, _signature, Klass::PrivateLookupMode::skip);
 if (!Dependencies::is_concrete_method(wm, w)) {
@@ -1221,10 +1146,10 @@ class ClassHierarchyWalker {
 }
 }
 // Check interface defaults also, if any exist.
- Array<Method*>* default_methods = InstanceKlass::cast(k)->default_methods();
+ Array<Method*>* default_methods = ik->default_methods();
 if (default_methods == NULL)
 return false;
- m = InstanceKlass::cast(k)->find_method(default_methods, _name, _signature);
+ m = ik->find_method(default_methods, _name, _signature);
 if (!Dependencies::is_concrete_method(m, NULL))
 return false;
 }
@@ -1264,16 +1189,17 @@ class ClassHierarchyWalker {
 private:
 // the actual search method:
- Klass* find_witness_anywhere(Klass* context_type,
- bool participants_hide_witnesses,
- bool top_level_call = true);
+ Klass* find_witness_anywhere(InstanceKlass* context_type,
+ bool participants_hide_witnesses);
 // the spot-checking version:
 Klass* find_witness_in(KlassDepChange& changes,
- Klass* context_type,
- bool participants_hide_witnesses);
+ InstanceKlass* context_type,
+ bool participants_hide_witnesses);
 public:
- Klass* find_witness_subtype(Klass* context_type, KlassDepChange* changes = NULL) {
+ Klass* find_witness_subtype(Klass* k, KlassDepChange* changes = NULL) {
 assert(doing_subtype_search(), "must set up a subtype search");
+ assert(k->is_instance_klass(), "required");
+ InstanceKlass* context_type = InstanceKlass::cast(k);
 // When looking for unexpected concrete types,
 // do not look beneath expected ones.
 const bool participants_hide_witnesses = true;
@@ -1285,8 +1211,10 @@ class ClassHierarchyWalker {
 return find_witness_anywhere(context_type, participants_hide_witnesses);
 }
 }
- Klass* find_witness_definer(Klass* context_type, KlassDepChange* changes = NULL) {
+ Klass* find_witness_definer(Klass* k, KlassDepChange* changes = NULL) {
 assert(!doing_subtype_search(), "must set up a method definer search");
+ assert(k->is_instance_klass(), "required");
+ InstanceKlass* context_type = InstanceKlass::cast(k);
 // When looking for unexpected concrete methods,
 // look beneath expected ones, to see if there are overrides.
 const bool participants_hide_witnesses = true;
@@ -1347,8 +1275,8 @@ static bool count_find_witness_calls() {
 Klass* ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
- Klass* context_type,
- bool participants_hide_witnesses) {
+ InstanceKlass* context_type,
+ bool participants_hide_witnesses) {
 assert(changes.involves_context(context_type), "irrelevant dependency");
 Klass* new_type = changes.new_type();
@@ -1360,7 +1288,7 @@ Klass* ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
 // Must not move the class hierarchy during this check:
 assert_locked_or_safepoint(Compile_lock);
- int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
+ int nof_impls = context_type->nof_implementors();
 if (nof_impls > 1) {
 // Avoid this case: *I.m > { A.m, C }; B.m > C
 // %%% Until this is fixed more systematically, bail out.
@@ -1391,15 +1319,8 @@ Klass* ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
 return NULL;
 }
- // Walk hierarchy under a context type, looking for unexpected types.
-// Do not report participant types, and recursively walk beneath
-// them only if participants_hide_witnesses is false.
-// If top_level_call is false, skip testing the context type,
-// because the caller has already considered it.
-Klass* ClassHierarchyWalker::find_witness_anywhere(Klass* context_type,
- bool participants_hide_witnesses,
- bool top_level_call) {
+Klass* ClassHierarchyWalker::find_witness_anywhere(InstanceKlass* context_type, bool participants_hide_witnesses) {
 // Current thread must be in VM (not native mode, as in CI):
 assert(must_be_in_vm(), "raw oops here");
 // Must not move the class hierarchy during this check:
@@ -1408,106 +1329,50 @@ Klass* ClassHierarchyWalker::find_witness_anywhere(Klass* context_type,
 bool do_counts = count_find_witness_calls();
 // Check the root of the sub-hierarchy first.
- if (top_level_call) {
- if (do_counts) {
- NOT_PRODUCT(deps_find_witness_calls++);
- NOT_PRODUCT(deps_find_witness_steps++);
- }
- if (is_participant(context_type)) {
- if (participants_hide_witnesses) return NULL;
- // else fall through to search loop...
- } else if (is_witness(context_type) && !ignore_witness(context_type)) {
- // The context is an abstract class or interface, to start with.
- return context_type;
- }
+ if (do_counts) {
+ NOT_PRODUCT(deps_find_witness_calls++);
 }
- // Now we must check each implementor and each subclass.
- // Use a short worklist to avoid blowing the stack.
- // Each worklist entry is a *chain* of subklass siblings to process.
- const int CHAINMAX = 100; // >= 1 + InstanceKlass::implementors_limit
- Klass* chains[CHAINMAX];
- int chaini = 0; // index into worklist
- Klass* chain; // scratch variable
-#define ADD_SUBCLASS_CHAIN(k) { \
- assert(chaini < CHAINMAX, "oob"); \
- chain = k->subklass(); \
- if (chain != NULL) chains[chaini++] = chain; }
-
- // Look for non-abstract subclasses.
- // (Note: Interfaces do not have subclasses.)
- ADD_SUBCLASS_CHAIN(context_type);
-
+ // (Note: Interfaces do not have subclasses.)
 // If it is an interface, search its direct implementors.
- // (Their subclasses are additional indirect implementors.
- // See InstanceKlass::add_implementor.)
- // (Note: nof_implementors is always zero for non-interfaces.)
- if (top_level_call) {
- int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
- if (nof_impls > 1) {
+ // (Their subclasses are additional indirect implementors. See InstanceKlass::add_implementor().)
+ if (context_type->is_interface()) {
+ int nof_impls = context_type->nof_implementors();
+ if (nof_impls == 0) {
+ return NULL; // no implementors
+ } else if (nof_impls == 1) { // unique implementor
+ assert(context_type != context_type->implementor(), "not unique");
+ context_type = context_type->implementor();
+ } else { // nof_impls >= 2
 // Avoid this case: *I.m > { A.m, C }; B.m > C
 // Here, I.m has 2 concrete implementations, but m appears unique
 // as A.m, because the search misses B.m when checking C.
 // The inherited method B.m was getting missed by the walker
 // when interface 'I' was the starting point.
 // %%% Until this is fixed more systematically, bail out.
- // (Old CHA had the same limitation.)
 return context_type;
 }
- if (nof_impls > 0) {
- Klass* impl = InstanceKlass::cast(context_type)->implementor();
- assert(impl != NULL, "just checking");
- // If impl is the same as the context_type, then more than one
- // implementor has seen. No exact info in this case.
- if (impl == context_type) {
- return context_type; // report an inexact witness to this sad affair
- }
- if (do_counts)
- { NOT_PRODUCT(deps_find_witness_steps++); }
- if (is_participant(impl)) {
- if (!participants_hide_witnesses) {
- ADD_SUBCLASS_CHAIN(impl);
- }
- } else if (is_witness(impl) && !ignore_witness(impl)) {
- return impl;
- } else {
- ADD_SUBCLASS_CHAIN(impl);
- }
- }
 }
- // Recursively process each non-trivial sibling chain.
- while (chaini > 0) {
- Klass* chain = chains[--chaini];
- for (Klass* sub = chain; sub != NULL; sub = sub->next_sibling()) {
- if (do_counts) { NOT_PRODUCT(deps_find_witness_steps++); }
- if (is_participant(sub)) {
- if (participants_hide_witnesses) continue;
- // else fall through to process this guy's subclasses
- } else if (is_witness(sub) && !ignore_witness(sub)) {
- return sub;
- }
- if (chaini < (VerifyDependencies? 2: CHAINMAX)) {
- // Fast path. (Partially disabled if VerifyDependencies.)
- ADD_SUBCLASS_CHAIN(sub);
- } else {
- // Worklist overflow. Do a recursive call. Should be rare.
- // The recursive call will have its own worklist, of course.
- // (Note that sub has already been tested, so that there is
- // no need for the recursive call to re-test. That's handy,
- // since the recursive call sees sub as the context_type.)
- if (do_counts) { NOT_PRODUCT(deps_find_witness_recursions++); }
- Klass* witness = find_witness_anywhere(sub,
- participants_hide_witnesses,
- /*top_level_call=*/ false);
- if (witness != NULL) return witness;
+ assert(!context_type->is_interface(), "not allowed");
+
+ for (ClassHierarchyIterator iter(context_type); !iter.done(); iter.next()) {
+ Klass* sub = iter.klass();
+
+ if (do_counts) { NOT_PRODUCT(deps_find_witness_steps++); }
+
+ // Do not report participant types.
+ if (is_participant(sub)) {
+ // Walk beneath a participant only when it doesn't hide witnesses.
+ if (participants_hide_witnesses) {
+ iter.skip_subclasses();
 }
+ } else if (is_witness(sub) && !ignore_witness(sub)) {
+ return sub; // found a witness
 }
 }
- // No witness found. The dependency remains unbroken.
 return NULL;
-#undef ADD_SUBCLASS_CHAIN
 }
@@ -1606,32 +1471,12 @@ Klass* Dependencies::check_leaf_type(Klass* ctxk) {
 // This allows the compiler to narrow occurrences of ctxk by conck,
 // when dealing with the types of actual instances.
 Klass* Dependencies::check_abstract_with_unique_concrete_subtype(Klass* ctxk,
- Klass* conck,
- KlassDepChange* changes) {
+ Klass* conck,
+ KlassDepChange* changes) {
 ClassHierarchyWalker wf(conck);
 return wf.find_witness_subtype(ctxk, changes);
 }
-// If a non-concrete class has no concrete subtypes, it is not (yet)
-// instantiatable. This can allow the compiler to make some paths go
-// dead, if they are gated by a test of the type.
-Klass* Dependencies::check_abstract_with_no_concrete_subtype(Klass* ctxk,
- KlassDepChange* changes) {
- // Find any concrete subtype, with no participants:
- ClassHierarchyWalker wf;
- return wf.find_witness_subtype(ctxk, changes);
-}
-
-
-// If a concrete class has no concrete subtypes, it can always be
-// exactly typed. This allows the use of a cheaper type test.
-Klass* Dependencies::check_concrete_with_no_concrete_subtype(Klass* ctxk,
- KlassDepChange* changes) {
- // Find any concrete subtype, with only the ctxk as participant:
- ClassHierarchyWalker wf(ctxk);
- return wf.find_witness_subtype(ctxk, changes);
-}
-
 // Find the unique concrete proper subtype of ctxk, or NULL if there
 // is more than one concrete proper subtype.
If there are no concrete
@@ -1645,22 +1490,6 @@ Klass* Dependencies::find_unique_concrete_subtype(Klass* ctxk) {
 if (wit != NULL) return NULL; // Too many witnesses.
 Klass* conck = wf.participant(0);
 if (conck == NULL) {
-#ifndef PRODUCT
- // Make sure the dependency mechanism will pass this discovery:
- if (VerifyDependencies) {
- // Turn off dependency tracing while actually testing deps.
- FlagSetting fs(TraceDependencies, false);
- if (!Dependencies::is_concrete_klass(ctxk)) {
- guarantee(NULL ==
- (void *)check_abstract_with_no_concrete_subtype(ctxk),
- "verify dep.");
- } else {
- guarantee(NULL ==
- (void *)check_concrete_with_no_concrete_subtype(ctxk),
- "verify dep.");
- }
- }
-#endif //PRODUCT
 return ctxk; // Return ctxk as a flag for "no subtypes".
 } else {
 #ifndef PRODUCT
@@ -1679,76 +1508,12 @@ Klass* Dependencies::find_unique_concrete_subtype(Klass* ctxk) {
 }
 }
-// Test the assertion that the k[12] are the only concrete subtypes of ctxk,
-// except possibly for further subtypes of k[12] themselves.
-// The context type must be abstract. The types k1 and k2 are themselves
-// allowed to have further concrete subtypes.
-Klass* Dependencies::check_abstract_with_exclusive_concrete_subtypes(
- Klass* ctxk,
- Klass* k1,
- Klass* k2,
- KlassDepChange* changes) {
- ClassHierarchyWalker wf;
- wf.add_participant(k1);
- wf.add_participant(k2);
- return wf.find_witness_subtype(ctxk, changes);
-}
-
-// Search ctxk for concrete implementations. If there are klen or fewer,
-// pack them into the given array and return the number.
-// Otherwise, return -1, meaning the given array would overflow.
-// (Note that a return of 0 means there are exactly no concrete subtypes.)
-// In this search, if ctxk is concrete, it will be reported alone.
-// For any type CC reported, no proper subtypes of CC will be reported.
-int Dependencies::find_exclusive_concrete_subtypes(Klass* ctxk,
- int klen,
- Klass* karray[]) {
- ClassHierarchyWalker wf;
- wf.record_witnesses(klen);
- Klass* wit = wf.find_witness_subtype(ctxk);
- if (wit != NULL) return -1; // Too many witnesses.
- int num = wf.num_participants();
- assert(num <= klen, "oob");
- // Pack the result array with the good news.
- for (int i = 0; i < num; i++)
- karray[i] = wf.participant(i);
-#ifndef PRODUCT
- // Make sure the dependency mechanism will pass this discovery:
- if (VerifyDependencies) {
- // Turn off dependency tracing while actually testing deps.
- FlagSetting fs(TraceDependencies, false);
- switch (Dependencies::is_concrete_klass(ctxk)? -1: num) {
- case -1: // ctxk was itself concrete
- guarantee(num == 1 && karray[0] == ctxk, "verify dep.");
- break;
- case 0:
- guarantee(NULL == (void *)check_abstract_with_no_concrete_subtype(ctxk),
- "verify dep.");
- break;
- case 1:
- guarantee(NULL == (void *)
- check_abstract_with_unique_concrete_subtype(ctxk, karray[0]),
- "verify dep.");
- break;
- case 2:
- guarantee(NULL == (void *)
- check_abstract_with_exclusive_concrete_subtypes(ctxk,
- karray[0],
- karray[1]),
- "verify dep.");
- break;
- default:
- ShouldNotReachHere(); // klen > 2 yet supported
- }
- }
-#endif //PRODUCT
- return num;
-}
 // If a class (or interface) has a unique concrete method uniqm, return NULL.
 // Otherwise, return a class that contains an interfering method.
-Klass* Dependencies::check_unique_concrete_method(Klass* ctxk, Method* uniqm,
- KlassDepChange* changes) {
+Klass* Dependencies::check_unique_concrete_method(Klass* ctxk,
+ Method* uniqm,
+ KlassDepChange* changes) {
 // Here is a missing optimization: If uniqm->is_final(),
 // we don't really need to search beneath it for overrides.
 // This is probably not important, since we don't use dependencies
@@ -1792,16 +1557,6 @@ Method* Dependencies::find_unique_concrete_method(Klass* ctxk, Method* m) {
 return fm;
 }
-Klass* Dependencies::check_exclusive_concrete_methods(Klass* ctxk,
- Method* m1,
- Method* m2,
- KlassDepChange* changes) {
- ClassHierarchyWalker wf(m1);
- wf.add_participant(m1->method_holder());
- wf.add_participant(m2->method_holder());
- return wf.find_witness_definer(ctxk, changes);
-}
-
 Klass* Dependencies::check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepChange* changes) {
 Klass* search_at = ctxk;
 if (changes != NULL)
@@ -1854,21 +1609,9 @@ Klass* Dependencies::DepStream::check_klass_dependency(KlassDepChange* changes)
 case abstract_with_unique_concrete_subtype:
 witness = check_abstract_with_unique_concrete_subtype(context_type(), type_argument(1), changes);
 break;
- case abstract_with_no_concrete_subtype:
- witness = check_abstract_with_no_concrete_subtype(context_type(), changes);
- break;
- case concrete_with_no_concrete_subtype:
- witness = check_concrete_with_no_concrete_subtype(context_type(), changes);
- break;
 case unique_concrete_method:
 witness = check_unique_concrete_method(context_type(), method_argument(1), changes);
 break;
- case abstract_with_exclusive_concrete_subtypes_2:
- witness = check_abstract_with_exclusive_concrete_subtypes(context_type(), type_argument(1), type_argument(2), changes);
- break;
- case exclusive_concrete_methods_2:
- witness = check_exclusive_concrete_methods(context_type(), method_argument(1), method_argument(2), changes);
- break;
 case no_finalizable_subclasses:
 witness = check_has_no_finalizable_subclasses(context_type(), changes);
 break;
diff --git a/src/hotspot/share/code/dependencies.hpp b/src/hotspot/share/code/dependencies.hpp
index 4366201ef19e0a476d8837a3f4b26d89dc308489..55bbc11502af75b2b4eafaec79f0506af0558a5a 100644
--- a/src/hotspot/share/code/dependencies.hpp
+++ b/src/hotspot/share/code/dependencies.hpp
@@ -116,12 +116,6 @@ class Dependencies: public ResourceObj {
 // An abstract class CX has exactly one concrete subtype CC.
 abstract_with_unique_concrete_subtype,
- // The type CX is purely abstract, with no concrete subtype* at all.
- abstract_with_no_concrete_subtype,
-
- // The concrete CX is free of concrete proper subtypes.
- concrete_with_no_concrete_subtype,
-
 // Given a method M1 and a context class CX, the set MM(CX, M1) of
 // "concrete matching methods" in CX of M1 is the set of every
 // concrete M2 for which it is possible to create an invokevirtual
@@ -140,23 +134,6 @@ class Dependencies: public ResourceObj {
 // than {M1}.
 unique_concrete_method, // one unique concrete method under CX
- // An "exclusive" assertion concerns two methods or subtypes, and
- // declares that there are at most two (or perhaps later N>2)
- // specific items that jointly satisfy the restriction.
- // We list all items explicitly rather than just giving their
- // count, for robustness in the face of complex schema changes.
-
- // A context class CX (which may be either abstract or concrete)
- // has two exclusive concrete subtypes* C1, C2 if every concrete
- // subtype* of CX is either C1 or C2.
Note that if neither C1 or C2
- are equal to CX, then CX itself must be abstract. But it is
- also possible (for example) that C1 is CX (a concrete class)
- and C2 is a proper subtype of C1.
- abstract_with_exclusive_concrete_subtypes_2,
-
- // This dependency asserts that MM(CX, M1) is no greater than {M1,M2}.
- exclusive_concrete_methods_2,
-
 // This dependency asserts that no instances of the class or its
 // subclasses require finalization registration.
 no_finalizable_subclasses,
@@ -348,18 +325,13 @@ class Dependencies: public ResourceObj {
 void assert_common_1(DepType dept, ciBaseObject* x);
 void assert_common_2(DepType dept, ciBaseObject* x0, ciBaseObject* x1);
- void assert_common_3(DepType dept, ciKlass* ctxk, ciBaseObject* x1, ciBaseObject* x2);
 public:
 // Adding assertions to a new dependency set at compile time:
 void assert_evol_method(ciMethod* m);
 void assert_leaf_type(ciKlass* ctxk);
 void assert_abstract_with_unique_concrete_subtype(ciKlass* ctxk, ciKlass* conck);
- void assert_abstract_with_no_concrete_subtype(ciKlass* ctxk);
- void assert_concrete_with_no_concrete_subtype(ciKlass* ctxk);
 void assert_unique_concrete_method(ciKlass* ctxk, ciMethod* uniqm);
- void assert_abstract_with_exclusive_concrete_subtypes(ciKlass* ctxk, ciKlass* k1, ciKlass* k2);
- void assert_exclusive_concrete_methods(ciKlass* ctxk, ciMethod* m1, ciMethod* m2);
 void assert_has_no_finalizable_subclasses(ciKlass* ctxk);
 void assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle);
@@ -426,18 +398,8 @@ class Dependencies: public ResourceObj {
 // Checking old assertions at run-time (in the VM only):
 static Klass* check_evol_method(Method* m);
 static Klass* check_leaf_type(Klass* ctxk);
- static Klass* check_abstract_with_unique_concrete_subtype(Klass* ctxk, Klass* conck,
- KlassDepChange* changes = NULL);
- static Klass* check_abstract_with_no_concrete_subtype(Klass* ctxk,
- KlassDepChange* changes = NULL);
- static Klass* check_concrete_with_no_concrete_subtype(Klass* ctxk,
- KlassDepChange* changes = NULL);
- static Klass* check_unique_concrete_method(Klass* ctxk, Method* uniqm,
- KlassDepChange* changes = NULL);
- static Klass* check_abstract_with_exclusive_concrete_subtypes(Klass* ctxk, Klass* k1, Klass* k2,
- KlassDepChange* changes = NULL);
- static Klass* check_exclusive_concrete_methods(Klass* ctxk, Method* m1, Method* m2,
- KlassDepChange* changes = NULL);
+ static Klass* check_abstract_with_unique_concrete_subtype(Klass* ctxk, Klass* conck, KlassDepChange* changes = NULL);
+ static Klass* check_unique_concrete_method(Klass* ctxk, Method* uniqm, KlassDepChange* changes = NULL);
 static Klass* check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepChange* changes = NULL);
 static Klass* check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
 // A returned Klass* is NULL if the dependency assertion is still
@@ -455,9 +417,8 @@ class Dependencies: public ResourceObj {
 // It is used by DepStream::spot_check_dependency_at.
 // Detecting possible new assertions:
- static Klass* find_unique_concrete_subtype(Klass* ctxk);
- static Method* find_unique_concrete_method(Klass* ctxk, Method* m);
- static int find_exclusive_concrete_subtypes(Klass* ctxk, int klen, Klass* k[]);
+ static Klass* find_unique_concrete_subtype(Klass* ctxk);
+ static Method* find_unique_concrete_method(Klass* ctxk, Method* m);
 // Create the encoding which will be stored in an nmethod.
 void encode_content_bytes();
diff --git a/src/hotspot/share/code/icBuffer.cpp b/src/hotspot/share/code/icBuffer.cpp
index fb49e14b38033ecb51a4f01f4925ff5a501eaf04..cb13bdbd54ef0ff64c369d7a33205c0efb6042bd 100644
--- a/src/hotspot/share/code/icBuffer.cpp
+++ b/src/hotspot/share/code/icBuffer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
+#include "runtime/vmOperations.hpp"
 DEF_STUB_INTERFACE(ICStub);
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index 0711c399e82d4bae0d8f8eaccd0c09176ba36494..6805198a945ba110e7253d21236069d1568e46b0 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -46,6 +46,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
+#include "oops/klass.inline.hpp"
 #include "oops/method.inline.hpp"
 #include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
@@ -501,7 +502,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
 ImplicitExceptionTable* nul_chk_table,
 AbstractCompiler* compiler,
 int comp_level,
- const GrowableArrayView<BufferBlob*>& native_invokers
+ const GrowableArrayView<RuntimeStub*>& native_invokers
 #if INCLUDE_JVMCI
 , char* speculations,
 int speculations_len,
@@ -726,7 +727,7 @@ nmethod::nmethod(
 ImplicitExceptionTable* nul_chk_table,
 AbstractCompiler* compiler,
 int comp_level,
- const GrowableArrayView<BufferBlob*>& native_invokers
+ const GrowableArrayView<RuntimeStub*>& native_invokers
 #if INCLUDE_JVMCI
 , char* speculations,
 int speculations_len,
@@ -1057,7 +1058,7 @@ void nmethod::copy_values(GrowableArray<Metadata*>* array) {
 }
 void nmethod::free_native_invokers() {
- for (BufferBlob** it = native_invokers_begin(); it < native_invokers_end(); it++) {
+ for (RuntimeStub** it = native_invokers_begin(); it < native_invokers_end(); it++) {
 CodeCache::free(*it);
 }
 }
@@ -2696,7 +2697,7 @@ void nmethod::print_pcs_on(outputStream* st) {
 void nmethod::print_native_invokers() {
 ResourceMark m; // in case methods get printed via debugger
 tty->print_cr("Native invokers:");
- for (BufferBlob** itt = native_invokers_begin(); itt < native_invokers_end(); itt++) {
+ for (RuntimeStub** itt = native_invokers_begin(); itt < native_invokers_end(); itt++) {
 (*itt)->print_on(tty);
 }
 }
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index 592fa8f7017fae6c59afcc22a571403a181d8c7d..6eb7f6f81b649284c2878bb5e40b852bc18526f4 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -314,7 +314,7 @@ class nmethod : public CompiledMethod {
 ImplicitExceptionTable* nul_chk_table,
 AbstractCompiler* compiler,
 int comp_level,
- const GrowableArrayView<BufferBlob*>& native_invokers
+ const GrowableArrayView<RuntimeStub*>& native_invokers
 #if INCLUDE_JVMCI
 , char* speculations,
 int speculations_len,
@@ -363,7 +363,7 @@ class nmethod : public CompiledMethod {
 ImplicitExceptionTable* nul_chk_table,
 AbstractCompiler* compiler,
 int comp_level,
- const GrowableArrayView<BufferBlob*>& native_invokers = GrowableArrayView<BufferBlob*>::EMPTY
+ const GrowableArrayView<RuntimeStub*>& native_invokers = GrowableArrayView<RuntimeStub*>::EMPTY
 #if INCLUDE_JVMCI
 , char* speculations = NULL,
 int speculations_len = 0,
@@ -413,8 +413,8 @@ class
nmethod : public CompiledMethod {
 PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
 address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
 address dependencies_end () const { return header_begin() + _native_invokers_offset ; }
- BufferBlob** native_invokers_begin() const { return (BufferBlob**)(header_begin() + _native_invokers_offset) ; }
- BufferBlob** native_invokers_end () const { return (BufferBlob**)(header_begin() + _handler_table_offset); }
+ RuntimeStub** native_invokers_begin() const { return (RuntimeStub**)(header_begin() + _native_invokers_offset) ; }
+ RuntimeStub** native_invokers_end () const { return (RuntimeStub**)(header_begin() + _handler_table_offset); }
 address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
 address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
 address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
diff --git a/src/hotspot/share/compiler/compilationPolicy.cpp b/src/hotspot/share/compiler/compilationPolicy.cpp
index 100182e87074691c9c642864996ae1c6d94bde8f..1218ead4eea295b55e1590bc9947dc4da575e9ce 100644
--- a/src/hotspot/share/compiler/compilationPolicy.cpp
+++ b/src/hotspot/share/compiler/compilationPolicy.cpp
@@ -86,15 +86,15 @@ bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level)
 (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
 }
-void CompilationPolicy::compile_if_required(const methodHandle& selected_method, TRAPS) {
- if (must_be_compiled(selected_method)) {
+void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
+ if (must_be_compiled(m)) {
 // This path is unusual, mostly used by the '-Xcomp' stress test mode.
 if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
 // don't force compilation, resolve was on behalf of compiler
 return;
 }
- if (selected_method->method_holder()->is_not_initialized()) {
+ if (m->method_holder()->is_not_initialized()) {
 // 'is_not_initialized' means not only '!is_initialized', but also that
 // initialization has not been started yet ('!being_initialized')
 // Do not force compilation of methods in uninitialized classes.
@@ -104,9 +104,11 @@ void CompilationPolicy::compile_if_required(const methodHandle& selected_method,
 // even before classes are initialized.
 return;
 }
- CompileBroker::compile_method(selected_method, InvocationEntryBci,
- CompilationPolicy::initial_compile_level(selected_method),
- methodHandle(), 0, CompileTask::Reason_MustBeCompiled, THREAD);
+ CompLevel level = initial_compile_level(m);
+ if (PrintTieredEvents) {
+ print_event(COMPILE, m(), m(), InvocationEntryBci, level);
+ }
+ CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, CompileTask::Reason_MustBeCompiled, THREAD);
 }
 }
@@ -326,7 +328,7 @@ double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) {
 // than specified by IncreaseFirstTierCompileThresholdAt percentage.
 // The main intention is to keep enough free space for C2 compiled code
 // to achieve peak performance if the code cache is under stress.
- if (!CompilationModeFlag::disable_intermediate() && TieredStopAtLevel == CompLevel_full_optimization && level != CompLevel_full_optimization) {
+ if (CompilerConfig::is_tiered() && !CompilationModeFlag::disable_intermediate() && is_c1_compile(level)) {
 double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
 if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
 k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
@@ -337,7 +339,7 @@ double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) {
 return 1;
 }
-void CompilationPolicy::print_counters(const char* prefix, Method* m) {
+void CompilationPolicy::print_counters(const char* prefix, const Method* m) {
 int invocation_count = m->invocation_count();
 int backedge_count = m->backedge_count();
 MethodData* mdh = m->method_data();
@@ -358,8 +360,7 @@ void CompilationPolicy::print_counters(const char* prefix, Method* m) {
 }
 // Print an event.
-void CompilationPolicy::print_event(EventType type, Method* m, Method* im,
- int bci, CompLevel level) {
+void CompilationPolicy::print_event(EventType type, const Method* m, const Method* im, int bci, CompLevel level) {
 bool inlinee_event = m != im;
 ttyLocker tty_lock;
@@ -509,6 +510,17 @@ void CompilationPolicy::initialize() {
 #ifdef ASSERT
 bool CompilationPolicy::verify_level(CompLevel level) {
+ if (TieredCompilation && level > TieredStopAtLevel) {
+ return false;
+ }
+ // Check if there is a compiler to process the requested level
+ if (!CompilerConfig::is_c1_enabled() && is_c1_compile(level)) {
+ return false;
+ }
+ if (!CompilerConfig::is_c2_or_jvmci_compiler_enabled() && is_c2_compile(level)) {
+ return false;
+ }
+
 // AOT and interpreter levels are always valid.
 if (level == CompLevel_aot || level == CompLevel_none) {
 return true;
@@ -528,49 +540,54 @@ bool CompilationPolicy::verify_level(CompLevel level) {
 CompLevel CompilationPolicy::highest_compile_level() {
- CompLevel max_level = CompLevel_none;
+ CompLevel level = CompLevel_none;
+ // Set up the maximum level available for the current compiler configuration.
 if (!CompilerConfig::is_interpreter_only()) {
 if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
- max_level = CompLevel_full_optimization;
+ level = CompLevel_full_optimization;
 } else if (CompilerConfig::is_c1_enabled()) {
 if (CompilerConfig::is_c1_simple_only()) {
- max_level = CompLevel_simple;
+ level = CompLevel_simple;
 } else {
- max_level = CompLevel_full_profile;
+ level = CompLevel_full_profile;
 }
 }
- max_level = MAX2(max_level, (CompLevel) TieredStopAtLevel);
- }
- return max_level;
-}
-
-CompLevel CompilationPolicy::limit_level(CompLevel level) {
- if (CompilationModeFlag::quick_only()) {
- level = MIN2(level, CompLevel_simple);
 }
- assert(verify_level(level), "Invalid compilation level %d", level);
- if (level <= TieredStopAtLevel) {
- return level;
- }
- // Some compilation levels are not valid depending on a compilation mode:
- // a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
- // b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
- // c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
- // The invalid levels are actually sequential so a single comparison is sufficient.
- // Down here we already have (level > TieredStopAtLevel), which also implies that
- // (TieredStopAtLevel < Highest Possible Level), so we need to return a level that is:
- // a) a max level that is strictly less than the highest for a given compilation mode
- // b) less or equal to TieredStopAtLevel
- if (CompilationModeFlag::normal() || CompilationModeFlag::quick_only()) {
- return (CompLevel)TieredStopAtLevel;
+ // Clamp the maximum level with TieredStopAtLevel.
+ if (TieredCompilation) {
+ level = MIN2(level, (CompLevel) TieredStopAtLevel);
+ }
+
+ // Fix it up if after the clamping it has become invalid.
+ // Bring it monotonically down depending on the next available level for
+ // the compilation mode.
+ if (!CompilationModeFlag::normal()) {
+ // a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
+ // b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
+ // c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
+ if (CompilationModeFlag::quick_only()) {
+ if (level == CompLevel_limited_profile || level == CompLevel_full_profile || level == CompLevel_full_optimization) {
+ level = CompLevel_simple;
+ }
+ } else if (CompilationModeFlag::high_only()) {
+ if (level == CompLevel_simple || level == CompLevel_limited_profile || level == CompLevel_full_profile) {
+ level = CompLevel_none;
+ }
+ } else if (CompilationModeFlag::high_only_quick_internal()) {
+ if (level == CompLevel_limited_profile || level == CompLevel_full_profile) {
+ level = CompLevel_simple;
+ }
+ }
 }
- if (CompilationModeFlag::high_only() || CompilationModeFlag::high_only_quick_internal()) {
- return MIN2(CompLevel_none, (CompLevel)TieredStopAtLevel);
- }
+ assert(verify_level(level), "Invalid highest compilation level: %d", level);
+ return level;
+}
- ShouldNotReachHere();
- return CompLevel_any;
+CompLevel CompilationPolicy::limit_level(CompLevel level) {
+ level = MIN2(level, highest_compile_level());
+ assert(verify_level(level), "Invalid compilation level: %d", level);
+ return level;
 }
 CompLevel CompilationPolicy::initial_compile_level(const methodHandle& method) {
@@ -658,9 +675,8 @@ CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue) {
 methodHandle max_method_h(Thread::current(), max_method);
- if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile &&
- TieredStopAtLevel > CompLevel_full_profile &&
- max_method != NULL && is_method_profiled(max_method_h)) {
+ if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
+ max_method != NULL && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
 max_task->set_comp_level(CompLevel_limited_profile);
 if (CompileBroker::compilation_is_complete(max_method_h, max_task->osr_bci(), CompLevel_limited_profile)) {
@@ -740,7 +756,7 @@ nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle
 // Check if the method can be compiled, change level if necessary
 void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level, TRAPS) {
- assert(verify_level(level) && level <= TieredStopAtLevel, "Invalid compilation level %d", level);
+ assert(verify_level(level), "Invalid compilation level requested: %d", level);
 if (level == CompLevel_none) {
 if (mh->has_compiled_code()) {
@@ -1038,33 +1054,18 @@ CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_le
 if (common(method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
 next_level = CompLevel_full_optimization;
 } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply(i, b, cur_level, method)) {
-#if INCLUDE_JVMCI
- if (EnableJVMCI && UseJVMCICompiler) {
- // Since JVMCI takes a while to warm up, its queue inevitably backs up during
- // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root
- // compilation method and all potential inlinees have mature profiles (which
- // includes type profiling). If it sees immature profiles, JVMCI's inliner
- // can perform pathologically bad (e.g., causing OutOfMemoryErrors due to
- // exploring/inlining too many graphs). Since a rewrite of the inliner is
- // in progress, we simply disable the dialing back heuristic for now and will
- // revisit this decision once the new inliner is completed.
+ // C1-generated fully profiled code is about 30% slower than the limited profile
+ // code that has only invocation and backedge counters. The observation is that
+ // if C2 queue is large enough we can spend too much time in the fully profiled code
+ // while waiting for C2 to pick the method from the queue. To alleviate this problem
+ // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
+ // we choose to compile a limited profiled version and then recompile with full profiling
+ // when the load on C2 goes down.
+ if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
+ Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
+ next_level = CompLevel_limited_profile;
+ } else {
 next_level = CompLevel_full_profile;
- } else
-#endif
- {
- // C1-generated fully profiled code is about 30% slower than the limited profile
- // code that has only invocation and backedge counters. The observation is that
- // if C2 queue is large enough we can spend too much time in the fully profiled code
- // while waiting for C2 to pick the method from the queue. To alleviate this problem
- // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
- // we choose to compile a limited profiled version and then recompile with full profiling
- // when the load on C2 goes down.
- if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
- Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
- next_level = CompLevel_limited_profile;
- } else {
- next_level = CompLevel_full_profile;
- }
 }
 }
 break;
diff --git a/src/hotspot/share/compiler/compilationPolicy.hpp b/src/hotspot/share/compiler/compilationPolicy.hpp
index e4c0e643df657941d4d5c5b84d3531dd9981fa9b..0c039a8d593477bb238f75993524e214cbf9ddd9 100644
--- a/src/hotspot/share/compiler/compilationPolicy.hpp
+++ b/src/hotspot/share/compiler/compilationPolicy.hpp
@@ -184,7 +184,7 @@ class CompilationPolicy : AllStatic {
 // loop_event checks if a method should be OSR compiled at a different
 // level.
 static CompLevel loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread);
- static void print_counters(const char* prefix, Method* m);
+ static void print_counters(const char* prefix, const Method* m);
 // Has a method been long around?
 // We don't remove old methods from the compile queue even if they have
 // very low activity (see select_task()).
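For readers following the CompilationPolicy hunks above: the queue-size feedback described in the restored comment reduces to one scaled comparison, the C2 queue length against Tier3DelayOn times the number of C2 compiler threads. The standalone C++ sketch below illustrates that heuristic only; the names are hypothetical and it is not HotSpot code.

    // Pick the C1 profiling flavor while C2 is busy: limited profiling
    // (invocation/backedge counters only) when the C2 queue is backed up,
    // full profiling otherwise.
    enum ProfileLevel { kLimitedProfile, kFullProfile };

    ProfileLevel pick_c1_profile_level(long c2_queue_size,
                                       int c2_compiler_count,
                                       long tier3_delay_on) {
      // The threshold scales with available C2 bandwidth: more compiler
      // threads drain the queue faster, so a longer queue is tolerated.
      if (c2_queue_size > tier3_delay_on * c2_compiler_count) {
        return kLimitedProfile; // recompile with full profiling later
      }
      return kFullProfile;
    }

A method compiled at the limited profile is recompiled with full profiling once the C2 load drops, trading some profile quality for less time spent in slower fully profiled code.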
@@ -216,7 +216,7 @@ class CompilationPolicy : AllStatic {
 static void set_c2_count(int x) { _c2_count = x; }
 enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
- static void print_event(EventType type, Method* m, Method* im, int bci, CompLevel level);
+ static void print_event(EventType type, const Method* m, const Method* im, int bci, CompLevel level);
 // Check if the method can be compiled, change level if necessary
 static void compile(const methodHandle& mh, int bci, CompLevel level, TRAPS);
 // Simple methods are as good being compiled with C1 as C2.
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index 3e88c915236ef9b1091ea18aea3e50f4293c4a33..129798a96c25264dff2aee54ae4b7681fb8ba923 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -64,6 +64,7 @@
 #include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
+#include "runtime/threadSMR.hpp"
 #include "runtime/timerTrace.hpp"
 #include "runtime/vframe.inline.hpp"
 #include "utilities/debug.hpp"
@@ -1005,7 +1006,8 @@ void CompileBroker::init_compiler_sweeper_threads() {
 _compilers[1]->set_num_compiler_threads(i + 1);
 if (TraceCompilerThreads) {
 ResourceMark rm;
- MutexLocker mu(Threads_lock);
+ ThreadsListHandle tlh; // get_thread_name() depends on the TLH.
+ assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
 tty->print_cr("Added initial compiler thread %s", ct->get_thread_name());
 }
 }
@@ -1025,7 +1027,8 @@ void CompileBroker::init_compiler_sweeper_threads() {
 _compilers[0]->set_num_compiler_threads(i + 1);
 if (TraceCompilerThreads) {
 ResourceMark rm;
- MutexLocker mu(Threads_lock);
+ ThreadsListHandle tlh; // get_thread_name() depends on the TLH.
+ assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
 tty->print_cr("Added initial compiler thread %s", ct->get_thread_name());
 }
 }
@@ -1111,7 +1114,8 @@ void CompileBroker::possibly_add_compiler_threads(Thread* THREAD) {
 _compilers[1]->set_num_compiler_threads(i + 1);
 if (TraceCompilerThreads) {
 ResourceMark rm;
- MutexLocker mu(Threads_lock);
+ ThreadsListHandle tlh; // get_thread_name() depends on the TLH.
+ assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
 tty->print_cr("Added compiler thread %s (available memory: %dMB, available non-profiled code cache: %dMB)",
 ct->get_thread_name(), (int)(available_memory/M), (int)(available_cc_np/M));
 }
@@ -1131,7 +1135,8 @@ void CompileBroker::possibly_add_compiler_threads(Thread* THREAD) {
 _compilers[0]->set_num_compiler_threads(i + 1);
 if (TraceCompilerThreads) {
 ResourceMark rm;
- MutexLocker mu(Threads_lock);
+ ThreadsListHandle tlh; // get_thread_name() depends on the TLH.
+ assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
 tty->print_cr("Added compiler thread %s (available memory: %dMB, available profiled code cache: %dMB)",
 ct->get_thread_name(), (int)(available_memory/M), (int)(available_cc_p/M));
 }
diff --git a/src/hotspot/share/compiler/compileTask.cpp b/src/hotspot/share/compiler/compileTask.cpp
index c56ebdb888b5df614189ba2b558608da38f49ab3..d610d8bdcf814730579233cf65bd9649566b2d25 100644
--- a/src/hotspot/share/compiler/compileTask.cpp
+++ b/src/hotspot/share/compiler/compileTask.cpp
@@ -31,6 +31,7 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.hpp"
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp
index cd29c595e87d75ce78e8b8db9a9e96fb498a6fe2..6ca90baf21fe7565af7c0087fa582f383b0df577 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -191,9 +191,6 @@ void set_client_emulation_mode_flags() {
 if (FLAG_IS_DEFAULT(CodeCacheExpansionSize)) {
 FLAG_SET_ERGO(CodeCacheExpansionSize, 32*K);
 }
- if (FLAG_IS_DEFAULT(MetaspaceSize)) {
- FLAG_SET_ERGO(MetaspaceSize, MIN2(12*M, MaxMetaspaceSize));
- }
 if (FLAG_IS_DEFAULT(MaxRAM)) {
 // Do not use FLAG_SET_ERGO to update MaxRAM, as this will impact
 // heap setting done based on available phys_mem (see Arguments::set_heap_size).
@@ -308,12 +305,19 @@ void CompilerConfig::set_compilation_policy_flags() {
 8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
 FLAG_SET_ERGO(SegmentedCodeCache, true);
 }
+ if (Arguments::is_compiler_only()) { // -Xcomp
+ // Be much more aggressive in tiered mode with -Xcomp and exercise C2 more.
+ // We will first compile a level 3 version (C1 with full profiling), then do one invocation of it and
+ // compile a level 4 (C2) and then continue executing it.
+ if (FLAG_IS_DEFAULT(Tier3InvokeNotifyFreqLog)) {
+ FLAG_SET_CMDLINE(Tier3InvokeNotifyFreqLog, 0);
+ }
+ if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
+ FLAG_SET_CMDLINE(Tier4InvocationThreshold, 0);
+ }
+ }
 }
- if (!UseInterpreter) { // -Xcomp
- Tier3InvokeNotifyFreqLog = 0;
- Tier4InvocationThreshold = 0;
- }
 if (CompileThresholdScaling < 0) {
 vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
@@ -444,12 +448,19 @@ void CompilerConfig::set_jvmci_specific_flags() {
 if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
 FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize));
 }
- if (FLAG_IS_DEFAULT(MetaspaceSize)) {
- FLAG_SET_DEFAULT(MetaspaceSize, MIN2(MAX2(12*M, MetaspaceSize), MaxMetaspaceSize));
- }
 if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
 FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease));
 }
+ if (FLAG_IS_DEFAULT(Tier3DelayOn)) {
+ // This effectively prevents the compile broker scheduling tier 2
+ // (i.e., limited C1 profiling) compilations instead of tier 3
+ // (i.e., full C1 profiling) compilations when the tier 4 queue
+ // backs up (which is quite likely when using a non-AOT compiled JVMCI
+ // compiler). The observation based on jargraal is that the downside
+ // of skipping full profiling is much worse for performance than the
+ // queue backing up.
+ FLAG_SET_DEFAULT(Tier3DelayOn, 100000);
+ }
 } // !UseJVMCINativeLibrary
 } // UseJVMCICompiler
 }
@@ -555,6 +566,8 @@ void CompilerConfig::ergo_initialize() {
 if (NeverActAsServerClassMachine) {
 set_client_emulation_mode_flags();
 }
+ } else if (!has_c2() && !is_jvmci_compiler()) {
+ set_client_emulation_mode_flags();
 }
 set_legacy_emulation_flags();
diff --git a/src/hotspot/share/compiler/compilerDefinitions.hpp b/src/hotspot/share/compiler/compilerDefinitions.hpp
index 8e5e58f2c36f395735d6d5f2c2404e5f901c0e35..48036afc279fc2ab1d32bc0cfb18aa52f4a6ff73 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.hpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.hpp
@@ -158,7 +158,7 @@ public:
 static bool is_c1_only() {
 if (!is_interpreter_only() && has_c1()) {
 const bool c1_only = !has_c2() && !is_jvmci_compiler();
- const bool tiered_degraded_to_c1_only = TieredStopAtLevel >= CompLevel_simple && TieredStopAtLevel < CompLevel_full_optimization;
+ const bool tiered_degraded_to_c1_only = TieredCompilation && TieredStopAtLevel >= CompLevel_simple && TieredStopAtLevel < CompLevel_full_optimization;
 const bool c1_only_compilation_mode = CompilationModeFlag::quick_only();
 return c1_only || tiered_degraded_to_c1_only || c1_only_compilation_mode;
 }
@@ -177,9 +177,10 @@ public:
 // Is the JVM in a configuration that permits only c1-compiled methods at level 1?
 static bool is_c1_simple_only() {
 if (is_c1_only()) {
- const bool tiered_degraded_to_level_1 = TieredStopAtLevel == CompLevel_simple;
+ const bool tiered_degraded_to_level_1 = TieredCompilation && TieredStopAtLevel == CompLevel_simple;
 const bool c1_only_compilation_mode = CompilationModeFlag::quick_only();
- return tiered_degraded_to_level_1 || c1_only_compilation_mode;
+ const bool tiered_off = !TieredCompilation;
+ return tiered_degraded_to_level_1 || c1_only_compilation_mode || tiered_off;
 }
 return false;
 }
diff --git a/src/hotspot/share/compiler/compilerOracle.cpp b/src/hotspot/share/compiler/compilerOracle.cpp
index b1cd911ddea4db5021ff88d06bb61317d4f70798..ab7fd4bcc199c2213619be62b5fb15e9e85fa797 100644
--- a/src/hotspot/share/compiler/compilerOracle.cpp
+++ b/src/hotspot/share/compiler/compilerOracle.cpp
@@ -416,7 +416,7 @@ static enum CompileCommand match_option_name(const char* line, int* bytes_read,
 *bytes_read = 0;
 char option_buf[256];
 int matches = sscanf(line, "%255[a-zA-Z0-9]%n", option_buf, bytes_read);
- if (matches > 0) {
+ if (matches > 0 && strcasecmp(option_buf, "unknown") != 0) {
 for (uint i = 0; i < ARRAY_SIZE(option_names); i++) {
 if (strcasecmp(option_buf, option_names[i]) == 0) {
 return static_cast<enum CompileCommand>(i);
diff --git a/src/hotspot/share/compiler/compiler_globals_pd.hpp b/src/hotspot/share/compiler/compiler_globals_pd.hpp
index faa1c11fe130348c97936573dcd56ad4351d1a2d..0cb8c6917ba63d49cf7930b7cee76726399266e4 100644
--- a/src/hotspot/share/compiler/compiler_globals_pd.hpp
+++ b/src/hotspot/share/compiler/compiler_globals_pd.hpp
@@ -70,7 +70,6 @@ define_pd_global(uintx, NonNMethodCodeHeapSize, 32*M);
 define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
 define_pd_global(uintx, CodeCacheMinBlockLength, 1);
 define_pd_global(uintx, CodeCacheMinimumUseSpace, 200*K);
-define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(4*M));
 define_pd_global(bool, NeverActAsServerClassMachine, true);
 define_pd_global(uint64_t,MaxRAM, 1ULL*G);
 #define CI_COMPILER_COUNT 0
diff --git a/src/hotspot/share/compiler/methodLiveness.cpp b/src/hotspot/share/compiler/methodLiveness.cpp
index
8d99627cca2c8e744e1b0798af9ff043dab973cc..5f83eea6716e65406e2239906f7f724b2fd004f4 100644
--- a/src/hotspot/share/compiler/methodLiveness.cpp
+++ b/src/hotspot/share/compiler/methodLiveness.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -467,8 +467,6 @@ void MethodLiveness::BasicBlock::compute_gen_kill_range(ciBytecodeStream *bytes)
 }
 void MethodLiveness::BasicBlock::compute_gen_kill_single(ciBytecodeStream *instruction) {
- int localNum;
-
 // We prohibit _gen and _kill from having locals in common. If we
 // know that one is definitely going to be applied before the other,
 // we could save some computation time by relaxing this prohibition.
@@ -693,7 +691,7 @@ void MethodLiveness::BasicBlock::compute_gen_kill_single(ciBytecodeStream *instr
 case Bytecodes::_lstore:
 case Bytecodes::_dstore:
- store_two(localNum = instruction->get_index());
+ store_two(instruction->get_index());
 break;
 case Bytecodes::_lstore_0:
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
index bd937cffca474f8cea2a82055ee69427c37f47e8..867ad32f9bea424bf01ed71142fa20e7402e44a4 100644
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
@@ -31,6 +31,7 @@
 #include "gc/shared/locationPrinter.inline.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/metaspaceUtils.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "runtime/atomic.hpp"
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.hpp
index 9b7ee9e93e74f19b55070867d34cb68736fcc693..8d009a9e19f3f134a42ad554913786c7e7034f65 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp
@@ -53,6 +53,10 @@ class G1BarrierSet: public CardTableBarrierSet {
 G1BarrierSet(G1CardTable* table);
 ~G1BarrierSet() { }
+ virtual bool card_mark_must_follow_store() const {
+ return true;
+ }
+
 // Add "pre_val" to a set of objects that may have been disconnected from the
 // pre-marking object graph.
 static void enqueue(oop pre_val);
diff --git a/src/hotspot/share/gc/g1/g1CardTable.hpp b/src/hotspot/share/gc/g1/g1CardTable.hpp
index 3540bb9411a813293379c6e45dc6e943971b0941..925ae098b63504128a7ffdedd139f83208438ac8 100644
--- a/src/hotspot/share/gc/g1/g1CardTable.hpp
+++ b/src/hotspot/share/gc/g1/g1CardTable.hpp
@@ -79,7 +79,7 @@ public:
 STATIC_ASSERT(BitsPerByte == 8);
 static const size_t WordAlreadyScanned = (SIZE_MAX / 255) * g1_card_already_scanned;
- G1CardTable(MemRegion whole_heap): CardTable(whole_heap, /* scanned concurrently */ true), _listener() {
+ G1CardTable(MemRegion whole_heap): CardTable(whole_heap), _listener() {
 _listener.set_card_table(this);
 }
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 8f33eac44430d8d77388489c44df6fe3325c63cc..9453aac73460f026d98d4e395a3d3e859d97be4d 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -93,6 +93,7 @@
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
 #include "memory/heapInspection.hpp"
+#include "memory/metaspaceUtils.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
@@ -1497,8 +1498,8 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
 os::trace_page_sizes_for_requested_size(description,
 size,
- preferred_page_size,
 page_size,
+ preferred_page_size,
 rs.base(),
 rs.size());
@@ -1794,12 +1795,9 @@ void G1CollectedHeap::ref_processing_init() {
 // * Discovery is atomic - i.e. not concurrent.
 // * Reference discovery will not need a barrier.
- bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
-
 // Concurrent Mark ref processor
 _ref_processor_cm =
 new ReferenceProcessor(&_is_subject_to_discovery_cm,
- mt_processing, // mt processing
 ParallelGCThreads, // degree of mt processing
 (ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery
 MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
@@ -1810,7 +1808,6 @@ void G1CollectedHeap::ref_processing_init() {
 // STW ref processor
 _ref_processor_stw =
 new ReferenceProcessor(&_is_subject_to_discovery_stw,
- mt_processing, // mt processing
 ParallelGCThreads, // degree of mt processing
 (ParallelGCThreads > 1), // mt discovery
 ParallelGCThreads, // degree of mt discovery
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
index d9904f2fc8f120019b0da335e559451b461118df..587aa33618337d4fc1ff7f2236055e716bee7038 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,25 @@ void G1CollectionSetCandidates::remove(uint num_regions) {
 }
 }
+void G1CollectionSetCandidates::remove_from_end(uint num_remove, size_t wasted) {
+ assert(num_remove <= num_remaining(), "trying to remove more regions than remaining");
+
+#ifdef ASSERT
+ size_t reclaimable = 0;
+
+ for (uint i = 0; i < num_remove; i++) {
+ uint cur_idx = _num_regions - i - 1;
+ reclaimable += at(cur_idx)->reclaimable_bytes();
+ // Make sure we crash if we access it.
+ _regions[cur_idx] = NULL;
+ }
+
+ assert(reclaimable == wasted, "Recalculated reclaimable inconsistent");
+#endif
+ _num_regions -= num_remove;
+ _remaining_reclaimable_bytes -= wasted;
+}
+
 void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) {
 for (uint i = _front_idx; i < _num_regions; i++) {
 HeapRegion* r = _regions[i];
@@ -45,6 +64,16 @@ void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) {
 }
 }
+void G1CollectionSetCandidates::iterate_backwards(HeapRegionClosure* cl) {
+ for (uint i = _num_regions; i > _front_idx; i--) {
+ HeapRegion* r = _regions[i - 1];
+ if (cl->do_heap_region(r)) {
+ cl->set_incomplete();
+ break;
+ }
+ }
+}
+
 #ifndef PRODUCT
 void G1CollectionSetCandidates::verify() const {
 guarantee(_front_idx <= _num_regions, "Index: %u Num_regions: %u", _front_idx, _num_regions);
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp
index ce358d122aa74f86e666ee1d1d57ffd28b1a350e..3086cff0903af2544f658a0ba8ddc5c049d35dad 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetCandidates.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -74,10 +74,16 @@ public:
 return res;
 }
+ // Remove num_regions from the front of the collection set candidate list.
 void remove(uint num_regions);
+ // Remove num_remove regions from the back of the collection set candidate list.
+ void remove_from_end(uint num_remove, size_t wasted);
 // Iterate over all remaining collection set candidate regions.
 void iterate(HeapRegionClosure* cl);
+ // Iterate over all remaining collection set candidate regions from the end
+ // to the beginning of the set.
+ void iterate_backwards(HeapRegionClosure* cl);
 // Return the number of candidate regions remaining.
 uint num_remaining() { return _num_regions - _front_idx; }
diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
index 362e39bbeab6c5c46794435cd8142e2103a1d8ee..c982caf7d66e09023faf84b3711aab8112b62b3c 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -257,6 +257,60 @@ bool G1CollectionSetChooser::should_add(HeapRegion* hr) {
 hr->rem_set()->is_complete();
 }
+// Closure implementing early pruning (removal) of regions meeting the
+// G1HeapWastePercent criteria. That is, either until _max_pruned regions were
+// removed (for forward progress in evacuation) or the waste accumulated by the
+// removed regions is above max_wasted.
+class G1PruneRegionClosure : public HeapRegionClosure { + uint _num_pruned; + size_t _cur_wasted; + + uint const _max_pruned; + size_t const _max_wasted; + +public: + G1PruneRegionClosure(uint max_pruned, size_t max_wasted) : + _num_pruned(0), _cur_wasted(0), _max_pruned(max_pruned), _max_wasted(max_wasted) { } + + virtual bool do_heap_region(HeapRegion* r) { + size_t const reclaimable = r->reclaimable_bytes(); + if (_num_pruned >= _max_pruned || + _cur_wasted + reclaimable > _max_wasted) { + return true; + } + r->rem_set()->clear(true /* cardset_only */); + _cur_wasted += reclaimable; + _num_pruned++; + return false; + } + + uint num_pruned() const { return _num_pruned; } + size_t wasted() const { return _cur_wasted; } +}; + +void G1CollectionSetChooser::prune(G1CollectionSetCandidates* candidates) { + G1Policy* p = G1CollectedHeap::heap()->policy(); + + uint min_old_cset_length = p->calc_min_old_cset_length(candidates); + uint num_candidates = candidates->num_regions(); + + if (min_old_cset_length < num_candidates) { + size_t allowed_waste = p->allowed_waste_in_collection_set(); + + G1PruneRegionClosure prune_cl(num_candidates - min_old_cset_length, + allowed_waste); + candidates->iterate_backwards(&prune_cl); + + log_debug(gc, ergo, cset)("Pruned %u regions out of %u, leaving " SIZE_FORMAT " bytes waste (allowed " SIZE_FORMAT ")", + prune_cl.num_pruned(), + candidates->num_regions(), + prune_cl.wasted(), + allowed_waste); + + candidates->remove_from_end(prune_cl.num_pruned(), prune_cl.wasted()); + } +} + G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkGang* workers, uint max_num_regions) { uint num_workers = workers->active_workers(); uint chunk_size = calculate_work_chunk_size(num_workers, max_num_regions); @@ -265,6 +319,7 @@ G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkGang* workers, uint workers->run_task(&cl, num_workers); G1CollectionSetCandidates* result = cl.get_sorted_candidates(); + prune(result); result->verify(); return result; } diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp index 4d6a77abc191bf987a84d4751135b392ef2fc166..2fadcd8945b89afcec9896e811e2aeb91cb2e084 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,11 @@ class WorkGang; // methods. class G1CollectionSetChooser : public AllStatic { static uint calculate_work_chunk_size(uint num_workers, uint num_regions); + + // Remove regions in the collection set candidates as long as the G1HeapWastePercent + // criterion is met. Keep at least the minimum number of old regions to guarantee + // some progress.
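The pruning pass above is, in essence, a bounded walk from the cheap end of a gain-sorted array: starting at the least-reclaimable candidate, regions are dropped until either the region cap or the waste budget is exhausted. A stand-alone sketch of that loop, assuming candidates sorted by decreasing reclaimable bytes (the array contents, cap, and budget below are illustrative, not taken from this patch):

#include <cstddef>
#include <cstdio>

// Walk a gain-sorted candidate array from the back (least reclaimable first)
// and prune until either the waste budget or the region cap is exhausted,
// mirroring G1PruneRegionClosure driven by iterate_backwards().
int main() {
  size_t reclaimable[] = {900, 700, 400, 120, 80, 30}; // sorted descending
  const unsigned num_candidates = 6;
  const unsigned max_pruned = 3; // num_candidates - min_old_cset_length
  const size_t max_wasted = 200; // allowed waste budget in bytes

  unsigned num_pruned = 0;
  size_t cur_wasted = 0;
  for (unsigned i = num_candidates; i > 0; i--) {
    size_t r = reclaimable[i - 1];
    if (num_pruned >= max_pruned || cur_wasted + r > max_wasted) {
      break; // budget or cap reached; keep the remaining regions
    }
    cur_wasted += r;
    num_pruned++;
  }
  printf("pruned %u regions, wasting %zu bytes\n", num_pruned, cur_wasted);
  return 0;
}

With these numbers the loop prunes the 30- and 80-byte regions and stops at the 120-byte one, since pruning it would push the accumulated waste past the budget.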
+ static void prune(G1CollectionSetCandidates* candidates); public: static size_t mixed_gc_live_threshold_bytes() { diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index 53b31b22577b8855927cc3eeb40e6b423f15a43c..64780555a35859000b4895309c8406bf8be177d1 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -58,6 +58,7 @@ #include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/iterator.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/access.inline.hpp" diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp index 2e0171a72eeda6110da6d680c2de76d21bdd4d30..93eddc65eceb073cc29882be27e3e2bef3d6bc37 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp @@ -50,10 +50,6 @@ class G1RegionToSpaceMapper; class G1SurvivorRegions; class ThreadClosure; -PRAGMA_DIAG_PUSH -// warning C4522: multiple assignment operators specified -PRAGMA_DISABLE_MSVC_WARNING(4522) - // This is a container class for either an oop or a continuation address for // mark stack entries. Both are pushed onto the mark stack. class G1TaskQueueEntry { @@ -89,8 +85,6 @@ public: bool is_null() const { return _holder == NULL; } }; -PRAGMA_DIAG_POP - typedef GenericTaskQueue G1CMTaskQueue; typedef GenericTaskQueueSet G1CMTaskQueueSet; diff --git a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp index ac5ba8834fb81a432f314614b7733a9c38d7ca2d..80dd4288b20b7ba5b2b2aece32b9960a833b63b2 100644 --- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp @@ -81,7 +81,7 @@ class G1AdjustRegionClosure : public HeapRegionClosure { G1FullGCAdjustTask::G1FullGCAdjustTask(G1FullCollector* collector) : G1FullGCTask("G1 Adjust", collector), _root_processor(G1CollectedHeap::heap(), collector->workers()), - _references_done(0), + _references_done(false), _weak_proc_task(collector->workers()), _hrclaimer(collector->workers()), _adjust(collector), @@ -99,8 +99,7 @@ void G1FullGCAdjustTask::work(uint worker_id) { marker->preserved_stack()->adjust_during_full_gc(); // Adjust the weak roots. - - if (Atomic::add(&_references_done, 1u) == 1u) { // First incr claims task. 
+ if (!Atomic::cmpxchg(&_references_done, false, true)) { G1CollectedHeap::heap()->ref_processor_stw()->weak_oops_do(&_adjust); } diff --git a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp index 2ddcc9853fed760509475d0bea0641e45ea521cc..0b33b485452f8faf975266b7f04e2299ba5c2409 100644 --- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp @@ -38,7 +38,7 @@ class G1CollectedHeap; class G1FullGCAdjustTask : public G1FullGCTask { G1RootProcessor _root_processor; - volatile uint _references_done; // Atomic counter / bool + volatile bool _references_done; WeakProcessor::Task _weak_proc_task; HeapRegionClaimer _hrclaimer; G1AdjustClosure _adjust; diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp index 75d8f6563051a92150c7a02b34b18b07ca7addd2..755929968988e81138d022cddb506d885cd7223b 100644 --- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #ifndef SHARE_GC_G1_G1FULLGCMARKER_INLINE_HPP #define SHARE_GC_G1_G1FULLGCMARKER_INLINE_HPP +#include "classfile/classLoaderData.hpp" #include "classfile/javaClasses.inline.hpp" #include "gc/g1/g1Allocator.inline.hpp" #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp" diff --git a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp index 103fb4eca8843653bead7454e95eea48c1405ed9..a730a44e8b8b21a0d203c2772be45016cf57fe12 100644 --- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,8 +52,7 @@ G1FullGCReferenceProcessingExecutor::G1RefProcTaskProxy::G1RefProcTaskProxy(Proc G1FullCollector* collector) : AbstractGangTask("G1 reference processing task"), _proc_task(proc_task), - _collector(collector), - _terminator(_collector->workers(), _collector->oop_queue_set()) { } + _collector(collector) { } void G1FullGCReferenceProcessingExecutor::G1RefProcTaskProxy::work(uint worker_id) { G1FullGCMarker* marker = _collector->marker(worker_id); diff --git a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp index 657421490a01b048b4ba3b07fb365935310ac2c0..9887d0f130ce47af518253d97a93112706c3ec05 100644 --- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,6 @@ private: typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; ProcessTask& _proc_task; G1FullCollector* _collector; - TaskTerminator _terminator; public: G1RefProcTaskProxy(ProcessTask& proc_task, diff --git a/src/hotspot/share/gc/g1/g1HeapTransition.cpp b/src/hotspot/share/gc/g1/g1HeapTransition.cpp index a6cef0bed7e05176842adc3443fdd8e8de25b2ad..c3d1e740ab487cfa3a9218d64fc6db9c824f5a41 100644 --- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp +++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "gc/g1/g1HeapTransition.hpp" #include "gc/g1/g1Policy.hpp" #include "logging/logStream.hpp" -#include "memory/metaspace.hpp" +#include "memory/metaspaceUtils.hpp" G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) : _eden_length(g1_heap->eden_regions_count()), diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp index 7dee921bc7957b7d1c1799c8ac5cc72415eb4a41..0e58094a871c3340db97a844388727f872bf8e8c 100644 --- a/src/hotspot/share/gc/g1/g1Policy.cpp +++ b/src/hotspot/share/gc/g1/g1Policy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1278,22 +1278,16 @@ bool G1Policy::next_gc_should_be_mixed(const char* true_action_str, log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str); return false; } - - // Is the amount of uncollected reclaimable space above G1HeapWastePercent? - size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes(); - double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes); - double threshold = (double) G1HeapWastePercent; - if (reclaimable_percent <= threshold) { - log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, - false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); - return false; - } - log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT, - true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent); + // Go through all regions - we already pruned regions not worth collecting + // during candidate selection. return true; } -uint G1Policy::calc_min_old_cset_length() const { +size_t G1Policy::allowed_waste_in_collection_set() const { + return G1HeapWastePercent * _g1h->capacity() / 100; +} + +uint G1Policy::calc_min_old_cset_length(G1CollectionSetCandidates* candidates) const { // The min old CSet region bound is based on the maximum desired // number of mixed GCs after a cycle. 
I.e., even if some old regions // look expensive, we should add them to the CSet anyway to make @@ -1304,7 +1298,7 @@ uint G1Policy::calc_min_old_cset_length() const { // to the CSet candidates in the first place, not how many remain, so // that the result is the same during all mixed GCs that follow a cycle. - const size_t region_num = _collection_set->candidates()->num_regions(); + const size_t region_num = candidates->num_regions(); const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); size_t result = region_num / gc_num; // emulate ceiling @@ -1347,7 +1341,7 @@ void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* c double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction(); - const uint min_old_cset_length = calc_min_old_cset_length(); + const uint min_old_cset_length = calc_min_old_cset_length(candidates); const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length()); const uint max_optional_regions = max_old_cset_length - min_old_cset_length; bool check_time_remaining = use_adaptive_young_list_length(); diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp index d3d30805c5d8bbb0cb4961195198036dbc2914b0..5ee3bb0c754cb0cc97d72bb6e4f5c8afdb3cb5d0 100644 --- a/src/hotspot/share/gc/g1/g1Policy.hpp +++ b/src/hotspot/share/gc/g1/g1Policy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -246,7 +246,7 @@ public: // Calculate the minimum number of old regions we'll add to the CSet // during a mixed GC. - uint calc_min_old_cset_length() const; + uint calc_min_old_cset_length(G1CollectionSetCandidates* candidates) const; // Calculate the maximum number of old regions we'll add to the CSet // during a mixed GC. @@ -347,6 +347,8 @@ public: bool next_gc_should_be_mixed(const char* true_action_str, const char* false_action_str) const; + // Amount of allowed waste in bytes in the collection set. + size_t allowed_waste_in_collection_set() const; // Calculate and return the number of initial and optional old gen regions from // the given collection set candidates and the remaining time. void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates, diff --git a/src/hotspot/share/gc/g1/g1RootProcessor.cpp b/src/hotspot/share/gc/g1/g1RootProcessor.cpp index 8ed58797955f019e0ed53480ac5c042eb11250a7..93a45b9ffaeafde1d4ed375dafc18376769a1d0b 100644 --- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp +++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp @@ -75,7 +75,7 @@ void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_id) } // CodeCache is already processed in java roots - _process_strong_tasks.all_tasks_completed(n_workers(), G1RP_PS_CodeCache_oops_do); + _process_strong_tasks.all_tasks_claimed(G1RP_PS_CodeCache_oops_do); } // Adaptor to pass the closures to the strong roots in the VM. 
@@ -106,9 +106,8 @@ void G1RootProcessor::process_strong_roots(OopClosure* oops, // CodeCache is already processed in java roots // refProcessor is not needed since we are inside a safe point - _process_strong_tasks.all_tasks_completed(n_workers(), - G1RP_PS_CodeCache_oops_do, - G1RP_PS_refProcessor_oops_do); + _process_strong_tasks.all_tasks_claimed(G1RP_PS_CodeCache_oops_do, + G1RP_PS_refProcessor_oops_do); } // Adaptor to pass the closures to all the roots in the VM. @@ -144,7 +143,7 @@ void G1RootProcessor::process_all_roots(OopClosure* oops, process_code_cache_roots(blobs, NULL, 0); // refProcessor is not needed since we are inside a safe point - _process_strong_tasks.all_tasks_completed(n_workers(), G1RP_PS_refProcessor_oops_do); + _process_strong_tasks.all_tasks_claimed(G1RP_PS_refProcessor_oops_do); } void G1RootProcessor::process_java_roots(G1RootClosures* closures, diff --git a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp index 4c122ab41bd0f00694e2091b9a17b33b8e299227..63ea389e072fbd0b257b44c5c803620e683f0fae 100644 --- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp +++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp @@ -145,6 +145,13 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) { PerRegionTable* prt = find_region_table(ind, from_hr); if (prt == NULL) { MutexLocker x(_m, Mutex::_no_safepoint_check_flag); + + // Rechecking if the region is coarsened, while holding the lock. + if (is_region_coarsened(from_hrm_ind)) { + assert(contains_reference_locked(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from)); + return; + } + // Confirm that it's really not there... prt = find_region_table(ind, from_hr); if (prt == NULL) { @@ -160,6 +167,8 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) { return; } + // Sparse PRT returned overflow (sparse table is full) + if (_n_fine_entries == _max_fine_entries) { prt = delete_region_table(num_added_by_coarsening); // There is no need to clear the links to the 'all' list here: diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp index 62b52fecd9dbaa25f32de31feefdff7ca40822e2..defb1ffd04e117c25fee6b13d23e2ff14805a402 100644 --- a/src/hotspot/share/gc/parallel/mutableSpace.cpp +++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp @@ -215,6 +215,15 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) { return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top; } +// Only used by oldgen allocation. +bool MutableSpace::needs_expand(size_t word_size) const { + assert_lock_strong(ExpandHeap_lock); + // Holding the lock means end is stable. So while top may be advancing + // via concurrent allocations, there is no need to order the reads of top + // and end here, unlike in cas_allocate. + return pointer_delta(end(), top()) < word_size; +} + void MutableSpace::oop_iterate(OopIterateClosure* cl) { HeapWord* obj_addr = bottom(); HeapWord* t = top(); diff --git a/src/hotspot/share/gc/parallel/mutableSpace.hpp b/src/hotspot/share/gc/parallel/mutableSpace.hpp index 3e9b0a1514c822096d44c5060dbd01ee472be4e5..b6bb131828f22b0da41bb9ad0247bde8da816b00 100644 --- a/src/hotspot/share/gc/parallel/mutableSpace.hpp +++ b/src/hotspot/share/gc/parallel/mutableSpace.hpp @@ -142,6 +142,11 @@ class MutableSpace: public CHeapObj { virtual HeapWord* cas_allocate(size_t word_size); // Optional deallocation. Used in NUMA-allocator. 
bool cas_deallocate(HeapWord *obj, size_t size); + // Return true if this space needs to be expanded in order to satisfy an + // allocation request of the indicated size. Concurrent allocations and + // resizes may change the result of a later call. Used by oldgen allocator. + // precondition: holding ExpandHeap_lock + bool needs_expand(size_t word_size) const; // Iteration. void oop_iterate(OopIterateClosure* cl); diff --git a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp index f26992f41207ad2663f90b4d0354321b6c8d79a4..d08762b2ca15cade115e6e0ee2793fe77b017e78 100644 --- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp +++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp @@ -50,7 +50,8 @@ ParMarkBitMap::initialize(MemRegion covered_region) const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 : MAX2(page_sz, granularity); ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0); - os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes, page_sz, + const size_t used_page_sz = ReservedSpace::actual_reserved_page_size(rs); + os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes, used_page_sz, rs.base(), rs.size()); MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 25af816bf2d94e5a1f2713378a4b115a0b0ca605..550002367c86569097cbc52292a811cee692a2c3 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -44,6 +44,7 @@ #include "logging/log.hpp" #include "memory/iterator.hpp" #include "memory/metaspaceCounters.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp index e4cdb776453e48e99838188e5eb13f6e2fe8e4f9..689400fbe040ea68623db06986b0342d90382b99 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp @@ -39,7 +39,6 @@ #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/workgroup.hpp" #include "logging/log.hpp" -#include "memory/metaspace.hpp" #include "utilities/growableArray.hpp" #include "utilities/ostream.hpp" diff --git a/src/hotspot/share/gc/parallel/psCardTable.hpp b/src/hotspot/share/gc/parallel/psCardTable.hpp index df1e0158727a1c8d192b207b14260fcb1afca82f..d912c6567412503e1c08ced94824b24d4b41c401 100644 --- a/src/hotspot/share/gc/parallel/psCardTable.hpp +++ b/src/hotspot/share/gc/parallel/psCardTable.hpp @@ -51,7 +51,7 @@ class PSCardTable: public CardTable { }; public: - PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {} + PSCardTable(MemRegion whole_heap) : CardTable(whole_heap) {} static CardValue youngergen_card_val() { return youngergen_card; } static CardValue verify_card_val() { return verify_card; } diff --git a/src/hotspot/share/gc/parallel/psClosure.inline.hpp b/src/hotspot/share/gc/parallel/psClosure.inline.hpp index 9100abf3bb9f53ddb32d0249d382355b5b67b019..8e347b20f7866d715d363acf459ddac2fec159e1 100644 --- a/src/hotspot/share/gc/parallel/psClosure.inline.hpp +++ b/src/hotspot/share/gc/parallel/psClosure.inline.hpp @@ -32,6 +32,26 @@ #include "oops/oop.inline.hpp" #include "utilities/globalDefinitions.hpp" +class PSAdjustWeakRootsClosure 
final: public OopClosure { +public: + virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } + + virtual void do_oop(oop* p) { + if (PSScavenge::should_scavenge(p)) { + oop o = RawAccess<IS_NOT_NULL>::oop_load(p); + assert(o->is_forwarded(), "Objects are already forwarded before weak processing"); + oop new_obj = o->forwardee(); + if (log_develop_is_enabled(Trace, gc, scavenge)) { + ResourceMark rm; // required by internal_name() + log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", + "forwarding", + new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size()); + } + RawAccess<IS_NOT_NULL>::oop_store(p, new_obj); + } + } +}; + template <bool promote_immediately> class PSRootsClosure: public OopClosure { private: diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.cpp b/src/hotspot/share/gc/parallel/psCompactionManager.cpp index fecb67317f1d68309c8cf31a0c06a4c0dfd02d10..117817caacc9c9c3fb99678f2495adc120d037a7 100644 --- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp +++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp @@ -179,3 +179,19 @@ void ParCompactionManager::push_shadow_region(size_t shadow_region) { void ParCompactionManager::remove_all_shadow_regions() { _shadow_region_array->clear(); } + +#ifdef ASSERT +void ParCompactionManager::verify_all_marking_stack_empty() { + uint parallel_gc_threads = ParallelGCThreads; + for (uint i = 0; i <= parallel_gc_threads; i++) { + assert(_manager_array[i]->marking_stacks_empty(), "Marking stack should be empty"); + } +} + +void ParCompactionManager::verify_all_region_stack_empty() { + uint parallel_gc_threads = ParallelGCThreads; + for (uint i = 0; i <= parallel_gc_threads; i++) { + assert(_manager_array[i]->region_stack()->is_empty(), "Region stack should be empty"); + } +} +#endif diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.hpp index 6b899231ac6ac3259ce51b73ab9c53b4dbe35faf..29be946c072ddf2768e54c56f3c093591db802c9 100644 --- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp +++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp @@ -46,10 +46,6 @@ class ParCompactionManager : public CHeapObj<mtGC> { friend class PCRefProcTask; friend class MarkFromRootsTask; friend class UpdateDensePrefixAndCompactionTask; - - public: - - private: typedef GenericTaskQueue<oop, mtGC> OopTaskQueue; typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet; @@ -69,7 +65,6 @@ class ParCompactionManager : public CHeapObj<mtGC> { static RegionTaskQueueSet* _region_task_queues; static PSOldGen* _old_gen; -private: OverflowTaskQueue<oop, mtGC> _marking_stack; ObjArrayTaskQueue _objarray_stack; size_t _next_shadow_region; @@ -143,7 +138,7 @@ private: RegionTaskQueue* region_stack() { return &_region_stack; } - inline static ParCompactionManager* manager_array(uint index); + static ParCompactionManager* get_vmthread_cm() { return _manager_array[ParallelGCThreads]; } ParCompactionManager(); @@ -196,13 +191,13 @@ private: FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } virtual void do_void(); }; -}; -inline ParCompactionManager* ParCompactionManager::manager_array(uint index) { - assert(_manager_array != NULL, "access of NULL manager_array"); - assert(index <= ParallelGCThreads, "out of range manager_array access"); - return _manager_array[index]; -} + // Called after marking. + static void verify_all_marking_stack_empty() NOT_DEBUG_RETURN; + + // Region stacks hold regions in from-space; called after compaction.
+ static void verify_all_region_stack_empty() NOT_DEBUG_RETURN; +}; bool ParCompactionManager::marking_stacks_empty() const { return _marking_stack.is_empty() && _objarray_stack.is_empty(); diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp index 03ce090a4f43e201a4dad06cc3be3b14a2c5bc4b..a0df3dade2a90cc35691434e8a6819c2a03837c8 100644 --- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp +++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #ifndef SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP #define SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP +#include "classfile/classLoaderData.hpp" #include "classfile/javaClasses.inline.hpp" #include "gc/parallel/parMarkBitMap.hpp" #include "gc/parallel/psCompactionManager.hpp" diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp index 1b97f949628ba40790e264d20275b4b8b5d1e047..44c7901571ac339bbf726203e040b8873ab1737b 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.cpp +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp @@ -178,19 +178,31 @@ void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) { } } -HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) { - expand(word_size*HeapWordSize); +bool PSOldGen::expand_for_allocate(size_t word_size) { + assert(word_size > 0, "allocating zero words?"); + bool result = true; + { + MutexLocker x(ExpandHeap_lock); + // Avoid "expand storms" by rechecking available space after obtaining + // the lock, because another thread may have already made sufficient + // space available. If insufficient space available, that will remain + // true until we expand, since we have the lock. Other threads may take + // the space we need before we can allocate it, regardless of whether we + // expand. That's okay, we'll just try expanding again. + if (object_space()->needs_expand(word_size)) { + result = expand(word_size*HeapWordSize); + } + } if (GCExpandToAllocateDelayMillis > 0) { os::naked_sleep(GCExpandToAllocateDelayMillis); } - return cas_allocate_noexpand(word_size); + return result; } -void PSOldGen::expand(size_t bytes) { - if (bytes == 0) { - return; - } - MutexLocker x(ExpandHeap_lock); +bool PSOldGen::expand(size_t bytes) { + assert_lock_strong(ExpandHeap_lock); + assert_locked_or_safepoint(Heap_lock); + assert(bytes > 0, "precondition"); const size_t alignment = virtual_space()->alignment(); size_t aligned_bytes = align_up(bytes, alignment); size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment); @@ -200,13 +212,11 @@ void PSOldGen::expand(size_t bytes) { // providing a page per lgroup. Alignment is larger or equal to the page size. aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num()); } - if (aligned_bytes == 0){ - // The alignment caused the number of bytes to wrap. An expand_by(0) will - // return true with the implication that and expansion was done when it - // was not. A call to expand implies a best effort to expand by "bytes" - // but not a guarantee. Align down to give a best effort. 
This is likely - // the most that the generation can expand since it has some capacity to - // start with. + if (aligned_bytes == 0) { + // The alignment caused the number of bytes to wrap. A call to expand + // implies a best effort to expand by "bytes" but not a guarantee. Align + // down to give a best effort. This is likely the most that the generation + // can expand since it has some capacity to start with. aligned_bytes = align_down(bytes, alignment); } @@ -224,14 +234,13 @@ void PSOldGen::expand(size_t bytes) { if (success && GCLocker::is_active_and_needs_gc()) { log_debug(gc)("Garbage collection disabled, expanded heap instead"); } + return success; } bool PSOldGen::expand_by(size_t bytes) { assert_lock_strong(ExpandHeap_lock); assert_locked_or_safepoint(Heap_lock); - if (bytes == 0) { - return true; // That's what virtual_space()->expand_by(0) would return - } + assert(bytes > 0, "precondition"); bool result = virtual_space()->expand_by(bytes); if (result) { if (ZapUnusedHeapArea) { @@ -268,7 +277,7 @@ bool PSOldGen::expand_to_reserved() { assert_lock_strong(ExpandHeap_lock); assert_locked_or_safepoint(Heap_lock); - bool result = true; + bool result = false; const size_t remaining_bytes = virtual_space()->uncommitted_size(); if (remaining_bytes > 0) { result = expand_by(remaining_bytes); @@ -323,10 +332,10 @@ void PSOldGen::resize(size_t desired_free_space) { } if (new_size > current_size) { size_t change_bytes = new_size - current_size; + MutexLocker x(ExpandHeap_lock); expand(change_bytes); } else { size_t change_bytes = current_size - new_size; - // shrink doesn't grab this lock, expand does. Is that right? MutexLocker x(ExpandHeap_lock); shrink(change_bytes); } diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp index dd0e7fe1e83c56debf2362c24a73692e6c2d6980..53947a948984caffa548ba7dfb3880599dc96d5f 100644 --- a/src/hotspot/share/gc/parallel/psOldGen.hpp +++ b/src/hotspot/share/gc/parallel/psOldGen.hpp @@ -79,8 +79,8 @@ class PSOldGen : public CHeapObj { return res; } - HeapWord* expand_and_cas_allocate(size_t word_size); - void expand(size_t bytes); + bool expand_for_allocate(size_t word_size); + bool expand(size_t bytes); bool expand_by(size_t bytes); bool expand_to_reserved(); @@ -135,8 +135,12 @@ class PSOldGen : public CHeapObj { void resize(size_t desired_free_space); HeapWord* allocate(size_t word_size) { - HeapWord* res = cas_allocate_noexpand(word_size); - return (res == NULL) ? expand_and_cas_allocate(word_size) : res; + HeapWord* res; + do { + res = cas_allocate_noexpand(word_size); + // Retry failed allocation if expand succeeds. + } while ((res == nullptr) && expand_for_allocate(word_size)); + return res; } // Iteration. diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index 41e0d28d6914075b92f7e37e93ebcd6e7681e49a..0acee8a8ecb7dc59956980e7ac6c2b466e2f0e7e 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -57,11 +57,12 @@ #include "gc/shared/referenceProcessorPhaseTimes.hpp" #include "gc/shared/spaceDecorator.inline.hpp" #include "gc/shared/taskTerminator.hpp" -#include "gc/shared/weakProcessor.hpp" +#include "gc/shared/weakProcessor.inline.hpp" #include "gc/shared/workerPolicy.hpp" #include "gc/shared/workgroup.hpp" #include "logging/log.hpp" #include "memory/iterator.inline.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/access.inline.hpp" @@ -779,7 +780,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info, return true; } -HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) { +HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const { assert(addr != NULL, "Should detect NULL oop earlier"); assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap"); assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked"); @@ -858,7 +859,6 @@ public: BoolObjectClosure* is_subject_to_discovery, BoolObjectClosure* is_alive_non_header) : ReferenceProcessor(is_subject_to_discovery, - ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing ParallelGCThreads, // mt processing degree true, // mt discovery ParallelGCThreads, // mt discovery degree @@ -1784,12 +1784,9 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { const PreGenGCValues pre_gc_values = heap->get_pre_gc_values(); // Get the compaction manager reserved for the VM thread. - ParCompactionManager* const vmthread_cm = - ParCompactionManager::manager_array(ParallelScavengeHeap::heap()->workers().total_workers()); + ParCompactionManager* const vmthread_cm = ParCompactionManager::get_vmthread_cm(); { - ResourceMark rm; - const uint active_workers = WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().total_workers(), ParallelScavengeHeap::heap()->workers().active_workers(), @@ -1834,11 +1831,13 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { // adjust_roots() updates Universe::_intArrayKlassObj which is // needed by the compaction for filling holes in the dense prefix. - adjust_roots(vmthread_cm); + adjust_roots(); compaction_start.update(); compact(); + ParCompactionManager::verify_all_region_stack_empty(); + // Reset the mark bitmap, summary data, and do other bookkeeping. Must be // done before resizing. post_compact(); @@ -1935,15 +1934,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { heap->post_full_gc_dump(&_gc_timer); } -#ifdef ASSERT - for (size_t i = 0; i < ParallelGCThreads + 1; ++i) { - ParCompactionManager* const cm = - ParCompactionManager::manager_array(int(i)); - assert(cm->marking_stack()->is_empty(), "should be empty"); - assert(cm->region_stack()->is_empty(), "Region stack " SIZE_FORMAT " is not empty", i); - } -#endif // ASSERT - if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { Universe::verify("After GC"); } @@ -2183,7 +2173,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm, } // This is the point where the entire marking should have completed. 
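The verify_all_marking_stack_empty() call introduced just below compiles to nothing in product builds: the declarations added in psCompactionManager.hpp above use HotSpot's NOT_DEBUG_RETURN macro, which leaves a plain declaration in debug builds and supplies an empty inline body otherwise. A minimal sketch of that convention (the struct and function names are stand-ins):

// Debug builds get a declaration (the definition lives in the .cpp file);
// product builds get an empty inline stub the compiler optimizes away.
#ifdef ASSERT
#define NOT_DEBUG_RETURN
#else
#define NOT_DEBUG_RETURN {}
#endif

struct Manager {
  static void verify_all_stacks_empty() NOT_DEBUG_RETURN;
};

#ifdef ASSERT
void Manager::verify_all_stacks_empty() {
  // Walk every per-worker manager here and assert its stacks are drained.
}
#endif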
- assert(cm->marking_stacks_empty(), "Marking should have completed"); + ParCompactionManager::verify_all_marking_stack_empty(); { GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer); @@ -2209,35 +2199,94 @@ _gc_tracer.report_object_count_after_gc(is_alive_closure()); } -void PSParallelCompact::adjust_roots(ParCompactionManager* cm) { - // Adjust the pointers to reflect the new locations - GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer); +#ifdef ASSERT +void PCAdjustPointerClosure::verify_cm(ParCompactionManager* cm) { + assert(cm != NULL, "associated ParCompactionManager should not be NULL"); + auto vmthread_cm = ParCompactionManager::get_vmthread_cm(); + if (Thread::current()->is_VM_thread()) { + assert(cm == vmthread_cm, "VM threads should use ParCompactionManager from get_vmthread_cm()"); + } else { + assert(Thread::current()->is_GC_task_thread(), "Must be a GC thread"); + assert(cm != vmthread_cm, "GC threads should use ParCompactionManager from gc_thread_compaction_manager()"); + } +} +#endif - // Need new claim bits when tracing through and adjusting pointers. - ClassLoaderDataGraph::clear_claimed_marks(); +class PSAdjustTask final : public AbstractGangTask { + SubTasksDone _sub_tasks; + WeakProcessor::Task _weak_proc_task; + OopStorageSetStrongParState<false, false> _oop_storage_iter; + uint _nworkers; - PCAdjustPointerClosure oop_closure(cm); + enum PSAdjustSubTask { + PSAdjustSubTask_code_cache, + PSAdjustSubTask_aot, + PSAdjustSubTask_old_ref_process, + PSAdjustSubTask_young_ref_process, - // General strong roots. - Threads::oops_do(&oop_closure, NULL); - OopStorageSet::strong_oops_do(&oop_closure); - CLDToOopClosure cld_closure(&oop_closure, ClassLoaderData::_claim_strong); - ClassLoaderDataGraph::cld_do(&cld_closure); + PSAdjustSubTask_num_elements + }; - // Now adjust pointers in remaining weak roots. (All of which should - // have been cleared if they pointed to non-surviving objects.) - WeakProcessor::oops_do(&oop_closure); +public: + PSAdjustTask(uint nworkers) : + AbstractGangTask("PSAdjust task"), + _sub_tasks(PSAdjustSubTask_num_elements), + _weak_proc_task(nworkers), + _nworkers(nworkers) { + // Need new claim bits when tracing through and adjusting pointers. + ClassLoaderDataGraph::clear_claimed_marks(); + if (nworkers > 1) { + Threads::change_thread_claim_token(); + } + } - CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations); - CodeCache::blobs_do(&adjust_from_blobs); - AOT_ONLY(AOTLoader::oops_do(&oop_closure);) + ~PSAdjustTask() { + Threads::assert_all_threads_claimed(); + } - ref_processor()->weak_oops_do(&oop_closure); - // Roots were visited so references into the young gen in roots - // may have been scanned. Process them also. - // Should the reference processor have a span that excludes - // young gen objects?
- PSScavenge::reference_processor()->weak_oops_do(&oop_closure); + void work(uint worker_id) { + ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id); + PCAdjustPointerClosure adjust(cm); + { + ResourceMark rm; + Threads::possibly_parallel_oops_do(_nworkers > 1, &adjust, nullptr); + } + _oop_storage_iter.oops_do(&adjust); + { + CLDToOopClosure cld_closure(&adjust, ClassLoaderData::_claim_strong); + ClassLoaderDataGraph::cld_do(&cld_closure); + } + { + AlwaysTrueClosure always_alive; + _weak_proc_task.work(worker_id, &always_alive, &adjust); + } + if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) { + CodeBlobToOopClosure adjust_code(&adjust, CodeBlobToOopClosure::FixRelocations); + CodeCache::blobs_do(&adjust_code); + } + if (_sub_tasks.try_claim_task(PSAdjustSubTask_aot)) { + AOT_ONLY(AOTLoader::oops_do(&adjust);) + } + if (_sub_tasks.try_claim_task(PSAdjustSubTask_old_ref_process)) { + PSParallelCompact::ref_processor()->weak_oops_do(&adjust); + } + if (_sub_tasks.try_claim_task(PSAdjustSubTask_young_ref_process)) { + // Roots were visited so references into the young gen in roots + // may have been scanned. Process them also. + // Should the reference processor have a span that excludes + // young gen objects? + PSScavenge::reference_processor()->weak_oops_do(&adjust); + } + _sub_tasks.all_tasks_claimed(); + } +}; + +void PSParallelCompact::adjust_roots() { + // Adjust the pointers to reflect the new locations + GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer); + uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers(); + PSAdjustTask task(nworkers); + ParallelScavengeHeap::heap()->workers().run_task(&task); } // Helper class to print 8 region numbers per line and then print the total at the end. @@ -2306,7 +2355,7 @@ void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads) for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) { if (sd.region(cur)->claim_unsafe()) { - ParCompactionManager* cm = ParCompactionManager::manager_array(worker_id); + ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id); bool result = sd.region(cur)->mark_normal(); assert(result, "Must succeed at this point."); cm->region_stack()->push(cur); @@ -2505,7 +2554,6 @@ static void compaction_with_stealing_work(TaskTerminator* terminator, uint worke // Go around again. } } - return; } class UpdateDensePrefixAndCompactionTask: public AbstractGangTask { @@ -2571,9 +2619,11 @@ void PSParallelCompact::compact() { } { - // Update the deferred objects, if any. Any compaction manager can be used. GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer); - ParCompactionManager* cm = ParCompactionManager::manager_array(0); + // Update the deferred objects, if any. In principle, any compaction + // manager can be used. However, since the current thread is VM thread, we + // use the rightful one to keep the verification logic happy. 
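The get_vmthread_cm() accessor used just below relies on the manager array being sized with one extra trailing slot: indices 0 through ParallelGCThreads - 1 belong to the GC workers and index ParallelGCThreads to the VM thread, which is also why the verification loops added in psCompactionManager.cpp iterate with i <= parallel_gc_threads. A stand-alone sketch of that layout (the names and the worker count are illustrative, not from this patch):

#include <cassert>
#include <cstdint>

// One manager per GC worker plus a dedicated trailing slot for the VM thread.
struct Manager { /* per-thread compaction state */ };

const uint32_t num_workers = 8; // stand-in for ParallelGCThreads
static Manager manager_array[num_workers + 1];

Manager* gc_thread_manager(uint32_t worker_id) {
  assert(worker_id < num_workers && "worker id out of range");
  return &manager_array[worker_id];
}

Manager* vmthread_manager() {
  return &manager_array[num_workers]; // the extra slot past the workers
}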
+ ParCompactionManager* cm = ParCompactionManager::get_vmthread_cm(); for (unsigned int id = old_space_id; id < last_space_id; ++id) { update_deferred_objects(cm, SpaceId(id)); } } @@ -3133,7 +3183,7 @@ void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads) size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix()); for (uint i = 0; i < parallel_gc_threads; i++) { - ParCompactionManager *cm = ParCompactionManager::manager_array(i); + ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i); cm->set_next_shadow_region(beg_region + i); } } diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.hpp index c14dffb2334991be1b1b1c2341873090622d2baa..91cfc9a484f2a6a49f41df5c1c5ea4ac5c4735de 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -480,9 +480,9 @@ public: HeapWord* partial_obj_end(size_t region_idx) const; // Return the location of the object after compaction. - HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm); + HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const; - HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) { + HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) const { return calc_new_pointer(cast_from_oop<HeapWord*>(p), cm); } @@ -1107,7 +1107,7 @@ class PSParallelCompact : AllStatic { static void summary_phase(ParCompactionManager* cm, bool maximum_compaction); // Adjust addresses in roots. Does not adjust addresses in heap. - static void adjust_roots(ParCompactionManager* cm); + static void adjust_roots(); DEBUG_ONLY(static void write_block_fill_histogram();) @@ -1144,7 +1144,7 @@ class PSParallelCompact : AllStatic { static bool initialize(); // Closure accessors - static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; } + static BoolObjectClosure* is_alive_closure() { return &_is_alive_closure; } // Public accessors static elapsedTimer* accumulated_time() { return &_accumulated_time; } diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp index 688da81e9c2848caa2e5756b59c944307db8f58e..7d2678e3f4c457fd4979e5b64a49396f79d9b8d0 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp @@ -113,10 +113,9 @@ inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) { assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap"); oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm); - assert(new_obj != NULL, // is forwarding ptr? - "should be forwarded"); - // Just always do the update unconditionally? - if (new_obj != NULL) { + assert(new_obj != NULL, "non-null address for live objects"); + // Is it actually relocated at all?
+ if (new_obj != obj) { assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj), "should be in object space"); RawAccess<IS_NOT_NULL>::oop_store(p, new_obj); @@ -127,7 +126,7 @@ inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) { class PCAdjustPointerClosure: public BasicOopIterateClosure { public: PCAdjustPointerClosure(ParCompactionManager* cm) { - assert(cm != NULL, "associate ParCompactionManage should not be NULL"); + verify_cm(cm); _cm = cm; } template <typename T> void do_oop_nv(T* p) { PSParallelCompact::adjust_pointer(p, _cm); } @@ -137,6 +136,8 @@ public: virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; } private: ParCompactionManager* _cm; + + static void verify_cm(ParCompactionManager* cm) NOT_DEBUG_RETURN; }; #endif // SHARE_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp index 0b07c01f2625445894bed07f0f3bd70b18ca3fe6..61af24c6c4bf7209c9dc31b4d86c4f3457b272e4 100644 --- a/src/hotspot/share/gc/parallel/psScavenge.cpp +++ b/src/hotspot/share/gc/parallel/psScavenge.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,7 +52,7 @@ #include "gc/shared/scavengableNMethods.hpp" #include "gc/shared/spaceDecorator.inline.hpp" #include "gc/shared/taskTerminator.hpp" -#include "gc/shared/weakProcessor.hpp" +#include "gc/shared/weakProcessor.inline.hpp" #include "gc/shared/workerPolicy.hpp" #include "gc/shared/workgroup.hpp" #include "memory/iterator.hpp" @@ -520,11 +520,10 @@ bool PSScavenge::invoke_no_policy() { assert(promotion_manager->stacks_empty(),"stacks should be empty at this point"); - PSScavengeRootsClosure root_closure(promotion_manager); - { GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer); - WeakProcessor::weak_oops_do(&_is_alive_closure, &root_closure); + PSAdjustWeakRootsClosure root_closure; + WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(), &_is_alive_closure, &root_closure, 1); } // Verify that usage of root_closure didn't copy any objects. @@ -822,7 +821,6 @@ void PSScavenge::initialize() { _span_based_discoverer.set_span(young_gen->reserved()); _ref_processor = new ReferenceProcessor(&_span_based_discoverer, - ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing ParallelGCThreads, // mt processing degree true, // mt discovery ParallelGCThreads, // mt discovery degree diff --git a/src/hotspot/share/gc/serial/cSpaceCounters.cpp b/src/hotspot/share/gc/serial/cSpaceCounters.cpp index 24a78036122b8087cda90156c544040ff00122f0..1f95a971ccab130e0c628f17297772f23cf6de6a 100644 --- a/src/hotspot/share/gc/serial/cSpaceCounters.cpp +++ b/src/hotspot/share/gc/serial/cSpaceCounters.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,6 @@ #include "precompiled.hpp" #include "gc/serial/cSpaceCounters.hpp" #include "memory/allocation.inline.hpp" -#include "memory/metaspace.hpp" #include "memory/resourceArea.hpp" CSpaceCounters::CSpaceCounters(const char* name, int ordinal, size_t max_size, diff --git a/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp b/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp index 106412d90b48a9593b2b5cd6be2785327d0f279a..a89b57d8e457707c3e87946cc99aca96659c49cb 100644 --- a/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp +++ b/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp @@ -83,9 +83,6 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, L LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val()); if (UseCondCardMark) { LIR_Opr cur_value = gen->new_register(T_INT); - if (ct->scanned_concurrently()) { - __ membar_storeload(); - } __ move(card_addr, cur_value); LabelObj* L_already_dirty = new LabelObj(); @@ -94,9 +91,6 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, L __ move(dirty, card_addr); __ branch_destination(L_already_dirty->label()); } else { - if (ct->scanned_concurrently()) { - __ membar_storestore(); - } __ move(dirty, card_addr); } #endif diff --git a/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp index be20fbc12ddd6248bf38c1a1955eff8435f572d0..d3da34e5b27fb41f9126f8601ccfdb568cc81371 100644 --- a/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp +++ b/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp @@ -58,8 +58,6 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit, Node* val, BasicType bt, bool use_precise) const { - CardTableBarrierSet* ctbs = barrier_set_cast(BarrierSet::barrier_set()); - CardTable* ct = ctbs->card_table(); // No store check needed if we're storing a NULL or an old object // (latter case is probably a string constant). The concurrent // mark sweep garbage collector, however, needs to have all nonNull @@ -105,10 +103,6 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit, Node* zero = __ ConI(0); // Dirty card value if (UseCondCardMark) { - if (ct->scanned_concurrently()) { - kit->insert_mem_bar(Op_MemBarVolatile, oop_store); - __ sync_kit(kit); - } // The classic GC reference write barrier is typically implemented // as a store into the global card mark table. 
Unfortunately // unconditional stores can result in false sharing and excessive @@ -121,12 +115,7 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit, } // Smash zero into card - if (!ct->scanned_concurrently()) { - __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered); - } else { - // Specialized path for CM store barrier - __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, T_BYTE, adr_type); - } + __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered); if (UseCondCardMark) { __ end_if(); diff --git a/src/hotspot/share/gc/shared/cardTable.cpp b/src/hotspot/share/gc/shared/cardTable.cpp index b2a7118e8aa77bed4feb3ffc01c17c9b689f1c1e..84f624b300133adb26a108f849a9a29480bfacb7 100644 --- a/src/hotspot/share/gc/shared/cardTable.cpp +++ b/src/hotspot/share/gc/shared/cardTable.cpp @@ -41,8 +41,7 @@ size_t CardTable::compute_byte_map_size() { return align_up(_guard_index + 1, MAX2(_page_size, granularity)); } -CardTable::CardTable(MemRegion whole_heap, bool conc_scan) : - _scanned_concurrently(conc_scan), +CardTable::CardTable(MemRegion whole_heap) : _whole_heap(whole_heap), _guard_index(0), _last_valid_index(0), diff --git a/src/hotspot/share/gc/shared/cardTable.hpp b/src/hotspot/share/gc/shared/cardTable.hpp index f5b06ebb172cebdf7e2e64379e9573dc1388e94b..ff406eee4be5becec92b4c3eef08fab6eaeb4f36 100644 --- a/src/hotspot/share/gc/shared/cardTable.hpp +++ b/src/hotspot/share/gc/shared/cardTable.hpp @@ -43,7 +43,6 @@ public: protected: // The declaration order of these const fields is important; see the // constructor before changing. - const bool _scanned_concurrently; const MemRegion _whole_heap; // the region covered by the card table size_t _guard_index; // index of very last element in the card // table; it is set to a guard value @@ -113,7 +112,7 @@ protected: static const intptr_t clean_card_row = (intptr_t)(-1); public: - CardTable(MemRegion whole_heap, bool conc_scan); + CardTable(MemRegion whole_heap); virtual ~CardTable(); virtual void initialize(); @@ -245,7 +244,6 @@ public: // But since the heap starts at some higher address, this points to somewhere // before the beginning of the actual _byte_map. 
CardValue* byte_map_base() const { return _byte_map_base; } - bool scanned_concurrently() const { return _scanned_concurrently; } virtual bool is_in_young(oop obj) const = 0; diff --git a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp index 7e491c36dd5fee3f17f9593ad6068315242b775a..225fca264bd84b56300bc0503be2e15e37b5814b 100644 --- a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp +++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp @@ -192,5 +192,5 @@ void CardTableBarrierSet::on_thread_detach(Thread* thread) { } bool CardTableBarrierSet::card_mark_must_follow_store() const { - return _card_table->scanned_concurrently(); + return false; } diff --git a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp index f88a0dc070a34cc2a2486a396bc33f5a7be32c43..97e3c4593df31ed6325b0165c01b41b3b05d1163 100644 --- a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp +++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp @@ -32,12 +32,7 @@ template <DecoratorSet decorators, typename T> inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) { volatile CardValue* byte = _card_table->byte_for(field); - if (_card_table->scanned_concurrently()) { - // Perform a releasing store if the card table is scanned concurrently - Atomic::release_store(byte, CardTable::dirty_card_val()); - } else { - *byte = CardTable::dirty_card_val(); - } + *byte = CardTable::dirty_card_val(); } #endif // SHARE_GC_SHARED_CARDTABLEBARRIERSET_INLINE_HPP diff --git a/src/hotspot/share/gc/shared/cardTableRS.cpp b/src/hotspot/share/gc/shared/cardTableRS.cpp index cc232960a05eecad0268d20ec5200aac4d16b606..3dc15fb23a18219b1f296bd966dcce6f2a6ced88 100644 --- a/src/hotspot/share/gc/shared/cardTableRS.cpp +++ b/src/hotspot/share/gc/shared/cardTableRS.cpp @@ -434,8 +434,8 @@ void CardTableRS::verify() { CardTable::verify(); } -CardTableRS::CardTableRS(MemRegion whole_heap, bool scanned_concurrently) : - CardTable(whole_heap, scanned_concurrently) { } +CardTableRS::CardTableRS(MemRegion whole_heap) : + CardTable(whole_heap) { } void CardTableRS::initialize() { CardTable::initialize(); diff --git a/src/hotspot/share/gc/shared/cardTableRS.hpp b/src/hotspot/share/gc/shared/cardTableRS.hpp index e90ab9e313444bdcbb7da9e85ae38add40c73ba8..86ea16e0957f5e432210fb3620e03b55a095aa71 100644 --- a/src/hotspot/share/gc/shared/cardTableRS.hpp +++ b/src/hotspot/share/gc/shared/cardTableRS.hpp @@ -45,7 +45,7 @@ class CardTableRS : public CardTable { void verify_space(Space* s, HeapWord* gen_start); public: - CardTableRS(MemRegion whole_heap, bool scanned_concurrently); + CardTableRS(MemRegion whole_heap); void younger_refs_in_space_iterate(Space* sp, HeapWord* gen_boundary, OopIterateClosure* cl); diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 105d64ba2f17c0624ee8f723ee0445df90345b0f..b864ca2fd7aadcda1eebae3cdb121599c1674cb9 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" #include "classfile/vmClasses.hpp" #include "gc/shared/allocTracer.hpp" #include "gc/shared/barrierSet.hpp" @@ -40,6 +41,7 @@ #include "logging/log.hpp" #include "logging/logStream.hpp" #include "memory/classLoaderMetaspace.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include
"oops/instanceMirrorKlass.hpp" diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index ef532f148faaaa76501a94d068b544a67584e4c6..d2723e9b804698e54ddb8b65c87edcdeded75a57 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -29,6 +29,7 @@ #include "gc/shared/gcWhen.hpp" #include "gc/shared/verifyOption.hpp" #include "memory/allocation.hpp" +#include "memory/metaspace.hpp" #include "memory/universe.hpp" #include "runtime/handles.hpp" #include "runtime/perfDataTypes.hpp" diff --git a/src/hotspot/share/gc/shared/gcLocker.cpp b/src/hotspot/share/gc/shared/gcLocker.cpp index 814e2fa1a40333288bb88e02e21cff130a44201d..f1b3dce71cfdf2cae07810ed565a21bd87e31df1 100644 --- a/src/hotspot/share/gc/shared/gcLocker.cpp +++ b/src/hotspot/share/gc/shared/gcLocker.cpp @@ -36,7 +36,6 @@ volatile jint GCLocker::_jni_lock_count = 0; volatile bool GCLocker::_needs_gc = false; -volatile bool GCLocker::_doing_gc = false; unsigned int GCLocker::_total_collections = 0; #ifdef ASSERT @@ -127,12 +126,16 @@ bool GCLocker::should_discard(GCCause::Cause cause, uint total_collections) { void GCLocker::jni_lock(JavaThread* thread) { assert(!thread->in_critical(), "shouldn't currently be in a critical region"); MonitorLocker ml(JNICritical_lock); - // Block entering threads if we know at least one thread is in a - // JNI critical region and we need a GC. - // We check that at least one thread is in a critical region before - // blocking because blocked threads are woken up by a thread exiting - // a JNI critical region. - while (is_active_and_needs_gc() || _doing_gc) { + // Block entering threads if there's a pending GC request. + while (needs_gc()) { + // There's at least one thread that has not left the critical region (CR) + // completely. When that last thread (no new threads can enter CR due to the + // blocking) exits CR, it calls `jni_unlock`, which sets `_needs_gc` + // to false and wakes up all blocked threads. + // We would like to assert #threads in CR to be > 0, `_jni_lock_count > 0` + // in the code, but it's too strong; it's possible that the last thread + // has called `jni_unlock`, but not yet finished the call, e.g. initiating + // a GCCause::_gc_locker GC. ml.wait(); } thread->enter_critical(); @@ -154,7 +157,6 @@ void GCLocker::jni_unlock(JavaThread* thread) { // must not be a safepoint between the lock becoming inactive and // getting the count, else there may be unnecessary GCLocker GCs. _total_collections = Universe::heap()->total_collections(); - _doing_gc = true; GCLockerTracer::report_gc_locker(); { // Must give up the lock while at a safepoint @@ -162,7 +164,6 @@ void GCLocker::jni_unlock(JavaThread* thread) { log_debug_jni("Performing GC after exiting critical section."); Universe::heap()->collect(GCCause::_gc_locker); } - _doing_gc = false; _needs_gc = false; JNICritical_lock->notify_all(); } diff --git a/src/hotspot/share/gc/shared/gcLocker.hpp b/src/hotspot/share/gc/shared/gcLocker.hpp index 4b776058da8f530f7289ea1390138dc7d89d4666..91ed84c41a99b6ba7d8cbd5278380461686ca5e8 100644 --- a/src/hotspot/share/gc/shared/gcLocker.hpp +++ b/src/hotspot/share/gc/shared/gcLocker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -44,8 +44,6 @@ class GCLocker: public AllStatic { // unlocking. static volatile jint _jni_lock_count; // number of jni active instances. static volatile bool _needs_gc; // heap is filling, we need a GC - // note: bool is typedef'd as jint - static volatile bool _doing_gc; // unlock_critical() is doing a GC static uint _total_collections; // value for _gc_locker collection #ifdef ASSERT diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp index a8a4084990eccb65fa7382a50c9941847d670108..f73974981c9cffec524918efcfc1c291a4cca3c0 100644 --- a/src/hotspot/share/gc/shared/gcVMOperations.cpp +++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp @@ -23,7 +23,7 @@ */ #include "precompiled.hpp" -#include "classfile/classLoader.hpp" +#include "classfile/classLoaderData.hpp" #include "classfile/javaClasses.hpp" #include "gc/shared/allocTracer.hpp" #include "gc/shared/gcId.hpp" diff --git a/src/hotspot/share/gc/shared/gcVMOperations.hpp b/src/hotspot/share/gc/shared/gcVMOperations.hpp index aa8e0e9314640d0c0c14c30b4f4e066784c814ed..25af6a1d1544255caec38be48491416e5a73a70d 100644 --- a/src/hotspot/share/gc/shared/gcVMOperations.hpp +++ b/src/hotspot/share/gc/shared/gcVMOperations.hpp @@ -31,7 +31,7 @@ #include "prims/jvmtiExport.hpp" #include "runtime/handles.hpp" #include "runtime/synchronizer.hpp" -#include "runtime/vmOperations.hpp" +#include "runtime/vmOperation.hpp" // The following class hierarchy represents // a set of operations (VM_Operation) related to GC. diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp index fb303850c08348a2270abc59b6456e97f26265e6..743e8882624c1351b0e995ac6b6252df8178e1e2 100644 --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp @@ -60,6 +60,7 @@ #include "memory/iterator.hpp" #include "memory/metaspace/metaspaceSizesSnapshot.hpp" #include "memory/metaspaceCounters.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" @@ -142,7 +143,7 @@ jint GenCollectedHeap::initialize() { } CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) { - return new CardTableRS(reserved_region, false /* scan_concurrently */); + return new CardTableRS(reserved_region); } void GenCollectedHeap::initialize_size_policy(size_t init_eden_size, @@ -172,11 +173,12 @@ ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) { SIZE_FORMAT, total_reserved, alignment); ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment); + size_t used_page_size = ReservedSpace::actual_reserved_page_size(heap_rs); os::trace_page_sizes("Heap", MinHeapSize, total_reserved, - alignment, + used_page_size, heap_rs.base(), heap_rs.size()); diff --git a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp index da85dd1db4225560a7bfbd7fbdf914aa7be50857..0b547e6dcff9001ae3216137feed7ab25a80a90b 100644 --- a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp +++ b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #ifndef SHARE_GC_SHARED_GENOOPCLOSURES_INLINE_HPP #define SHARE_GC_SHARED_GENOOPCLOSURES_INLINE_HPP +#include "classfile/classLoaderData.hpp" #include "gc/shared/cardTableRS.hpp" #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genOopClosures.hpp" diff --git a/src/hotspot/share/gc/shared/oopStorage.cpp b/src/hotspot/share/gc/shared/oopStorage.cpp index 4248c9d91b83e9480221f90f824404297cd95e10..45142a0bd75d03bc5ea8003bd946c6eda5b86dd3 100644 --- a/src/hotspot/share/gc/shared/oopStorage.cpp +++ b/src/hotspot/share/gc/shared/oopStorage.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,8 +36,8 @@ #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" +#include "runtime/safefetch.inline.hpp" #include "runtime/safepoint.hpp" -#include "runtime/stubRoutines.hpp" #include "runtime/thread.hpp" #include "services/memTracker.hpp" #include "utilities/align.hpp" @@ -787,6 +787,21 @@ OopStorage::~OopStorage() { os::free(const_cast<char*>(_name)); } +void OopStorage::register_num_dead_callback(NumDeadCallback f) { + assert(_num_dead_callback == NULL, "Only one callback function supported"); + _num_dead_callback = f; +} + +void OopStorage::report_num_dead(size_t num_dead) const { + if (_num_dead_callback != NULL) { + _num_dead_callback(num_dead); + } +} + +bool OopStorage::should_report_num_dead() const { + return _num_dead_callback != NULL; +} + // Managing service thread notifications. // // We don't want cleanup work to linger indefinitely, but we also don't want @@ -815,21 +830,6 @@ static jlong cleanup_trigger_permit_time = 0; // too frequent. 
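
// A minimal standalone sketch (not part of the patch) of the dead-count
// callback contract that the oopStorage.cpp hunk above introduces.
// OopStorageLike mirrors the three members added to OopStorage, and
// NumDeadCallback matches the pointer-to-function shape used there;
// notify_gc_heuristics and wire_up are hypothetical stand-ins for a
// collector's registration site.
#include <cassert>
#include <cstddef>

typedef void (*NumDeadCallback)(size_t num_dead);

class OopStorageLike {
  NumDeadCallback _num_dead_callback = nullptr;
public:
  void register_num_dead_callback(NumDeadCallback f) {
    // Only one listener is supported; a second registration is a bug.
    assert(_num_dead_callback == nullptr && "Only one callback function supported");
    _num_dead_callback = f;
  }
  // Called after a cleanup pass has counted dead entries.
  void report_num_dead(size_t num_dead) const {
    if (_num_dead_callback != nullptr) {
      _num_dead_callback(num_dead);
    }
  }
  // Lets cleanup skip the counting work entirely when nobody is listening.
  bool should_report_num_dead() const { return _num_dead_callback != nullptr; }
};

static void notify_gc_heuristics(size_t num_dead) { (void)num_dead; /* feed a policy */ }

void wire_up(OopStorageLike& storage) {
  storage.register_num_dead_callback(&notify_gc_heuristics);
}
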
const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC; -void OopStorage::register_num_dead_callback(NumDeadCallback f) { - assert(_num_dead_callback == NULL, "Only one callback function supported"); - _num_dead_callback = f; -} - -void OopStorage::report_num_dead(size_t num_dead) const { - if (_num_dead_callback != NULL) { - _num_dead_callback(num_dead); - } -} - -bool OopStorage::should_report_num_dead() const { - return _num_dead_callback != NULL; -} - void OopStorage::trigger_cleanup_if_needed() { MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag); if (Atomic::load(&needs_cleanup_requested) && diff --git a/src/hotspot/share/gc/shared/referenceProcessor.cpp b/src/hotspot/share/gc/shared/referenceProcessor.cpp index e2fed87ffb365f637ffdd7546c21487fb4f56c45..4fd331337130c1994458bc32495f6cef50ed4fdf 100644 --- a/src/hotspot/share/gc/shared/referenceProcessor.cpp +++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp @@ -26,9 +26,9 @@ #include "classfile/javaClasses.inline.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectedHeap.inline.hpp" +#include "gc/shared/gc_globals.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTraceTime.inline.hpp" -#include "gc/shared/gc_globals.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessor.inline.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" @@ -93,7 +93,6 @@ void ReferenceProcessor::enable_discovery(bool check_no_refs) { } ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery, - bool mt_processing, uint mt_processing_degree, bool mt_discovery, uint mt_discovery_degree, @@ -103,7 +102,6 @@ ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discover _is_subject_to_discovery(is_subject_to_discovery), _discovering_refs(false), _enqueuing_is_done(false), - _processing_is_mt(mt_processing), _next_id(0), _adjust_no_of_processing_threads(adjust_no_of_processing_threads), _is_alive_non_header(is_alive_non_header) @@ -140,6 +138,10 @@ void ReferenceProcessor::verify_no_references_recorded() { } #endif +bool ReferenceProcessor::processing_is_mt() const { + return ParallelRefProcEnabled && _num_queues > 1; +} + void ReferenceProcessor::weak_oops_do(OopClosure* f) { for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { if (UseCompressedOops) { @@ -245,11 +247,6 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references( process_phantom_refs(is_alive, keep_alive, complete_gc, task_executor, phase_times); } - if (task_executor != NULL) { - // Record the work done by the parallel workers. - task_executor->set_single_threaded_mode(); - } - phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000); return stats; @@ -662,7 +659,7 @@ void ReferenceProcessor::set_active_mt_degree(uint v) { } bool ReferenceProcessor::need_balance_queues(DiscoveredList refs_lists[]) { - assert(_processing_is_mt, "why balance non-mt processing?"); + assert(processing_is_mt(), "why balance non-mt processing?"); // _num_queues is the processing degree. Only list entries up to // _num_queues will be processed, so any non-empty lists beyond // that must be redistributed to lists in that range. 
Even if not @@ -684,7 +681,7 @@ bool ReferenceProcessor::need_balance_queues(DiscoveredList refs_lists[]) { } void ReferenceProcessor::maybe_balance_queues(DiscoveredList refs_lists[]) { - assert(_processing_is_mt, "Should not call this otherwise"); + assert(processing_is_mt(), "Should not call this otherwise"); if (need_balance_queues(refs_lists)) { balance_queues(refs_lists); } @@ -774,21 +771,16 @@ void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) #endif } -bool ReferenceProcessor::is_mt_processing_set_up(AbstractRefProcTaskExecutor* task_executor) const { - return task_executor != NULL && _processing_is_mt; -} - void ReferenceProcessor::process_soft_ref_reconsider(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor, ReferenceProcessorPhaseTimes* phase_times) { - assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set."); + assert(!processing_is_mt() || task_executor != NULL, "Task executor must not be NULL when mt processing is set."); size_t const num_soft_refs = total_count(_discoveredSoftRefs); phase_times->set_ref_discovered(REF_SOFT, num_soft_refs); - - phase_times->set_processing_is_mt(_processing_is_mt); + phase_times->set_processing_is_mt(processing_is_mt()); if (num_soft_refs == 0) { log_debug(gc, ref)("Skipped phase 1 of Reference Processing: no references"); @@ -802,7 +794,7 @@ void ReferenceProcessor::process_soft_ref_reconsider(BoolObjectClosure* is_alive RefProcMTDegreeAdjuster a(this, RefPhase1, num_soft_refs); - if (_processing_is_mt) { + if (processing_is_mt()) { RefProcBalanceQueuesTimeTracker tt(RefPhase1, phase_times); maybe_balance_queues(_discoveredSoftRefs); } @@ -810,7 +802,7 @@ void ReferenceProcessor::process_soft_ref_reconsider(BoolObjectClosure* is_alive RefProcPhaseTimeTracker tt(RefPhase1, phase_times); log_reflist("Phase 1 Soft before", _discoveredSoftRefs, _max_num_queues); - if (_processing_is_mt) { + if (processing_is_mt()) { RefProcPhase1Task phase1(*this, phase_times, _current_soft_ref_policy); task_executor->execute(phase1, num_queues()); } else { @@ -832,7 +824,7 @@ void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_aliv VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor, ReferenceProcessorPhaseTimes* phase_times) { - assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set."); + assert(!processing_is_mt() || task_executor != NULL, "Task executor must not be NULL when mt processing is set."); size_t const num_soft_refs = total_count(_discoveredSoftRefs); size_t const num_weak_refs = total_count(_discoveredWeakRefs); @@ -841,7 +833,7 @@ void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_aliv phase_times->set_ref_discovered(REF_WEAK, num_weak_refs); phase_times->set_ref_discovered(REF_FINAL, num_final_refs); - phase_times->set_processing_is_mt(_processing_is_mt); + phase_times->set_processing_is_mt(processing_is_mt()); if (num_total_refs == 0) { log_debug(gc, ref)("Skipped phase 2 of Reference Processing: no references"); @@ -850,7 +842,7 @@ void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_aliv RefProcMTDegreeAdjuster a(this, RefPhase2, num_total_refs); - if (_processing_is_mt) { + if (processing_is_mt()) { RefProcBalanceQueuesTimeTracker tt(RefPhase2, phase_times); maybe_balance_queues(_discoveredSoftRefs); maybe_balance_queues(_discoveredWeakRefs); @@ -862,7 
+854,7 @@ void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_aliv log_reflist("Phase 2 Soft before", _discoveredSoftRefs, _max_num_queues); log_reflist("Phase 2 Weak before", _discoveredWeakRefs, _max_num_queues); log_reflist("Phase 2 Final before", _discoveredFinalRefs, _max_num_queues); - if (_processing_is_mt) { + if (processing_is_mt()) { RefProcPhase2Task phase2(*this, phase_times); task_executor->execute(phase2, num_queues()); } else { @@ -908,11 +900,11 @@ void ReferenceProcessor::process_final_keep_alive(OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor, ReferenceProcessorPhaseTimes* phase_times) { - assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set."); + assert(!processing_is_mt() || task_executor != NULL, "Task executor must not be NULL when mt processing is set."); size_t const num_final_refs = total_count(_discoveredFinalRefs); - phase_times->set_processing_is_mt(_processing_is_mt); + phase_times->set_processing_is_mt(processing_is_mt()); if (num_final_refs == 0) { log_debug(gc, ref)("Skipped phase 3 of Reference Processing: no references"); @@ -921,7 +913,7 @@ void ReferenceProcessor::process_final_keep_alive(OopClosure* keep_alive, RefProcMTDegreeAdjuster a(this, RefPhase3, num_final_refs); - if (_processing_is_mt) { + if (processing_is_mt()) { RefProcBalanceQueuesTimeTracker tt(RefPhase3, phase_times); maybe_balance_queues(_discoveredFinalRefs); } @@ -930,7 +922,7 @@ void ReferenceProcessor::process_final_keep_alive(OopClosure* keep_alive, // . Traverse referents of final references and keep them and followers alive. RefProcPhaseTimeTracker tt(RefPhase3, phase_times); - if (_processing_is_mt) { + if (processing_is_mt()) { RefProcPhase3Task phase3(*this, phase_times); task_executor->execute(phase3, num_queues()); } else { @@ -947,12 +939,12 @@ void ReferenceProcessor::process_phantom_refs(BoolObjectClosure* is_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor, ReferenceProcessorPhaseTimes* phase_times) { - assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set."); + assert(!processing_is_mt() || task_executor != NULL, "Task executor must not be NULL when mt processing is set."); size_t const num_phantom_refs = total_count(_discoveredPhantomRefs); - phase_times->set_ref_discovered(REF_PHANTOM, num_phantom_refs); - phase_times->set_processing_is_mt(_processing_is_mt); + phase_times->set_ref_discovered(REF_PHANTOM, num_phantom_refs); + phase_times->set_processing_is_mt(processing_is_mt()); if (num_phantom_refs == 0) { log_debug(gc, ref)("Skipped phase 4 of Reference Processing: no references"); @@ -961,7 +953,7 @@ void ReferenceProcessor::process_phantom_refs(BoolObjectClosure* is_alive, RefProcMTDegreeAdjuster a(this, RefPhase4, num_phantom_refs); - if (_processing_is_mt) { + if (processing_is_mt()) { RefProcBalanceQueuesTimeTracker tt(RefPhase4, phase_times); maybe_balance_queues(_discoveredPhantomRefs); } @@ -970,7 +962,7 @@ void ReferenceProcessor::process_phantom_refs(BoolObjectClosure* is_alive, RefProcPhaseTimeTracker tt(RefPhase4, phase_times); log_reflist("Phase 4 Phantom before", _discoveredPhantomRefs, _max_num_queues); - if (_processing_is_mt) { + if (processing_is_mt()) { RefProcPhase4Task phase4(*this, phase_times); task_executor->execute(phase4, num_queues()); } else { @@ -997,7 +989,7 @@ inline DiscoveredList* 
ReferenceProcessor::get_discovered_list(ReferenceType rt) } else { // single-threaded discovery, we save in round-robin // fashion to each of the lists. - if (_processing_is_mt) { + if (processing_is_mt()) { id = next_id(); } } @@ -1165,8 +1157,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) { // Check assumption that an object is not potentially // discovered twice except by concurrent collectors that potentially // trace the same Reference object twice. - assert(UseG1GC || UseShenandoahGC, - "Only possible with a concurrent marking collector"); + assert(UseG1GC, "Only possible with a concurrent marking collector"); return true; } } @@ -1381,7 +1372,6 @@ RefProcMTDegreeAdjuster::RefProcMTDegreeAdjuster(ReferenceProcessor* rp, RefProcPhases phase, size_t ref_count): _rp(rp), - _saved_mt_processing(_rp->processing_is_mt()), _saved_num_queues(_rp->num_queues()) { if (!_rp->processing_is_mt() || !_rp->adjust_no_of_processing_threads() || (ReferencesPerThread == 0)) { return; @@ -1389,12 +1379,10 @@ RefProcMTDegreeAdjuster::RefProcMTDegreeAdjuster(ReferenceProcessor* rp, uint workers = ergo_proc_thread_count(ref_count, _rp->num_queues(), phase); - _rp->set_mt_processing(workers > 1); _rp->set_active_mt_degree(workers); } RefProcMTDegreeAdjuster::~RefProcMTDegreeAdjuster() { // Revert to previous status. - _rp->set_mt_processing(_saved_mt_processing); _rp->set_active_mt_degree(_saved_num_queues); } diff --git a/src/hotspot/share/gc/shared/referenceProcessor.hpp b/src/hotspot/share/gc/shared/referenceProcessor.hpp index 695bdf49053f208aeff382679214599152ddfcfb..08519712e9deb363ce76839e81845d4b7a775e8c 100644 --- a/src/hotspot/share/gc/shared/referenceProcessor.hpp +++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -201,8 +201,6 @@ private: bool _discovery_is_mt; // true if reference discovery is MT. bool _enqueuing_is_done; // true if all weak references enqueued - bool _processing_is_mt; // true during phases when - // reference processing is MT. uint _next_id; // round-robin mod _num_queues counter in // support of work distribution @@ -371,12 +369,10 @@ private: bool is_subject_to_discovery(oop const obj) const; - bool is_mt_processing_set_up(AbstractRefProcTaskExecutor* task_executor) const; - public: // Default parameters give you a vanilla reference processor. ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery, - bool mt_processing = false, uint mt_processing_degree = 1, + uint mt_processing_degree = 1, bool mt_discovery = false, uint mt_discovery_degree = 1, bool atomic_discovery = true, BoolObjectClosure* is_alive_non_header = NULL, @@ -417,8 +413,7 @@ public: void set_mt_discovery(bool mt) { _discovery_is_mt = mt; } // Whether we are in a phase when _processing_ is MT. 
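
// Standalone sketch of the idea behind the processing_is_mt() change above:
// the MT decision stops being cached state with a setter and becomes a pure
// function of the ergonomics flag and the live queue count, which is what
// lets the patch delete set_mt_processing() and
// ReferenceProcessorMTProcMutator. The class and member names below are
// stand-ins, not HotSpot code; the predicate itself is the one in the hunk.
class RefProcModeSketch {
  bool _parallel_ref_proc_enabled;  // stands in for the ParallelRefProcEnabled flag
  unsigned _num_queues;             // current processing degree
public:
  RefProcModeSketch(bool enabled, unsigned queues)
    : _parallel_ref_proc_enabled(enabled), _num_queues(queues) {}

  // RefProcMTDegreeAdjuster only has to change the queue count now;
  // the MT predicate follows automatically.
  void set_active_mt_degree(unsigned v) { _num_queues = v; }

  bool processing_is_mt() const {
    return _parallel_ref_proc_enabled && _num_queues > 1;
  }
};
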
- bool processing_is_mt() const { return _processing_is_mt; } - void set_mt_processing(bool mt) { _processing_is_mt = mt; } + bool processing_is_mt() const; // whether all enqueueing of weak references is complete bool enqueuing_is_done() { return _enqueuing_is_done; } @@ -601,28 +596,6 @@ class ReferenceProcessorAtomicMutator: StackObj { } }; - -// A utility class to temporarily change the MT processing -// disposition of the given ReferenceProcessor instance -// in the scope that contains it. -class ReferenceProcessorMTProcMutator: StackObj { - private: - ReferenceProcessor* _rp; - bool _saved_mt; - - public: - ReferenceProcessorMTProcMutator(ReferenceProcessor* rp, - bool mt): - _rp(rp) { - _saved_mt = _rp->processing_is_mt(); - _rp->set_mt_processing(mt); - } - - ~ReferenceProcessorMTProcMutator() { - _rp->set_mt_processing(_saved_mt); - } -}; - // This class is an interface used to implement task execution for the // reference processing. class AbstractRefProcTaskExecutor { @@ -633,9 +606,6 @@ public: // Executes a task using worker threads. virtual void execute(ProcessTask& task, uint ergo_workers) = 0; - - // Switch to single threaded mode. - virtual void set_single_threaded_mode() { }; }; // Abstract reference processing task to execute. @@ -670,7 +640,6 @@ class RefProcMTDegreeAdjuster : public StackObj { typedef ReferenceProcessor::RefProcPhases RefProcPhases; ReferenceProcessor* _rp; - bool _saved_mt_processing; uint _saved_num_queues; // Calculate based on total of references. diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp index 63760bc563fc792dd75529a098ef096275235ea5..84a03402f9440ae6411a39852e97c0ac0f0a4cab 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp @@ -395,32 +395,33 @@ bool StringDedupTable::is_rehashing() { StringDedupTable* StringDedupTable::prepare_resize() { size_t size = _table->_size; - // Check if the hashtable needs to be resized + // Decide whether to resize, and compute desired new size if so. if (_table->_entries > _table->_grow_threshold) { - // Grow table, double the size - size *= 2; - if (size > _max_size) { - // Too big, don't resize - return NULL; + // Compute new size. + size_t needed = _table->_entries / _grow_load_factor; + if (needed < _max_size) { + size = round_up_power_of_2(needed); + } else { + size = _max_size; } } else if (_table->_entries < _table->_shrink_threshold) { - // Shrink table, half the size - size /= 2; - if (size < _min_size) { - // Too small, don't resize - return NULL; - } - } else if (StringDeduplicationResizeALot) { - // Force grow - size *= 2; - if (size > _max_size) { - // Too big, force shrink instead - size /= 4; + // Compute new size. We can't shrink by more than a factor of 2, + // because the partitioning for parallelization doesn't support more. + if (size > _min_size) size /= 2; + } + // If no change in size needed (and not forcing resize) then done. + if (size == _table->_size) { + if (!StringDeduplicationResizeALot) { + return NULL; // Don't resize. + } else if (size < _max_size) { + size *= 2; // Force grow, but not past _max_size. + } else { + size /= 2; // Can't force grow, so force shrink instead. 
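
// Standalone sketch of the resize decision rewritten in the
// stringDedupTable.cpp hunk above, with the table state passed in explicitly
// so the rules are visible in one place. The parameter names mirror the
// fields used in the hunk; round_up_power_of_2 is a local stand-in for the
// HotSpot utility of the same name, and returning 0 stands in for
// "do not resize" (the hunk returns NULL instead).
#include <cstddef>

static size_t round_up_power_of_2(size_t v) {
  size_t p = 1;
  while (p < v) p <<= 1;
  return p;
}

static size_t choose_new_size(size_t cur_size, size_t entries,
                              size_t grow_threshold, size_t shrink_threshold,
                              size_t min_size, size_t max_size,
                              size_t grow_load_factor, bool resize_a_lot) {
  size_t size = cur_size;
  if (entries > grow_threshold) {
    // Grow to the smallest power of two that restores the load factor.
    size_t needed = entries / grow_load_factor;
    size = needed < max_size ? round_up_power_of_2(needed) : max_size;
  } else if (entries < shrink_threshold && size > min_size) {
    size /= 2;  // shrink at most 2x; the parallel partitioning allows no more
  }
  if (size == cur_size) {              // thresholds ask for no change
    if (!resize_a_lot) return 0;       // 0: do not resize
    size = (size < max_size) ? size * 2   // stress mode: force grow...
                             : size / 2;  // ...or force shrink at the cap
  }
  return size;
}
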
} - } else { - // Resize not needed - return NULL; } + assert(size <= _max_size, "invariant: %zu", size); + assert(size >= _min_size, "invariant: %zu", size); + assert(is_power_of_2(size), "invariant: %zu", size); // Update statistics _resize_count++; diff --git a/src/hotspot/share/gc/shared/workgroup.cpp b/src/hotspot/share/gc/shared/workgroup.cpp index 3a1d7cfd694f746acbf5ea417017d65552a1bbae..f03a0610292f3b0cc1105df9f63ef5cbb15ddb97 100644 --- a/src/hotspot/share/gc/shared/workgroup.cpp +++ b/src/hotspot/share/gc/shared/workgroup.cpp @@ -352,26 +352,19 @@ void WorkGangBarrierSync::abort() { // SubTasksDone functions. SubTasksDone::SubTasksDone(uint n) : - _tasks(NULL), _n_tasks(n), _threads_completed(0) { + _tasks(NULL), _n_tasks(n) { _tasks = NEW_C_HEAP_ARRAY(bool, n, mtInternal); - clear(); -} - -bool SubTasksDone::valid() { - return _tasks != NULL; -} - -void SubTasksDone::clear() { for (uint i = 0; i < _n_tasks; i++) { _tasks[i] = false; } - _threads_completed = 0; } -void SubTasksDone::all_tasks_completed_impl(uint n_threads, - uint skipped[], - size_t skipped_size) { #ifdef ASSERT +void SubTasksDone::all_tasks_claimed_impl(uint skipped[], size_t skipped_size) { + if (Atomic::cmpxchg(&_verification_done, false, true)) { + // another thread has done the verification + return; + } // all non-skipped tasks are claimed for (uint i = 0; i < _n_tasks; ++i) { if (!_tasks[i]) { @@ -391,19 +384,8 @@ void SubTasksDone::all_tasks_completed_impl(uint n_threads, assert(task_index < _n_tasks, "Array in range."); assert(!_tasks[task_index], "%d is both claimed and skipped.", task_index); } -#endif - uint observed = _threads_completed; - uint old; - do { - old = observed; - observed = Atomic::cmpxchg(&_threads_completed, old, old+1); - } while (observed != old); - // If this was the last thread checking in, clear the tasks. - uint adjusted_thread_count = (n_threads == 0 ? 1 : n_threads); - if (observed + 1 == adjusted_thread_count) { - clear(); - } } +#endif bool SubTasksDone::try_claim_task(uint t) { assert(t < _n_tasks, "bad task id."); @@ -411,6 +393,7 @@ bool SubTasksDone::try_claim_task(uint t) { } SubTasksDone::~SubTasksDone() { + assert(_verification_done, "all_tasks_claimed must have been called."); FREE_C_HEAP_ARRAY(bool, _tasks); } diff --git a/src/hotspot/share/gc/shared/workgroup.hpp b/src/hotspot/share/gc/shared/workgroup.hpp index a499451333e9a02e8a505fa3ac1f71b506c2d64f..e6c7a686765f655ed388c3d7c2b8baaad9457c8d 100644 --- a/src/hotspot/share/gc/shared/workgroup.hpp +++ b/src/hotspot/share/gc/shared/workgroup.hpp @@ -305,24 +305,18 @@ public: class SubTasksDone: public CHeapObj<mtInternal> { volatile bool* _tasks; uint _n_tasks; - volatile uint _threads_completed; - // Set all tasks to unclaimed. - void clear(); - - void all_tasks_completed_impl(uint n_threads, uint skipped[], size_t skipped_size); + // make sure verification logic is run exactly once to avoid duplicate assertion failures + DEBUG_ONLY(volatile bool _verification_done = false;) + void all_tasks_claimed_impl(uint skipped[], size_t skipped_size) NOT_DEBUG_RETURN; NONCOPYABLE(SubTasksDone); public: // Initializes "this" to a state in which there are "n" tasks to be - // processed, none of the which are originally claimed. The number of - // threads doing the tasks is initialized 1. + // processed, none of which are originally claimed. SubTasksDone(uint n); - // True iff the object is in a valid state. - bool valid(); - // Attempt to claim the task "t", returning true if successful, // false if it has already been claimed. 
The task "t" is required // to be within the range of "this". @@ -331,21 +325,17 @@ public: // The calling thread asserts that it has attempted to claim all the tasks // that it will try to claim. Tasks that are meant to be skipped must be // explicitly passed as extra arguments. Every thread in the parallel task - // must execute this. (When the last thread does so, the task array is - // cleared.) - // - // n_threads - Number of threads executing the sub-tasks. - void all_tasks_completed(uint n_threads) { - all_tasks_completed_impl(n_threads, nullptr, 0); - } - - // Augmented by variadic args, each for a skipped task. + // must execute this. template<typename T0, typename... Ts, ENABLE_IF(Conjunction<std::is_convertible<Ts, uint>...>::value)> - void all_tasks_completed(uint n_threads, T0 first_skipped, Ts... more_skipped) { + void all_tasks_claimed(T0 first_skipped, Ts... more_skipped) { static_assert(std::is_convertible<T0, uint>::value, "not convertible"); uint skipped[] = { static_cast<uint>(first_skipped), static_cast<uint>(more_skipped)... }; - all_tasks_completed_impl(n_threads, skipped, ARRAY_SIZE(skipped)); + all_tasks_claimed_impl(skipped, ARRAY_SIZE(skipped)); + } + // if there are no skipped tasks. + void all_tasks_claimed() { + all_tasks_claimed_impl(nullptr, 0); } // Destructor. diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp index 083951ca14003781e5844928f00a4d8ad09c186f..1478d64e5efc88f6359e8c51435a4589c3734616 100644 --- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp +++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp @@ -220,27 +220,22 @@ void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) BarrierSetC1::load_at_resolved(access, result); } - // 3: apply keep-alive barrier if ShenandoahSATBBarrier is set - if (ShenandoahSATBBarrier) { - bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0; - bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0; + // 3: apply keep-alive barrier for java.lang.ref.Reference if needed + if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) { bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0; - bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0; - - if ((is_weak || is_phantom || is_anonymous) && keep_alive) { - // Register the value in the referent field with the pre-barrier - LabelObj *Lcont_anonymous; - if (is_anonymous) { - Lcont_anonymous = new LabelObj(); - generate_referent_check(access, Lcont_anonymous); - } - pre_barrier(gen, access.access_emit_info(), decorators, LIR_OprFact::illegalOpr /* addr_opr */, - result /* pre_val */); - if (is_anonymous) { - __ branch_destination(Lcont_anonymous->label()); - } + + // Register the value in the referent field with the pre-barrier + LabelObj *Lcont_anonymous; + if (is_anonymous) { + Lcont_anonymous = new LabelObj(); + generate_referent_check(access, Lcont_anonymous); + } + pre_barrier(gen, access.access_emit_info(), decorators, LIR_OprFact::illegalOpr /* addr_opr */, + result /* pre_val */); + if (is_anonymous) { + __ branch_destination(Lcont_anonymous->label()); } - } + } } class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure { diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp index f561b760596ed035b0e3d80af5b5fbeffbde3a70..1bf51fbf3b83638f8adb0fc1b3cfe773281baa7c 100644 --- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp 
@@ -551,7 +551,7 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val } } - // 3: apply keep-alive barrier if needed + // 3: apply keep-alive barrier for java.lang.ref.Reference if needed if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) { Node* top = Compile::current()->top(); Node* adr = access.addr().node(); diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp index abc4a7cfdc9db1dc76c0f0d33688305426901775..00e71f4afc491f6da0be68dd4c03fdb7c017fe91 100644 --- a/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp +++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahIUMode.cpp @@ -34,6 +34,11 @@ #include "runtime/java.hpp" void ShenandoahIUMode::initialize_flags() const { + if (FLAG_IS_CMDLINE(ClassUnloadingWithConcurrentMark) && ClassUnloading) { + log_warning(gc)("Shenandoah I-U mode sets -XX:-ClassUnloadingWithConcurrentMark; see JDK-8261341 for details"); + } + FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false); + if (ClassUnloading) { FLAG_SET_DEFAULT(ShenandoahSuspendibleWorkers, true); FLAG_SET_DEFAULT(VerifyBeforeExit, false); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fb0de2bc741066cd3a798031d94b31b09dc985f2 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.cpp @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2021, Red Hat, Inc. All rights reserved. + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "gc/shared/concurrentGCBreakpoints.hpp" +#include "gc/shenandoah/shenandoahBreakpoint.hpp" +#include "runtime/mutexLocker.hpp" +#include "utilities/debug.hpp" + +bool ShenandoahBreakpoint::_start_gc = false; + +void ShenandoahBreakpoint::start_gc() { + MonitorLocker ml(ConcurrentGCBreakpoints::monitor()); + assert(ConcurrentGCBreakpoints::is_controlled(), "Invalid state"); + assert(!_start_gc, "Invalid state"); + _start_gc = true; + ml.notify_all(); +} + +void ShenandoahBreakpoint::at_before_gc() { + MonitorLocker ml(ConcurrentGCBreakpoints::monitor(), Mutex::_no_safepoint_check_flag); + while (ConcurrentGCBreakpoints::is_controlled() && !_start_gc) { + ml.wait(); + } + _start_gc = false; + ConcurrentGCBreakpoints::notify_idle_to_active(); +} + +void ShenandoahBreakpoint::at_after_gc() { + ConcurrentGCBreakpoints::notify_active_to_idle(); +} + +void ShenandoahBreakpoint::at_after_marking_started() { + ConcurrentGCBreakpoints::at("AFTER MARKING STARTED"); +} + +void ShenandoahBreakpoint::at_before_marking_completed() { + ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED"); +} + +void ShenandoahBreakpoint::at_after_reference_processing_started() { + ConcurrentGCBreakpoints::at("AFTER CONCURRENT REFERENCE PROCESSING STARTED"); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.hpp new file mode 100644 index 0000000000000000000000000000000000000000..f8b7489a3bbd0e7aedc4d5b1fe599f531629aedc --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahBreakpoint.hpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2021, Red Hat, Inc. All rights reserved. + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHBREAKPOINT_HPP +#define SHARE_GC_SHENANDOAH_SHENANDOAHBREAKPOINT_HPP + +#include "memory/allocation.hpp" + +class ShenandoahBreakpoint : public AllStatic { +private: + static bool _start_gc; + +public: + static void start_gc(); + + static void at_before_gc(); + static void at_after_gc(); + static void at_after_marking_started(); + static void at_before_marking_completed(); + static void at_after_reference_processing_started(); +}; +#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBREAKPOINT_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp index 385408d7fd56236b8925129a28d773f6698b2e1b..5f269a7f64a2372d24cdb75d53d2d9e335e26d8d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp @@ -110,27 +110,25 @@ void ShenandoahCollectionSet::clear() { } ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() { - size_t num_regions = _heap->num_regions(); - if (_current_index >= (jint)num_regions) { - return NULL; - } + // This code is optimized for the case when collection set contains only + // a few regions. In this case, it is more constructive to check for is_in + // before hitting the (potentially contended) atomic index. - jint saved_current = _current_index; - size_t index = (size_t)saved_current; + size_t max = _heap->num_regions(); + size_t old = Atomic::load(&_current_index); - while(index < num_regions) { + for (size_t index = old; index < max; index++) { if (is_in(index)) { - jint cur = Atomic::cmpxchg(&_current_index, saved_current, (jint)(index + 1)); - assert(cur >= (jint)saved_current, "Must move forward"); - if (cur == saved_current) { - assert(is_in(index), "Invariant"); + size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed); + assert(cur >= old, "Always move forward"); + if (cur == old) { + // Successfully moved the claim index, this is our region. return _heap->get_region(index); } else { - index = (size_t)cur; - saved_current = cur; + // Somebody else moved the claim index, restart from there. 
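
// Standalone sketch of the claim_next() pattern rewritten above: scan the
// (usually sparse) membership map and only touch the shared claim index when
// a member region is actually found. std::atomic stands in for HotSpot's
// Atomic; is_in is the membership test from the hunk; returning -1 stands in
// for the NULL result.
#include <atomic>
#include <cstddef>

template <typename IsIn>
long claim_next_index(std::atomic<size_t>& current, size_t max, IsIn is_in) {
  size_t old = current.load(std::memory_order_relaxed);
  for (size_t index = old; index < max; index++) {
    if (!is_in(index)) continue;   // cheap test first, no contention
    // Try to move the claim index past this region.
    if (current.compare_exchange_strong(old, index + 1,
                                        std::memory_order_relaxed)) {
      return (long)index;          // claimed it
    }
    // Lost the race: 'old' now holds the winner's new index; resume the
    // scan from there (the loop increment adds the 1 back).
    index = old - 1;
  }
  return -1;                       // nothing left to claim
}
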
+ index = cur - 1; // adjust for loop post-increment + old = cur; } - } else { - index ++; } } return NULL; @@ -139,10 +137,11 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() { ShenandoahHeapRegion* ShenandoahCollectionSet::next() { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); assert(Thread::current()->is_VM_thread(), "Must be VMThread"); - size_t num_regions = _heap->num_regions(); - for (size_t index = (size_t)_current_index; index < num_regions; index ++) { + + size_t max = _heap->num_regions(); + for (size_t index = _current_index; index < max; index++) { if (is_in(index)) { - _current_index = (jint)(index + 1); + _current_index = index + 1; return _heap->get_region(index); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp index 56e96522094fdca10b2de5b0a65b4755be8aa9b6..8ac2d9fb2eacb4cc95c094fb64958c4b1cd4b7bf 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp @@ -26,6 +26,7 @@ #define SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP #include "memory/allocation.hpp" +#include "memory/virtualspace.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #include "gc/shenandoah/shenandoahHeapRegion.hpp" #include "gc/shenandoah/shenandoahPadding.hpp" @@ -47,7 +48,7 @@ private: size_t _region_count; shenandoah_padding(0); - volatile jint _current_index; + volatile size_t _current_index; shenandoah_padding(1); public: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 24bea946506977a35ccfe67378230b9b18ea29f5..8982a4e23096cf5a41b13733f5c6db38d58cc924 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -26,6 +26,7 @@ #include "gc/shared/barrierSetNMethod.hpp" #include "gc/shared/collectorCounters.hpp" +#include "gc/shenandoah/shenandoahBreakpoint.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahConcurrentGC.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" @@ -42,9 +43,34 @@ #include "gc/shenandoah/shenandoahVMOperations.hpp" #include "gc/shenandoah/shenandoahWorkGroup.hpp" #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" +#include "memory/allocation.hpp" #include "prims/jvmtiTagMap.hpp" +#include "runtime/vmThread.hpp" #include "utilities/events.hpp" +// Breakpoint support +class ShenandoahBreakpointGCScope : public StackObj { +public: + ShenandoahBreakpointGCScope() { + ShenandoahBreakpoint::at_before_gc(); + } + + ~ShenandoahBreakpointGCScope() { + ShenandoahBreakpoint::at_after_gc(); + } +}; + +class ShenandoahBreakpointMarkScope : public StackObj { +public: + ShenandoahBreakpointMarkScope() { + ShenandoahBreakpoint::at_after_marking_started(); + } + + ~ShenandoahBreakpointMarkScope() { + ShenandoahBreakpoint::at_before_marking_completed(); + } +}; + ShenandoahConcurrentGC::ShenandoahConcurrentGC() : _mark(), _degen_point(ShenandoahDegenPoint::_degenerated_unset) { @@ -60,6 +86,10 @@ void ShenandoahConcurrentGC::cancel() { bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { ShenandoahHeap* const heap = ShenandoahHeap::heap(); + if (cause == GCCause::_wb_breakpoint) { + ShenandoahBreakpoint::start_gc(); + } + ShenandoahBreakpointGCScope breakpoint_gc_scope; // Reset for upcoming marking entry_reset(); @@ -67,13 +97,16 @@ bool 
ShenandoahConcurrentGC::collect(GCCause::Cause cause) { // Start initial mark under STW vmop_entry_init_mark(); + { + ShenandoahBreakpointMarkScope breakpoint_mark_scope; // Concurrent mark roots - entry_mark_roots(); - if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false; + entry_mark_roots(); + if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false; - // Continue concurrent mark - entry_mark(); - if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false; + // Continue concurrent mark + entry_mark(); + if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false; + } // Complete marking under STW, and start evacuation vmop_entry_final_mark(); @@ -621,6 +654,7 @@ void ShenandoahConcurrentGC::op_weak_refs() { assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase"); // Concurrent weak refs processing ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs); + ShenandoahBreakpoint::at_after_reference_processing_started(); heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs, heap->workers(), true /* concurrent */); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp index 1a3030e1d24fc4cd730a2f1e8a27335fca0dddcb..fdc6943a9d2b89398c5efbba1c40d6d8fcca1e1b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp @@ -23,7 +23,6 @@ */ #include "precompiled.hpp" - #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahConcurrentGC.hpp" #include "gc/shenandoah/shenandoahControlThread.hpp" @@ -41,6 +40,7 @@ #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp" #include "memory/iterator.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/universe.hpp" #include "runtime/atomic.hpp" @@ -100,7 +100,7 @@ void ShenandoahControlThread::run_service() { bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause); // This control loop iteration have seen this much allocations. - size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0); + size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed); // Check if we have seen a new target for soft max heap size. 
bool soft_max_changed = check_soft_max_changed(); @@ -478,6 +478,7 @@ void ShenandoahControlThread::request_gc(GCCause::Cause cause) { cause == GCCause::_metadata_GC_clear_soft_refs || cause == GCCause::_full_gc_alot || cause == GCCause::_wb_full_gc || + cause == GCCause::_wb_breakpoint || cause == GCCause::_scavenge_alot, "only requested GCs here"); @@ -506,7 +507,10 @@ void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) { while (current_gc_id < required_gc_id) { _gc_requested.set(); _requested_gc_cause = cause; - ml.wait(); + + if (cause != GCCause::_wb_breakpoint) { + ml.wait(); + } current_gc_id = get_gc_id(); } } @@ -595,7 +599,7 @@ void ShenandoahControlThread::notify_heap_changed() { void ShenandoahControlThread::pacing_notify_alloc(size_t words) { assert(ShenandoahPacing, "should only call when pacing is enabled"); - Atomic::add(&_allocs_seen, words); + Atomic::add(&_allocs_seen, words, memory_order_relaxed); } void ShenandoahControlThread::set_forced_counters_update(bool value) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index e7b87f7dcbf32068ad01c5176e4d06abaee09469..e54feb68298655525e2ab22fe50176480e26b41b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -48,7 +48,7 @@ #include "gc/shenandoah/shenandoahVerifier.hpp" #include "gc/shenandoah/shenandoahVMOperations.hpp" #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" -#include "memory/metaspace.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/universe.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/oop.inline.hpp" @@ -179,15 +179,14 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { ShenandoahReferenceProcessor* rp = heap->ref_processor(); rp->abandon_partial_discovery(); - // f. Set back forwarded objects bit back, in case some steps above dropped it. - heap->set_has_forwarded_objects(has_forwarded_objects); - - // g. Sync pinned region status from the CP marks + // f. 
Sync pinned region status from the CP marks heap->sync_pinned_region_status(); // The rest of prologue: BiasedLocking::preserve_marks(); _preserved_marks->init(heap->workers()->active_workers()); + + assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change"); } if (UseTLAB) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 375bbd441b68ddd5eda9fd9af6387944b4af341a..43af68534c71bef6a8ccdc5027a3a3fc568ce0a2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -73,6 +73,7 @@ #include "classfile/systemDictionary.hpp" #include "memory/classLoaderMetaspace.hpp" +#include "memory/metaspaceUtils.hpp" #include "oops/compressedOops.inline.hpp" #include "prims/jvmtiTagMap.hpp" #include "runtime/atomic.hpp" @@ -620,12 +621,11 @@ void ShenandoahHeap::post_initialize() { } size_t ShenandoahHeap::used() const { - return Atomic::load_acquire(&_used); + return Atomic::load(&_used); } size_t ShenandoahHeap::committed() const { - OrderAccess::acquire(); - return _committed; + return Atomic::load(&_committed); } void ShenandoahHeap::increase_committed(size_t bytes) { @@ -639,20 +639,20 @@ void ShenandoahHeap::decrease_committed(size_t bytes) { } void ShenandoahHeap::increase_used(size_t bytes) { - Atomic::add(&_used, bytes); + Atomic::add(&_used, bytes, memory_order_relaxed); } void ShenandoahHeap::set_used(size_t bytes) { - Atomic::release_store_fence(&_used, bytes); + Atomic::store(&_used, bytes); } void ShenandoahHeap::decrease_used(size_t bytes) { assert(used() >= bytes, "never decrease heap size by more than we've left"); - Atomic::sub(&_used, bytes); + Atomic::sub(&_used, bytes, memory_order_relaxed); } void ShenandoahHeap::increase_allocated(size_t bytes) { - Atomic::add(&_bytes_allocated_since_gc_start, bytes); + Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed); } void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) { @@ -1498,8 +1498,8 @@ public: size_t stride = ShenandoahParallelRegionStride; size_t max = _heap->num_regions(); - while (_index < max) { - size_t cur = Atomic::fetch_and_add(&_index, stride); + while (Atomic::load(&_index) < max) { + size_t cur = Atomic::fetch_and_add(&_index, stride, memory_order_relaxed); size_t start = cur; size_t end = MIN2(cur + stride, max); if (start >= max) break; @@ -1883,11 +1883,11 @@ address ShenandoahHeap::gc_state_addr() { } size_t ShenandoahHeap::bytes_allocated_since_gc_start() { - return Atomic::load_acquire(&_bytes_allocated_since_gc_start); + return Atomic::load(&_bytes_allocated_since_gc_start); } void ShenandoahHeap::reset_bytes_allocated_since_gc_start() { - Atomic::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0); + Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0); } void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index d721d4c5ff5d82a004cbf21cdd1e3e7c7c475065..d0dda89a9e9fba81a4fc6a7435873e7e4342c220 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -162,6 +162,11 @@ public: void prepare_for_verify(); void verify(VerifyOption vo); +// WhiteBox testing support. 
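
// Sketch of the RAII breakpoint-scope idiom that ShenandoahConcurrentGC uses
// above: a constructor/destructor pair guarantees the before/after
// notifications fire even when collect() bails out early at a cancellation
// check. Breakpoint here is a stand-in for ShenandoahBreakpoint; the scope
// type mirrors ShenandoahBreakpointGCScope from the patch.
struct Breakpoint {
  static void at_before_gc() { /* block until WhiteBox releases the cycle */ }
  static void at_after_gc()  { /* report active-to-idle back to WhiteBox */ }
};

struct BreakpointGCScopeSketch {
  BreakpointGCScopeSketch()  { Breakpoint::at_before_gc(); }
  ~BreakpointGCScopeSketch() { Breakpoint::at_after_gc(); }
};

bool collect_sketch(bool cancelled) {
  BreakpointGCScopeSketch scope;
  if (cancelled) {
    return false;  // destructor still runs at_after_gc(); tests stay in sync
  }
  // ... marking, evacuation, update-refs ...
  return true;
}
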
+ bool supports_concurrent_gc_breakpoints() const { + return true; + } + // ---------- Heap counters and metrics // private: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp index cea6c322cbadb4db4099c2505a05e3d096670c4c..c39737c15b84398164205183f6ab11baba3bbd6e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -54,7 +54,7 @@ inline ShenandoahHeap* ShenandoahHeap::heap() { } inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() { - size_t new_index = Atomic::add(&_index, (size_t) 1); + size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed); // get_region() provides the bounds-check and returns NULL on OOB. return _heap->get_region(new_index - 1); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp index c02c00b7d4f28c2c674d464ec3daadd3dc4ef25f..461aa30fcbee1d2a84c7149cf04467b355e9b86c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp @@ -80,7 +80,7 @@ inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) { } inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) { - size_t new_live_data = Atomic::add(&_live_data, s); + size_t new_live_data = Atomic::add(&_live_data, s, memory_order_relaxed); #ifdef ASSERT size_t live_bytes = new_live_data * HeapWordSize; size_t used_bytes = used(); @@ -90,11 +90,11 @@ inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) { } inline void ShenandoahHeapRegion::clear_live_data() { - Atomic::release_store_fence(&_live_data, (size_t)0); + Atomic::store(&_live_data, (size_t)0); } inline size_t ShenandoahHeapRegion::get_live_data_words() const { - return Atomic::load_acquire(&_live_data); + return Atomic::load(&_live_data); } inline size_t ShenandoahHeapRegion::get_live_data_bytes() const { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp index 438441a400f7af6703a5279dbf18dbcee263ad32..fe34b629637d549b33a36ff116ddfa7d4eeeb433 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp @@ -33,17 +33,10 @@ ShenandoahHeapRegionSetIterator::ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set) : _set(set), _heap(ShenandoahHeap::heap()), _current_index(0) {} -void ShenandoahHeapRegionSetIterator::reset(const ShenandoahHeapRegionSet* const set) { - _set = set; - _current_index = 0; -} - ShenandoahHeapRegionSet::ShenandoahHeapRegionSet() : _heap(ShenandoahHeap::heap()), _map_size(_heap->num_regions()), - _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()), _set_map(NEW_C_HEAP_ARRAY(jbyte, _map_size, mtGC)), - _biased_set_map(_set_map - ((uintx)_heap->base() >> _region_size_bytes_shift)), _region_count(0) { // Use 1-byte data type @@ -58,83 +51,40 @@ ShenandoahHeapRegionSet::~ShenandoahHeapRegionSet() { } void ShenandoahHeapRegionSet::add_region(ShenandoahHeapRegion* r) { - assert(!is_in(r), "Already in collection set"); + assert(!is_in(r), "Already in region set"); _set_map[r->index()] = 1; _region_count++; } -bool ShenandoahHeapRegionSet::add_region_check_for_duplicates(ShenandoahHeapRegion* r) { - if (!is_in(r)) { - add_region(r); - 
return true; - } else { - return false; - } -} - void ShenandoahHeapRegionSet::remove_region(ShenandoahHeapRegion* r) { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); assert(Thread::current()->is_VM_thread(), "Must be VMThread"); assert(is_in(r), "Not in region set"); _set_map[r->index()] = 0; - _region_count --; + _region_count--; } void ShenandoahHeapRegionSet::clear() { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); Copy::zero_to_bytes(_set_map, _map_size); - _region_count = 0; } -ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::claim_next() { - size_t num_regions = _heap->num_regions(); - if (_current_index >= (jint)num_regions) { - return NULL; - } - - jint saved_current = _current_index; - size_t index = (size_t)saved_current; - - while(index < num_regions) { - if (_set->is_in(index)) { - jint cur = Atomic::cmpxchg(&_current_index, saved_current, (jint)(index + 1)); - assert(cur >= (jint)saved_current, "Must move forward"); - if (cur == saved_current) { - assert(_set->is_in(index), "Invariant"); - return _heap->get_region(index); - } else { - index = (size_t)cur; - saved_current = cur; - } - } else { - index ++; - } - } - return NULL; -} - ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::next() { - size_t num_regions = _heap->num_regions(); - for (size_t index = (size_t)_current_index; index < num_regions; index ++) { + for (size_t index = _current_index; index < _heap->num_regions(); index++) { if (_set->is_in(index)) { - _current_index = (jint)(index + 1); + _current_index = index + 1; return _heap->get_region(index); } } - return NULL; } void ShenandoahHeapRegionSet::print_on(outputStream* out) const { out->print_cr("Region Set : " SIZE_FORMAT "", count()); - - debug_only(size_t regions = 0;) - for (size_t index = 0; index < _heap->num_regions(); index ++) { + for (size_t index = 0; index < _heap->num_regions(); index++) { if (is_in(index)) { _heap->get_region(index)->print_on(out); - debug_only(regions ++;) } } - assert(regions == count(), "Must match"); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp index e910c6d21f771cd1a9fb4bbf69232baa993237f3..d933fda60b1779af328f071590511893c9b1d8e8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp @@ -37,10 +37,7 @@ class ShenandoahHeapRegionSetIterator : public StackObj { private: const ShenandoahHeapRegionSet* _set; ShenandoahHeap* const _heap; - - shenandoah_padding(0); - volatile jint _current_index; - shenandoah_padding(1); + size_t _current_index; // No implicit copying: iterators should be passed by reference to capture the state NONCOPYABLE(ShenandoahHeapRegionSetIterator); @@ -48,12 +45,6 @@ private: public: ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set); - // Reset existing iterator to new set - void reset(const ShenandoahHeapRegionSet* const set); - - // MT version - ShenandoahHeapRegion* claim_next(); - // Single-thread version ShenandoahHeapRegion* next(); }; @@ -63,21 +54,14 @@ class ShenandoahHeapRegionSet : public CHeapObj { private: ShenandoahHeap* const _heap; size_t const _map_size; - size_t const _region_size_bytes_shift; jbyte* const _set_map; - // Bias set map's base address for fast test if an oop is in set - jbyte* const _biased_set_map; size_t _region_count; public: ShenandoahHeapRegionSet(); ~ShenandoahHeapRegionSet(); - // 
Add region to set void add_region(ShenandoahHeapRegion* r); - bool add_region_check_for_duplicates(ShenandoahHeapRegion* r); - - // Remove region from set void remove_region(ShenandoahHeapRegion* r); size_t count() const { return _region_count; } @@ -85,16 +69,10 @@ public: inline bool is_in(ShenandoahHeapRegion* r) const; inline bool is_in(size_t region_idx) const; - inline bool is_in(oop p) const; void print_on(outputStream* out) const; void clear(); - -private: - jbyte* biased_map_address() const { - return _biased_set_map; - } }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp index d6781c4ad78a3e77babb70bce29d5711e61e2306..84f58d3018949d1a08effb9f7e22ab08f441ebe7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp @@ -25,10 +25,8 @@ #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP -#include "gc/shenandoah/shenandoahAsserts.hpp" #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" -#include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.hpp" bool ShenandoahHeapRegionSet::is_in(size_t region_idx) const { @@ -40,12 +38,4 @@ bool ShenandoahHeapRegionSet::is_in(ShenandoahHeapRegion* r) const { return is_in(r->index()); } -bool ShenandoahHeapRegionSet::is_in(oop p) const { - shenandoah_assert_in_heap(NULL, p); - uintx index = (cast_from_oop(p)) >> _region_size_bytes_shift; - // no need to subtract the bottom of the heap from p, - // _biased_set_map is biased - return _biased_set_map[index] == 1; -} - #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp index a37523f8197f675634cc211829de8c6555c7a476..a71cad75baedd87d72fb4c9480c049940ef5205c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp @@ -82,8 +82,6 @@ private: return map() + to_words_align_down(bit); } - static inline const bm_word_t load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order); - bool at(idx_t index) const { verify_index(index); return (*word_addr(index) & bit_mask(index)) != 0; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp index b9e0bb61f54c57bfa8a83efa003f63a5063f5d89..9dd3f7298993f7a40ccb042e8703588f854e2000 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp @@ -46,7 +46,7 @@ inline bool ShenandoahMarkBitMap::mark_strong(HeapWord* heap_addr, bool& was_upg volatile bm_word_t* const addr = word_addr(bit); const bm_word_t mask = bit_mask(bit); const bm_word_t mask_weak = (bm_word_t)1 << (bit_in_word(bit) + 1); - bm_word_t old_val = load_word_ordered(addr, memory_order_conservative); + bm_word_t old_val = Atomic::load(addr); do { const bm_word_t new_val = old_val | mask; @@ -54,7 +54,7 @@ inline bool ShenandoahMarkBitMap::mark_strong(HeapWord* heap_addr, bool& was_upg assert(!was_upgraded, "Should be false already"); return false; // Someone else beat us to it. 
} - const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_conservative); + const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_relaxed); if (cur_val == old_val) { was_upgraded = (cur_val & mask_weak) != 0; return true; // Success. @@ -71,7 +71,7 @@ inline bool ShenandoahMarkBitMap::mark_weak(HeapWord* heap_addr) { volatile bm_word_t* const addr = word_addr(bit); const bm_word_t mask_weak = (bm_word_t)1 << (bit_in_word(bit) + 1); const bm_word_t mask_strong = (bm_word_t)1 << bit_in_word(bit); - bm_word_t old_val = load_word_ordered(addr, memory_order_conservative); + bm_word_t old_val = Atomic::load(addr); do { if ((old_val & mask_strong) != 0) { @@ -81,7 +81,7 @@ inline bool ShenandoahMarkBitMap::mark_weak(HeapWord* heap_addr) { if (new_val == old_val) { return false; // Someone else beat us to it. } - const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_conservative); + const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_relaxed); if (cur_val == old_val) { return true; // Success. } @@ -107,18 +107,6 @@ inline bool ShenandoahMarkBitMap::is_marked(HeapWord* addr) const { return (*word_addr(index) & mask) != 0; } -inline const ShenandoahMarkBitMap::bm_word_t ShenandoahMarkBitMap::load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order) { - if (memory_order == memory_order_relaxed || memory_order == memory_order_release) { - return Atomic::load(addr); - } else { - assert(memory_order == memory_order_acq_rel || - memory_order == memory_order_acquire || - memory_order == memory_order_conservative, - "unexpected memory ordering"); - return Atomic::load_acquire(addr); - } -} - template inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_next_bit_impl(idx_t l_index, idx_t r_index) const { STATIC_ASSERT(flip == find_ones_flip || flip == find_zeros_flip); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp index d8866ebd6ddc9e19090dbffee75af750ccdd3757..b7553b7125ca753df2439afee8979e5210f56415 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp @@ -30,6 +30,7 @@ #include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "runtime/atomic.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/threadSMR.hpp" /* * In normal concurrent cycle, we have to pace the application to let GC finish. @@ -179,7 +180,7 @@ size_t ShenandoahPacer::update_and_get_progress_history() { void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) { size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize; STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); - Atomic::xchg(&_budget, (intptr_t)initial); + Atomic::xchg(&_budget, (intptr_t)initial, memory_order_relaxed); Atomic::store(&_tax_rate, tax_rate); Atomic::inc(&_epoch); @@ -201,14 +202,14 @@ bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) { return false; } new_val = cur - tax; - } while (Atomic::cmpxchg(&_budget, cur, new_val) != cur); + } while (Atomic::cmpxchg(&_budget, cur, new_val, memory_order_relaxed) != cur); return true; } void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) { assert(ShenandoahPacing, "Only be here when pacing is enabled"); - if (_epoch != epoch) { + if (Atomic::load(&_epoch) != epoch) { // Stale ticket, no need to unpace. 
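The pacer hunks make the same relaxation: the budget is just a shared counter claimed by CAS, and stale reads are already tolerated by design (note the explicit epoch check made visible with Atomic::load). A standalone sketch of the claim loop under those assumptions, with std::atomic standing in for HotSpot's Atomic:

```cpp
#include <atomic>
#include <cstdint>

// Returns true when 'tax' words were claimed from the budget; an
// insufficient budget makes the caller pace itself (unless forced).
bool claim_for_alloc(std::atomic<intptr_t>& budget, intptr_t tax, bool force) {
  intptr_t cur = budget.load(std::memory_order_relaxed);
  intptr_t new_val;
  do {
    if (cur < tax && !force) {
      return false;                 // not enough budget
    }
    new_val = cur - tax;
    // Relaxed CAS: only atomicity of the decrement matters here.
  } while (!budget.compare_exchange_weak(cur, new_val,
                                         std::memory_order_relaxed));
  return true;
}
```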
return; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp index c416d85234fe9149c17d03d35d4a4c41bcf21ccc..783755a432026067f95e6c22ff564ac135e5c56a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp @@ -53,13 +53,13 @@ inline void ShenandoahPacer::report_internal(size_t words) { inline void ShenandoahPacer::report_progress_internal(size_t words) { assert(ShenandoahPacing, "Only be here when pacing is enabled"); STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); - Atomic::add(&_progress, (intptr_t)words); + Atomic::add(&_progress, (intptr_t)words, memory_order_relaxed); } inline void ShenandoahPacer::add_budget(size_t words) { STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); intptr_t inc = (intptr_t) words; - intptr_t new_budget = Atomic::add(&_budget, inc); + intptr_t new_budget = Atomic::add(&_budget, inc, memory_order_relaxed); // Was the budget replenished beyond zero? Then all pacing claims // are satisfied, notify the waiters. Avoid taking any locks here, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp index 9aa10d17d32ec6bb0bb23b56441f1a71cfe2fd36..91d5a278e1e0b617b27dae7b665848ed18ba7005 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp @@ -46,7 +46,7 @@ ShenandoahJavaThreadsIterator::ShenandoahJavaThreadsIterator(ShenandoahPhaseTimi } uint ShenandoahJavaThreadsIterator::claim() { - return Atomic::fetch_and_add(&_claimed, _stride); + return Atomic::fetch_and_add(&_claimed, _stride, memory_order_relaxed); } void ShenandoahJavaThreadsIterator::threads_do(ThreadClosure* cl, uint worker_id) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp index 6992279c41e0993203245e70b0175566c7b314d6..ac835d55f6a1ede5999054777d764749f4916fa7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp @@ -55,69 +55,6 @@ ShenandoahGCStateResetter::~ShenandoahGCStateResetter() { _heap->set_concurrent_weak_root_in_progress(_concurrent_weak_root_in_progress); } -// Check for overflow of number of root types. 
-STATIC_ASSERT((static_cast<uint>(ShenandoahRootVerifier::AllRoots) + 1) > static_cast<uint>(ShenandoahRootVerifier::AllRoots)); - -ShenandoahRootVerifier::ShenandoahRootVerifier(RootTypes types) : _types(types) { - Threads::change_thread_claim_token(); -} - -void ShenandoahRootVerifier::excludes(RootTypes types) { - _types = static_cast<RootTypes>(static_cast<uint>(_types) & (~static_cast<uint>(types))); -} - -bool ShenandoahRootVerifier::verify(RootTypes type) const { - return (_types & type) == type; -} - -ShenandoahRootVerifier::RootTypes ShenandoahRootVerifier::combine(RootTypes t1, RootTypes t2) { - return static_cast<RootTypes>(static_cast<uint>(t1) | static_cast<uint>(t2)); -} - -void ShenandoahRootVerifier::oops_do(OopClosure* oops) { - ShenandoahGCStateResetter resetter; - - CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations); - if (verify(CodeRoots)) { - shenandoah_assert_locked_or_safepoint(CodeCache_lock); - CodeCache::blobs_do(&blobs); - } - - if (verify(CLDGRoots)) { - shenandoah_assert_locked_or_safepoint(ClassLoaderDataGraph_lock); - CLDToOopClosure clds(oops, ClassLoaderData::_claim_none); - ClassLoaderDataGraph::cld_do(&clds); - } - - if (verify(SerialRoots)) { - shenandoah_assert_safepoint(); - } - - if (verify(JNIHandleRoots)) { - shenandoah_assert_safepoint(); - JNIHandles::oops_do(oops); - Universe::vm_global()->oops_do(oops); - } - - if (verify(WeakRoots)) { - shenandoah_assert_safepoint(); - weak_roots_do(oops); - } - - if (ShenandoahStringDedup::is_enabled() && verify(StringDedupRoots)) { - shenandoah_assert_safepoint(); - ShenandoahStringDedup::oops_do_slow(oops); - } - - if (verify(ThreadRoots)) { - shenandoah_assert_safepoint(); - // Do thread roots the last. This allows verification code to find - // any broken objects from those special roots first, not the accidental - // dangling reference from the thread root. - Threads::possibly_parallel_oops_do(false, oops, &blobs); - } -} - void ShenandoahRootVerifier::roots_do(OopClosure* oops) { ShenandoahGCStateResetter resetter; shenandoah_assert_safepoint(); @@ -128,35 +65,37 @@ void ShenandoahRootVerifier::roots_do(OopClosure* oops) { CLDToOopClosure clds(oops, ClassLoaderData::_claim_none); ClassLoaderDataGraph::cld_do(&clds); - JNIHandles::oops_do(oops); - Universe::vm_global()->oops_do(oops); + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStringDedup::oops_do_slow(oops); + } + + for (auto id : EnumRange<OopStorageSet::Id>()) { + OopStorageSet::storage(id)->oops_do(oops); + } // Do thread roots the last. This allows verification code to find // any broken objects from those special roots first, not the accidental // dangling reference from the thread root. - Threads::possibly_parallel_oops_do(true, oops, &blobs); + Threads::possibly_parallel_oops_do(true, oops, NULL); } void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) { ShenandoahGCStateResetter resetter; shenandoah_assert_safepoint(); - CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations); - CLDToOopClosure clds(oops, ClassLoaderData::_claim_none); - ClassLoaderDataGraph::roots_cld_do(&clds, NULL); + ClassLoaderDataGraph::always_strong_cld_do(&clds); - JNIHandles::oops_do(oops); - Universe::vm_global()->oops_do(oops); + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStringDedup::oops_do_slow(oops); + } + for (auto id : EnumRange<OopStorageSet::StrongId>()) { + OopStorageSet::storage(id)->oops_do(oops); + } // Do thread roots the last. This allows verification code to find // any broken objects from those special roots first, not the accidental // dangling reference from the thread root.
+ CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations); Threads::possibly_parallel_oops_do(true, oops, &blobs); } - -void ShenandoahRootVerifier::weak_roots_do(OopClosure* cl) { - for (auto id : EnumRange<OopStorageSet::WeakId>()) { - OopStorageSet::storage(id)->oops_do(cl); - } -} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp index d79624f6f37c51edc40d872b5ff1a6187dd860dd..d7ec54e5873f50414a519f6fb5d6fc536bd49bd5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp @@ -39,38 +39,11 @@ public: ~ShenandoahGCStateResetter(); }; -class ShenandoahRootVerifier : public StackObj { +class ShenandoahRootVerifier : public AllStatic { public: - enum RootTypes { - None = 0, - SerialRoots = 1 << 0, - ThreadRoots = 1 << 1, - CodeRoots = 1 << 2, - CLDGRoots = 1 << 3, - WeakRoots = 1 << 4, - StringDedupRoots = 1 << 5, - JNIHandleRoots = 1 << 6, - AllRoots = (SerialRoots | ThreadRoots | CodeRoots | CLDGRoots | WeakRoots | StringDedupRoots | JNIHandleRoots) - }; - -private: - RootTypes _types; - -public: - ShenandoahRootVerifier(RootTypes types = AllRoots); - - void excludes(RootTypes types); - void oops_do(OopClosure* cl); - // Used to seed ShenandoahVerifier, do not honor root type filter - void roots_do(OopClosure* cl); - void strong_roots_do(OopClosure* cl); - - static RootTypes combine(RootTypes t1, RootTypes t2); -private: - bool verify(RootTypes type) const; - - void weak_roots_do(OopClosure* cl); + static void roots_do(OopClosure* cl); + static void strong_roots_do(OopClosure* cl); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTVERIFIER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp index e916690ec647d2e3a141b252b1f173b6ac1100ff..dc7af9cbeba7643901de0f6aec1dfbdc231fb6ad 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp @@ -40,6 +40,7 @@ #include "gc/shenandoah/shenandoahUnload.hpp" #include "gc/shenandoah/shenandoahVerifier.hpp" #include "memory/iterator.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "oops/access.inline.hpp" diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp index 3f989df6f3b0b23fb3b6b062963c291f00d72917..fd58c92ee9bc4b1965280c563d06244dd14948c3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp @@ -35,11 +35,12 @@ class ShenandoahFullGC; // // VM_ShenandoahOperation // - VM_ShenandoahInitMark: initiate concurrent marking +// - VM_ShenandoahFinalMarkStartEvac: finish up concurrent marking, and start evacuation +// - VM_ShenandoahInitUpdateRefs: initiate update references +// - VM_ShenandoahFinalUpdateRefs: finish up update references // - VM_ShenandoahReferenceOperation: -// - VM_ShenandoahFinalMarkStartEvac: finish up concurrent marking, and start evacuation -// - VM_ShenandoahInitUpdateRefs: initiate update references -// - VM_ShenandoahFinalUpdateRefs: finish up update references // - VM_ShenandoahFullGC: do full GC +// - VM_ShenandoahDegeneratedGC: do STW degenerated GC class VM_ShenandoahOperation : public VM_Operation { protected: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
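For reference, the RootTypes machinery deleted above was the classic bit-flag enum idiom: each root category owns a bit, combine() ORs filters together, excludes() clears them. A portable sketch with an abbreviated flag list (names mirror the removed API, but this is an illustration, not HotSpot code):

```cpp
#include <cstdint>

// One bit per root category (abbreviated).
enum RootTypes : uint32_t {
  None        = 0,
  ThreadRoots = 1u << 1,
  CodeRoots   = 1u << 2,
  CLDGRoots   = 1u << 3,
};

inline RootTypes combine(RootTypes t1, RootTypes t2) {
  return static_cast<RootTypes>(static_cast<uint32_t>(t1) |
                                static_cast<uint32_t>(t2));
}

inline RootTypes exclude(RootTypes all, RootTypes drop) {
  return static_cast<RootTypes>(static_cast<uint32_t>(all) &
                                ~static_cast<uint32_t>(drop));
}
```

The replacement needs none of this because the two remaining entry points always walk the whole (or whole-strong) root set unconditionally.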
index 3eb2bbc78c9da923f59c19c55d98f64c5f66ca59..0fd315ce8ddc08fef96948eff94dcc66f6ce85e7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -160,7 +160,7 @@ private: // skip break; case ShenandoahVerifier::_verify_liveness_complete: - Atomic::add(&_ld[obj_reg->index()], (uint) obj->size()); + Atomic::add(&_ld[obj_reg->index()], (uint) obj->size(), memory_order_relaxed); // fallthrough for fast failure for un-live regions: case ShenandoahVerifier::_verify_liveness_conservative: check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(), @@ -424,7 +424,6 @@ public: class ShenandoahVerifierReachableTask : public AbstractGangTask { private: const char* _label; - ShenandoahRootVerifier* _verifier; ShenandoahVerifier::VerifyOptions _options; ShenandoahHeap* _heap; ShenandoahLivenessData* _ld; @@ -434,12 +433,10 @@ private: public: ShenandoahVerifierReachableTask(MarkBitMap* bitmap, ShenandoahLivenessData* ld, - ShenandoahRootVerifier* verifier, const char* label, ShenandoahVerifier::VerifyOptions options) : AbstractGangTask("Shenandoah Verifier Reachable Objects"), _label(label), - _verifier(verifier), _options(options), _heap(ShenandoahHeap::heap()), _ld(ld), @@ -464,9 +461,9 @@ public: ShenandoahMessageBuffer("%s, Roots", _label), _options); if (_heap->unload_classes()) { - _verifier->strong_roots_do(&cl); + ShenandoahRootVerifier::strong_roots_do(&cl); } else { - _verifier->roots_do(&cl); + ShenandoahRootVerifier::roots_do(&cl); } } @@ -483,7 +480,7 @@ public: } } - Atomic::add(&_processed, processed); + Atomic::add(&_processed, processed, memory_order_relaxed); } }; @@ -512,7 +509,7 @@ public: _processed(0) {}; size_t processed() { - return _processed; + return Atomic::load(&_processed); } virtual void work(uint worker_id) { @@ -522,7 +519,7 @@ public: _options); while (true) { - size_t v = Atomic::fetch_and_add(&_claimed, 1u); + size_t v = Atomic::fetch_and_add(&_claimed, 1u, memory_order_relaxed); if (v < _heap->num_regions()) { ShenandoahHeapRegion* r = _heap->get_region(v); if (!r->is_humongous() && !r->is_trash()) { @@ -542,7 +539,7 @@ public: if (_heap->complete_marking_context()->is_marked((oop)obj)) { verify_and_follow(obj, stack, cl, &processed); } - Atomic::add(&_processed, processed); + Atomic::add(&_processed, processed, memory_order_relaxed); } virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) { @@ -575,7 +572,7 @@ public: } } - Atomic::add(&_processed, processed); + Atomic::add(&_processed, processed, memory_order_relaxed); } void verify_and_follow(HeapWord *addr, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl, size_t *processed) { @@ -618,8 +615,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label, VerifyForwarded forwarded, VerifyMarked marked, VerifyCollectionSet cset, VerifyLiveness liveness, VerifyRegions regions, - VerifyGCState gcstate, - VerifyWeakRoots weak_roots) { + VerifyGCState gcstate) { guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens"); guarantee(ShenandoahVerify, "only when enabled, and bitmap is initialized in ShenandoahHeap::initialize"); @@ -713,8 +709,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label, // This verifies what application can see, since it only cares about reachable objects. 
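The verifier tasks above coordinate workers with two relaxed atomics: fetch_and_add hands out each region index to exactly one worker, and per-worker tallies are merged into the shared _processed counter once at the end. The same shape, standalone (std::atomic, illustrative names):

```cpp
#include <atomic>
#include <cstddef>

// Run by each worker thread of a gang.
void verify_regions(std::atomic<size_t>& claimed,
                    std::atomic<size_t>& processed,
                    size_t num_regions) {
  size_t local_processed = 0;
  while (true) {
    // fetch_add returns a unique index per call, so every region is
    // verified by exactly one worker.
    size_t v = claimed.fetch_add(1, std::memory_order_relaxed);
    if (v >= num_regions) {
      break;                        // all regions handed out
    }
    // ... verify region 'v', incrementing local_processed ...
    local_processed++;
  }
  // Merge once at the end; relaxed is enough for a statistics counter.
  processed.fetch_add(local_processed, std::memory_order_relaxed);
}
```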
size_t count_reachable = 0; if (ShenandoahVerifyLevel >= 2) { - ShenandoahRootVerifier verifier; - ShenandoahVerifierReachableTask task(_verification_bit_map, ld, &verifier, label, options); + ShenandoahVerifierReachableTask task(_verification_bit_map, ld, label, options); _heap->workers()->run_task(&task); count_reachable = task.processed(); } @@ -747,12 +742,12 @@ void ShenandoahVerifier::verify_at_safepoint(const char *label, if (r->is_humongous()) { // For humongous objects, test if start region is marked live, and if so, // all humongous regions in that chain have live data equal to their "used". - juint start_live = Atomic::load_acquire(&ld[r->humongous_start_region()->index()]); + juint start_live = Atomic::load(&ld[r->humongous_start_region()->index()]); if (start_live > 0) { verf_live = (juint)(r->used() / HeapWordSize); } } else { - verf_live = Atomic::load_acquire(&ld[r->index()]); + verf_live = Atomic::load(&ld[r->index()]); } size_t reg_live = r->get_live_data_words(); @@ -780,8 +775,7 @@ void ShenandoahVerifier::verify_generic(VerifyOption vo) { _verify_cset_disable, // cset may be inconsistent _verify_liveness_disable, // no reliable liveness data _verify_regions_disable, // no reliable region data - _verify_gcstate_disable, // no data about gcstate - _verify_all_weak_roots + _verify_gcstate_disable // no data about gcstate ); } @@ -793,8 +787,7 @@ void ShenandoahVerifier::verify_before_concmark() { _verify_cset_none, // UR should have fixed this _verify_liveness_disable, // no reliable liveness data _verify_regions_notrash, // no trash regions - _verify_gcstate_stable, // there are no forwarded objects - _verify_all_weak_roots + _verify_gcstate_stable // there are no forwarded objects ); } @@ -806,17 +799,11 @@ void ShenandoahVerifier::verify_after_concmark() { _verify_cset_none, // no references to cset anymore _verify_liveness_complete, // liveness data must be complete here _verify_regions_disable, // trash regions not yet recycled - _verify_gcstate_stable, // mark should have stabilized the heap - _verify_all_weak_roots + _verify_gcstate_stable // mark should have stabilized the heap ); } void ShenandoahVerifier::verify_before_evacuation() { - // Concurrent weak roots are evacuated during concurrent phase - VerifyWeakRoots verify_weak_roots = _heap->unload_classes() ? - _verify_serial_weak_roots : - _verify_all_weak_roots; - verify_at_safepoint( "Before Evacuation", _verify_forwarded_none, // no forwarded references @@ -824,17 +811,11 @@ void ShenandoahVerifier::verify_before_evacuation() { _verify_cset_disable, // non-forwarded references to cset expected _verify_liveness_complete, // liveness data must be complete here _verify_regions_disable, // trash regions not yet recycled - _verify_gcstate_stable, // mark should have stabilized the heap - verify_weak_roots + _verify_gcstate_stable // mark should have stabilized the heap ); } void ShenandoahVerifier::verify_during_evacuation() { - // Concurrent weak roots are evacuated during concurrent phase - VerifyWeakRoots verify_weak_roots = _heap->unload_classes() ? 
- _verify_serial_weak_roots : - _verify_all_weak_roots; - verify_at_safepoint( "During Evacuation", _verify_forwarded_allow, // some forwarded references are allowed @@ -842,8 +823,7 @@ void ShenandoahVerifier::verify_during_evacuation() { _verify_cset_disable, // some cset references are not forwarded yet _verify_liveness_disable, // liveness data might be already stale after pre-evacs _verify_regions_disable, // trash regions not yet recycled - _verify_gcstate_evacuation, // evacuation is in progress - verify_weak_roots + _verify_gcstate_evacuation // evacuation is in progress ); } @@ -855,8 +835,7 @@ void ShenandoahVerifier::verify_after_evacuation() { _verify_cset_forwarded, // all cset refs are fully forwarded _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_notrash, // trash regions have been recycled already - _verify_gcstate_forwarded, // evacuation produced some forwarded objects - _verify_all_weak_roots + _verify_gcstate_forwarded // evacuation produced some forwarded objects ); } @@ -868,8 +847,7 @@ void ShenandoahVerifier::verify_before_updaterefs() { _verify_cset_forwarded, // all cset refs are fully forwarded _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_notrash, // trash regions have been recycled already - _verify_gcstate_forwarded, // evacuation should have produced some forwarded objects - _verify_all_weak_roots + _verify_gcstate_forwarded // evacuation should have produced some forwarded objects ); } @@ -881,8 +859,7 @@ void ShenandoahVerifier::verify_after_updaterefs() { _verify_cset_none, // no cset references, all updated _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_nocset, // no cset regions, trash regions have appeared - _verify_gcstate_stable, // update refs had cleaned up forwarded objects - _verify_all_weak_roots + _verify_gcstate_stable // update refs had cleaned up forwarded objects ); } @@ -894,8 +871,7 @@ void ShenandoahVerifier::verify_after_degenerated() { _verify_cset_none, // no cset references _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_notrash_nocset, // no trash, no cset - _verify_gcstate_stable, // degenerated refs had cleaned up forwarded objects - _verify_all_weak_roots + _verify_gcstate_stable // degenerated refs had cleaned up forwarded objects ); } @@ -907,8 +883,7 @@ void ShenandoahVerifier::verify_before_fullgc() { _verify_cset_disable, // cset might be foobared _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_disable, // no reliable region data here - _verify_gcstate_disable, // no reliable gcstate data - _verify_all_weak_roots + _verify_gcstate_disable // no reliable gcstate data ); } @@ -920,8 +895,7 @@ void ShenandoahVerifier::verify_after_fullgc() { _verify_cset_none, // no cset references _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_notrash_nocset, // no trash, no cset - _verify_gcstate_stable, // full gc cleaned up everything - _verify_all_weak_roots + _verify_gcstate_stable // full gc cleaned up everything ); } @@ -978,33 +952,11 @@ public: }; void ShenandoahVerifier::verify_roots_in_to_space() { - ShenandoahRootVerifier verifier; - ShenandoahVerifyInToSpaceClosure cl; - verifier.oops_do(&cl); -} - -void ShenandoahVerifier::verify_roots_in_to_space_except(ShenandoahRootVerifier::RootTypes types) { - ShenandoahRootVerifier verifier; - verifier.excludes(types); ShenandoahVerifyInToSpaceClosure cl; - verifier.oops_do(&cl); + 
ShenandoahRootVerifier::roots_do(&cl); } void ShenandoahVerifier::verify_roots_no_forwarded() { - ShenandoahRootVerifier verifier; - ShenandoahVerifyNoForwared cl; - verifier.oops_do(&cl); -} - -void ShenandoahVerifier::verify_roots_no_forwarded(ShenandoahRootVerifier::RootTypes types) { - ShenandoahRootVerifier verifier(types); - ShenandoahVerifyNoForwared cl; - verifier.oops_do(&cl); -} - -void ShenandoahVerifier::verify_roots_no_forwarded_except(ShenandoahRootVerifier::RootTypes types) { - ShenandoahRootVerifier verifier; - verifier.excludes(types); ShenandoahVerifyNoForwared cl; - verifier.oops_do(&cl); + ShenandoahRootVerifier::roots_do(&cl); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp index 838daf955b953a7f1f439ccdfb6139b3a32a8d84..9c9cd6117d5a73cffe7588e62e07b84023a47882 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp @@ -136,12 +136,6 @@ public: _verify_gcstate_evacuation } VerifyGCState; - typedef enum { - _verify_all_weak_roots, - _verify_serial_weak_roots, - _verify_concurrent_weak_roots - } VerifyWeakRoots; - struct VerifyOptions { VerifyForwarded _verify_forwarded; VerifyMarked _verify_marked; @@ -149,20 +143,17 @@ public: VerifyLiveness _verify_liveness; VerifyRegions _verify_regions; VerifyGCState _verify_gcstate; - VerifyWeakRoots _verify_weak_roots; VerifyOptions(VerifyForwarded verify_forwarded, VerifyMarked verify_marked, VerifyCollectionSet verify_collection_set, VerifyLiveness verify_liveness, VerifyRegions verify_regions, - VerifyGCState verify_gcstate, - VerifyWeakRoots verify_weak_roots = _verify_all_weak_roots) : + VerifyGCState verify_gcstate) : _verify_forwarded(verify_forwarded), _verify_marked(verify_marked), _verify_cset(verify_collection_set), _verify_liveness(verify_liveness), _verify_regions(verify_regions), - _verify_gcstate(verify_gcstate), - _verify_weak_roots(verify_weak_roots) {} + _verify_gcstate(verify_gcstate) {} }; private: @@ -172,8 +163,7 @@ private: VerifyCollectionSet cset, VerifyLiveness liveness, VerifyRegions regions, - VerifyGCState gcstate, - VerifyWeakRoots weakRoots); + VerifyGCState gcstate); public: ShenandoahVerifier(ShenandoahHeap* heap, MarkBitMap* verification_bitmap) : @@ -193,11 +183,8 @@ public: // Roots should only contain to-space oops void verify_roots_in_to_space(); - void verify_roots_in_to_space_except(ShenandoahRootVerifier::RootTypes types); void verify_roots_no_forwarded(); - void verify_roots_no_forwarded(ShenandoahRootVerifier::RootTypes types); - void verify_roots_no_forwarded_except(ShenandoahRootVerifier::RootTypes types); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp index 295a25b4a3ee4ca1cd35567564fa52f45604b8f1..538d739bf815c41a6a244e4a9c2aa3744757f4c1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -71,16 +71,6 @@ ShenandoahPushWorkerScope::~ShenandoahPushWorkerScope() { assert(nworkers == _old_workers, "Must be able to restore"); } -ShenandoahPushWorkerQueuesScope::ShenandoahPushWorkerQueuesScope(WorkGang* workers, ShenandoahObjToScanQueueSet* queues, uint nworkers, bool check) : - ShenandoahPushWorkerScope(workers, nworkers, check), _queues(queues) { - _queues->reserve(_n_workers); -} - -ShenandoahPushWorkerQueuesScope::~ShenandoahPushWorkerQueuesScope() { - // Restore old worker value - _queues->reserve(_old_workers); -} - AbstractGangWorker* ShenandoahWorkGang::install_worker(uint which) { AbstractGangWorker* worker = WorkGang::install_worker(which); ShenandoahThreadLocalData::create(worker); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp index 090676bc534969adab61086abc17129a1d87aad7..ab1ea8f5da2c7d565dd36eb33cb98ae62e3a8292 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkGroup.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. + * Copyright (c) 2017, 2021, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,15 +51,6 @@ public: ~ShenandoahPushWorkerScope(); }; -class ShenandoahPushWorkerQueuesScope : public ShenandoahPushWorkerScope { -private: - ShenandoahObjToScanQueueSet* _queues; - -public: - ShenandoahPushWorkerQueuesScope(WorkGang* workers, ShenandoahObjToScanQueueSet* queues, uint nworkers, bool do_check = true); - ~ShenandoahPushWorkerQueuesScope(); -}; - class ShenandoahWorkGang : public WorkGang { private: bool _initialize_gclab; diff --git a/src/hotspot/share/gc/z/zCollectedHeap.cpp b/src/hotspot/share/gc/z/zCollectedHeap.cpp index f07d4b4c7aaabab493b82d72f5ce1d1311fe4db2..24bd1e318166bde11e113569377d47ea2c3127e2 100644 --- a/src/hotspot/share/gc/z/zCollectedHeap.cpp +++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,6 +22,7 @@ */ #include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/z/zCollectedHeap.hpp" diff --git a/src/hotspot/share/gc/z/zHeap.cpp b/src/hotspot/share/gc/z/zHeap.cpp index 89b99181b0f0a3b3a56c6522f2c039cd2b5b097c..ef53d4725d08a476c838d1f49d0c12f8b63db75a 100644 --- a/src/hotspot/share/gc/z/zHeap.cpp +++ b/src/hotspot/share/gc/z/zHeap.cpp @@ -42,6 +42,7 @@ #include "gc/z/zWorkers.inline.hpp" #include "logging/log.hpp" #include "memory/iterator.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "prims/jvmtiTagMap.hpp" #include "runtime/handshake.hpp" diff --git a/src/hotspot/share/gc/z/zStat.cpp b/src/hotspot/share/gc/z/zStat.cpp index afba6a9db309509722f5a870f0b57305c59fd2d9..7b1faab50008effcee7278fe3a96c6d325d93a5e 100644 --- a/src/hotspot/share/gc/z/zStat.cpp +++ b/src/hotspot/share/gc/z/zStat.cpp @@ -32,6 +32,7 @@ #include "gc/z/zStat.hpp" #include "gc/z/zTracer.inline.hpp" #include "gc/z/zUtils.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "runtime/atomic.hpp" #include "runtime/os.hpp" diff --git a/src/hotspot/share/gc/z/zUnload.cpp b/src/hotspot/share/gc/z/zUnload.cpp index 072530a6c23439c8429feef1c8b597de7622ec6f..335b01721d97771723f2a8a75fee916ae410593d 100644 --- a/src/hotspot/share/gc/z/zUnload.cpp +++ b/src/hotspot/share/gc/z/zUnload.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ #include "gc/z/zNMethod.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zUnload.hpp" +#include "memory/metaspaceUtils.hpp" #include "oops/access.inline.hpp" static const ZStatSubPhase ZSubPhaseConcurrentClassesUnlink("Concurrent Classes Unlink"); diff --git a/src/hotspot/share/include/cds.h b/src/hotspot/share/include/cds.h index 9187445329da76f86647f1ecdf0ed7c969686fc2..ee821eb73ac6678c180281ee44709a407c83ba06 100644 --- a/src/hotspot/share/include/cds.h +++ b/src/hotspot/share/include/cds.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,10 +33,10 @@ // // Also, this is a C header file. Do not use C++ here. -#define NUM_CDS_REGIONS 8 // this must be the same as MetaspaceShared::n_regions +#define NUM_CDS_REGIONS 7 // this must be the same as MetaspaceShared::n_regions #define CDS_ARCHIVE_MAGIC 0xf00baba2 #define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8 -#define CURRENT_CDS_ARCHIVE_VERSION 10 +#define CURRENT_CDS_ARCHIVE_VERSION 11 #define INVALID_CDS_ARCHIVE_VERSION -1 struct CDSFileMapRegion { @@ -44,7 +44,7 @@ struct CDSFileMapRegion { int _read_only; // read only region? int _allow_exec; // executable code in this region? int _is_heap_region; // Used by SA and debug build. - int _is_bitmap_region; // Relocation bitmap for RO/RW/MC/MD regions (used by SA and debug build). + int _is_bitmap_region; // Relocation bitmap for RO/RW regions (used by SA and debug build). 
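Bumping CURRENT_CDS_ARCHIVE_VERSION alongside the region-count change makes archives written by older builds unusable rather than silently misread. As a rough sketch of the kind of magic/version gate a reader applies before mapping anything; the ArchiveHeader struct below is a hypothetical stand-in for illustration, not the real CDSFileMapRegion/header layout:

```cpp
#include <cstdio>

constexpr unsigned CDS_ARCHIVE_MAGIC           = 0xf00baba2;
constexpr int      CURRENT_CDS_ARCHIVE_VERSION = 11;

struct ArchiveHeader {   // hypothetical subset, for illustration only
  unsigned _magic;
  int      _version;
};

// A loader built at version 11 refuses version-10 archives outright.
bool archive_usable(const ArchiveHeader& h) {
  if (h._magic != CDS_ARCHIVE_MAGIC) {
    std::fprintf(stderr, "not a CDS archive\n");
    return false;
  }
  if (h._version != CURRENT_CDS_ARCHIVE_VERSION) {
    std::fprintf(stderr, "archive version %d, expected %d\n",
                 h._version, CURRENT_CDS_ARCHIVE_VERSION);
    return false;
  }
  return true;
}
```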
int _mapped_from_file; // Is this region mapped from a file? // If false, this region was initialized using os::read(). size_t _file_offset; // Data for this region starts at this offset in the archive file. diff --git a/src/hotspot/share/interpreter/abstractInterpreter.cpp b/src/hotspot/share/interpreter/abstractInterpreter.cpp index 3d47c60f02fb6265b345fe26f694fdd9b5402d33..9db41dc10ed8738fd038b070bd9af5b4840251a3 100644 --- a/src/hotspot/share/interpreter/abstractInterpreter.cpp +++ b/src/hotspot/share/interpreter/abstractInterpreter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -92,7 +92,6 @@ address AbstractInterpreter::_native_entry_begin = NU address AbstractInterpreter::_native_entry_end = NULL; address AbstractInterpreter::_slow_signature_handler; address AbstractInterpreter::_entry_table [AbstractInterpreter::number_of_method_entries]; -address AbstractInterpreter::_cds_entry_table [AbstractInterpreter::number_of_method_entries]; address AbstractInterpreter::_native_abi_to_tosca [AbstractInterpreter::number_of_result_handlers]; //------------------------------------------------------------------------------------------------------------------------ @@ -200,49 +199,11 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(const methodHan return zerolocals; } -#if INCLUDE_CDS - -// For a shared Method m, to improve sharing across processes, we avoid writing to m->_i2i_entry -// at runtime. Instead, m->_i2i_entry points to a fixed location inside the CDS archive. -// This location contains a trampoline (generated by generate_entry_for_cds_method) -// which jumps to _entry_table[kind]. 
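The removed comment above describes the indirection this patch retires: archived methods pointed at fixed trampolines that merely jumped to the current _entry_table slot, so the archive itself never needed patching when entry addresses changed. A conceptual sketch with function pointers standing in for generated stub code (all names illustrative):

```cpp
#include <cstdio>

using entry_fn = void (*)();

void zerolocals_entry() { std::puts("interpreter entry: zerolocals"); }

// Runtime-populated dispatch table (stands in for _entry_table[kind]).
entry_fn entry_table[1] = { zerolocals_entry };

// The archived, position-stable "trampoline": forwards to whatever the
// table currently holds, so archived callers never need relocation.
void cds_trampoline_kind0() { entry_table[0](); }

int main() {
  cds_trampoline_kind0();   // archived code calls the stable trampoline
}
```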
-address AbstractInterpreter::entry_for_cds_method(const methodHandle& m) { - MethodKind kind = method_kind(m); - assert(0 <= kind && kind < number_of_method_entries, "illegal kind"); - return entry_for_cds_method(kind); -} - -address AbstractInterpreter::entry_for_cds_method(AbstractInterpreter::MethodKind kind) { - const size_t trampoline_size = SharedRuntime::trampoline_size(); - address addr = MetaspaceShared::i2i_entry_code_buffers(); - addr += (size_t)(kind) * trampoline_size; - - return addr; -} - -void AbstractInterpreter::generate_entry_for_cds_method(AbstractInterpreter::MethodKind kind) { - if (UseSharedSpaces) { - address trampoline = entry_for_cds_method(kind); - CodeBuffer buffer(trampoline, (int)(SharedRuntime::trampoline_size())); - MacroAssembler _masm(&buffer); - SharedRuntime::generate_trampoline(&_masm, _entry_table[kind]); - _masm.flush(); - - if (PrintInterpreter) { - Disassembler::decode(buffer.insts_begin(), buffer.insts_end()); - } - } -} - -#endif - void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) { assert(kind >= method_handle_invoke_FIRST && kind <= method_handle_invoke_LAST, "late initialization only for MH entry points"); assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry"); _entry_table[kind] = entry; - - generate_entry_for_cds_method(kind); } // Return true if the interpreter can prove that the given bytecode has @@ -479,6 +440,5 @@ void AbstractInterpreter::initialize_method_handle_entries() { for (int i = method_handle_invoke_FIRST; i <= method_handle_invoke_LAST; i++) { MethodKind kind = (MethodKind) i; _entry_table[kind] = _entry_table[Interpreter::abstract]; - Interpreter::generate_entry_for_cds_method(kind); } } diff --git a/src/hotspot/share/interpreter/abstractInterpreter.hpp b/src/hotspot/share/interpreter/abstractInterpreter.hpp index b565ade3b34a69b50d949cac59ae649046e2a219..2fb5b97e8086257e974a680296c957502c08ba57 100644 --- a/src/hotspot/share/interpreter/abstractInterpreter.hpp +++ b/src/hotspot/share/interpreter/abstractInterpreter.hpp @@ -116,7 +116,6 @@ class AbstractInterpreter: AllStatic { // method entry points static address _entry_table[number_of_method_entries]; // entry points for a given method - static address _cds_entry_table[number_of_method_entries]; // entry points for methods in the CDS archive static address _native_abi_to_tosca[number_of_result_handlers]; // for native method result handlers static address _slow_signature_handler; // the native method generic (slow) signature handler @@ -136,11 +135,6 @@ class AbstractInterpreter: AllStatic { static address entry_for_kind(MethodKind k) { assert(0 <= k && k < number_of_method_entries, "illegal kind"); return _entry_table[k]; } static address entry_for_method(const methodHandle& m) { return entry_for_kind(method_kind(m)); } - // used by class data sharing - static address entry_for_cds_method(const methodHandle& m) NOT_CDS_RETURN_(NULL); - static address entry_for_cds_method(AbstractInterpreter::MethodKind kind) NOT_CDS_RETURN_(NULL); - static void generate_entry_for_cds_method(MethodKind kind) NOT_CDS_RETURN; - // used for bootstrapping method handles: static void set_entry_for_kind(MethodKind k, address e); diff --git a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp index 5228fb9eefbac5292448b277231abd438aaa478c..af4771e7a78307cf349dc5d9e638799bb3561daa 100644 --- 
a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp +++ b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -181,7 +181,6 @@ void TemplateInterpreterGenerator::generate_all() { #define method_entry(kind) \ { CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \ Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \ - Interpreter::generate_entry_for_cds_method(Interpreter::kind); \ } // all non-native method kinds diff --git a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp index 774dee95830be80a10a8d79179f02678751b8bc9..c28efdbef53439f665a803ff94cc04f755245664 100644 --- a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp +++ b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp @@ -1636,46 +1636,74 @@ run: if (support_IRIW_for_not_multiple_copy_atomic_cpu) { OrderAccess::fence(); } - if (tos_type == atos) { - VERIFY_OOP(obj->obj_field_acquire(field_offset)); - SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); - } else if (tos_type == itos) { - SET_STACK_INT(obj->int_field_acquire(field_offset), -1); - } else if (tos_type == ltos) { - SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); - MORE_STACK(1); - } else if (tos_type == btos || tos_type == ztos) { - SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); - } else if (tos_type == ctos) { - SET_STACK_INT(obj->char_field_acquire(field_offset), -1); - } else if (tos_type == stos) { - SET_STACK_INT(obj->short_field_acquire(field_offset), -1); - } else if (tos_type == ftos) { - SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); - } else { - SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); - MORE_STACK(1); + switch (tos_type) { + case btos: + case ztos: + SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); + break; + case ctos: + SET_STACK_INT(obj->char_field_acquire(field_offset), -1); + break; + case stos: + SET_STACK_INT(obj->short_field_acquire(field_offset), -1); + break; + case itos: + SET_STACK_INT(obj->int_field_acquire(field_offset), -1); + break; + case ftos: + SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); + break; + case ltos: + SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); + MORE_STACK(1); + break; + case dtos: + SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); + MORE_STACK(1); + break; + case atos: { + oop val = obj->obj_field_acquire(field_offset); + VERIFY_OOP(val); + SET_STACK_OBJECT(val, -1); + break; + } + default: + ShouldNotReachHere(); } } else { - if (tos_type == atos) { - VERIFY_OOP(obj->obj_field(field_offset)); - SET_STACK_OBJECT(obj->obj_field(field_offset), -1); - } else if (tos_type == itos) { - SET_STACK_INT(obj->int_field(field_offset), -1); - } else if (tos_type == ltos) { - SET_STACK_LONG(obj->long_field(field_offset), 0); - MORE_STACK(1); - } else if (tos_type == btos || tos_type == ztos) { - SET_STACK_INT(obj->byte_field(field_offset), -1); - } else if (tos_type == ctos) { - SET_STACK_INT(obj->char_field(field_offset), -1); - } else if (tos_type == stos) { - SET_STACK_INT(obj->short_field(field_offset), -1); - } else if (tos_type == ftos) { - 
SET_STACK_FLOAT(obj->float_field(field_offset), -1); - } else { - SET_STACK_DOUBLE(obj->double_field(field_offset), 0); - MORE_STACK(1); + switch (tos_type) { + case btos: + case ztos: + SET_STACK_INT(obj->byte_field(field_offset), -1); + break; + case ctos: + SET_STACK_INT(obj->char_field(field_offset), -1); + break; + case stos: + SET_STACK_INT(obj->short_field(field_offset), -1); + break; + case itos: + SET_STACK_INT(obj->int_field(field_offset), -1); + break; + case ftos: + SET_STACK_FLOAT(obj->float_field(field_offset), -1); + break; + case ltos: + SET_STACK_LONG(obj->long_field(field_offset), 0); + MORE_STACK(1); + break; + case dtos: + SET_STACK_DOUBLE(obj->double_field(field_offset), 0); + MORE_STACK(1); + break; + case atos: { + oop val = obj->obj_field(field_offset); + VERIFY_OOP(val); + SET_STACK_OBJECT(val, -1); + break; + } + default: + ShouldNotReachHere(); } } @@ -1745,49 +1773,75 @@ run: // int field_offset = cache->f2_as_index(); if (cache->is_volatile()) { - if (tos_type == itos) { - obj->release_int_field_put(field_offset, STACK_INT(-1)); - } else if (tos_type == atos) { - VERIFY_OOP(STACK_OBJECT(-1)); - obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); - } else if (tos_type == btos) { - obj->release_byte_field_put(field_offset, STACK_INT(-1)); - } else if (tos_type == ztos) { - int bool_field = STACK_INT(-1); // only store LSB - obj->release_byte_field_put(field_offset, (bool_field & 1)); - } else if (tos_type == ltos) { - obj->release_long_field_put(field_offset, STACK_LONG(-1)); - } else if (tos_type == ctos) { - obj->release_char_field_put(field_offset, STACK_INT(-1)); - } else if (tos_type == stos) { - obj->release_short_field_put(field_offset, STACK_INT(-1)); - } else if (tos_type == ftos) { - obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); - } else { - obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); + switch (tos_type) { + case ztos: + obj->release_byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB + break; + case btos: + obj->release_byte_field_put(field_offset, STACK_INT(-1)); + break; + case ctos: + obj->release_char_field_put(field_offset, STACK_INT(-1)); + break; + case stos: + obj->release_short_field_put(field_offset, STACK_INT(-1)); + break; + case itos: + obj->release_int_field_put(field_offset, STACK_INT(-1)); + break; + case ftos: + obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); + break; + case ltos: + obj->release_long_field_put(field_offset, STACK_LONG(-1)); + break; + case dtos: + obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); + break; + case atos: { + oop val = STACK_OBJECT(-1); + VERIFY_OOP(val); + obj->release_obj_field_put(field_offset, val); + break; + } + default: + ShouldNotReachHere(); } OrderAccess::storeload(); } else { - if (tos_type == itos) { - obj->int_field_put(field_offset, STACK_INT(-1)); - } else if (tos_type == atos) { - VERIFY_OOP(STACK_OBJECT(-1)); - obj->obj_field_put(field_offset, STACK_OBJECT(-1)); - } else if (tos_type == btos) { - obj->byte_field_put(field_offset, STACK_INT(-1)); - } else if (tos_type == ztos) { - int bool_field = STACK_INT(-1); // only store LSB - obj->byte_field_put(field_offset, (bool_field & 1)); - } else if (tos_type == ltos) { - obj->long_field_put(field_offset, STACK_LONG(-1)); - } else if (tos_type == ctos) { - obj->char_field_put(field_offset, STACK_INT(-1)); - } else if (tos_type == stos) { - obj->short_field_put(field_offset, STACK_INT(-1)); - } else if (tos_type == ftos) { - obj->float_field_put(field_offset, 
STACK_FLOAT(-1)); - } else { - obj->double_field_put(field_offset, STACK_DOUBLE(-1)); + switch (tos_type) { + case ztos: + obj->byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB + break; + case btos: + obj->byte_field_put(field_offset, STACK_INT(-1)); + break; + case ctos: + obj->char_field_put(field_offset, STACK_INT(-1)); + break; + case stos: + obj->short_field_put(field_offset, STACK_INT(-1)); + break; + case itos: + obj->int_field_put(field_offset, STACK_INT(-1)); + break; + case ftos: + obj->float_field_put(field_offset, STACK_FLOAT(-1)); + break; + case ltos: + obj->long_field_put(field_offset, STACK_LONG(-1)); + break; + case dtos: + obj->double_field_put(field_offset, STACK_DOUBLE(-1)); + break; + case atos: { + oop val = STACK_OBJECT(-1); + VERIFY_OOP(val); + obj->obj_field_put(field_offset, val); + break; + } + default: + ShouldNotReachHere(); } } diff --git a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp index 3bf90ccacbf84c79f04f389af2c7717e11c98d59..7f968bab58ab4d9f6944482776530c9e9cf36a6f 100644 --- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp +++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp @@ -1513,7 +1513,7 @@ static bool is_retransforming(const InstanceKlass* ik, TRAPS) { assert(name != NULL, "invariant"); Handle class_loader(THREAD, ik->class_loader()); Handle protection_domain(THREAD, ik->protection_domain()); - return SystemDictionary::find(name, class_loader, protection_domain, THREAD) != NULL; + return SystemDictionary::find_instance_klass(name, class_loader, protection_domain) != NULL; } // target for JFR_ON_KLASS_CREATION hook diff --git a/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp b/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp index 950dfd40c1e4a72ea43cf40837ce00d3c9680ca7..85f6614ff5e9d5ebfac29cb60f73a2497420757f 100644 --- a/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp +++ b/src/hotspot/share/jfr/periodic/jfrOSInterface.cpp @@ -34,14 +34,6 @@ #include "utilities/ostream.hpp" #include <stdlib.h> // for environment variables -#ifdef __APPLE__ -#include <crt_externs.h> -#define environ (*_NSGetEnviron()) -#endif - -#ifndef environ -extern char** environ; -#endif static JfrOSInterface* _instance = NULL; @@ -281,14 +273,14 @@ const char* JfrOSInterface::virtualization_name() { } int JfrOSInterface::generate_initial_environment_variable_events() { - if (environ == NULL) { + if (os::get_environ() == NULL) { return OS_ERR; } if (EventInitialEnvironmentVariable::is_enabled()) { // One time stamp for all events, so they can be grouped together JfrTicks time_stamp = JfrTicks::now(); - for (char** p = environ; *p != NULL; p++) { + for (char** p = os::get_environ(); *p != NULL; p++) { char* variable = *p; char* equal_sign = strchr(variable, '='); if (equal_sign != NULL) { diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp index 38b028ac6247e0fd433c32f61388b66e43aceb7a..680460b88b28d3a5ea445aeaa9fead7505010d82 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
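The jfrOSInterface change above exists because macOS gives shared libraries no usable global environ; it must be fetched through _NSGetEnviron(). Routing all access through one accessor (os::get_environ() in the patch) hides that per-platform difference. A standalone version of the shim being centralized:

```cpp
#include <cstdio>

#ifdef __APPLE__
#include <crt_externs.h>
// No global 'environ' is exported to dylibs on macOS.
static char** get_environ() { return *_NSGetEnviron(); }
#else
extern char** environ;
static char** get_environ() { return environ; }
#endif

int main() {
  for (char** p = get_environ(); *p != nullptr; p++) {
    std::printf("%s\n", *p);
  }
}
```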
* * This code is free software; you can redistribute it and/or modify it @@ -42,6 +42,7 @@ #include "jfr/utilities/jfrThreadIterator.hpp" #include "memory/iterator.hpp" #include "memory/metaspace.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/referenceType.hpp" #include "memory/universe.hpp" #include "oops/compressedOops.hpp" diff --git a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp index 45a3db363387345ea2d4b9ada532e2f1eda5d73e..55c1b7ece4c5965f24f430063d0746e4ed111f15 100644 --- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp +++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp @@ -369,9 +369,10 @@ static void write_emergency_dump_file(const RepositoryIterator& iterator) { if (copy_block == NULL) { log_error(jfr, system)("Unable to malloc memory during jfr emergency dump"); log_error(jfr, system)("Unable to write jfr emergency dump file"); + } else { + write_repository_files(iterator, copy_block, block_size); + os::free(copy_block); } - write_repository_files(iterator, copy_block, block_size); - os::free(copy_block); } void JfrEmergencyDump::on_vm_error(const char* repository_path) { diff --git a/src/hotspot/share/jvmci/jvmci.cpp b/src/hotspot/share/jvmci/jvmci.cpp index 6c8a4ed16ed838c3f88eb684bc1dd5c343f6184b..e8fa7dbed262d58bf6a63a75a9e5c13b495ab02d 100644 --- a/src/hotspot/share/jvmci/jvmci.cpp +++ b/src/hotspot/share/jvmci/jvmci.cpp @@ -216,7 +216,9 @@ void JVMCI::vlog(int level, const char* format, va_list ap) { StringEventLog* events = level == 1 ? _events : _verbose_events; guarantee(events != NULL, "JVMCI event log not yet initialized"); Thread* thread = Thread::current_or_null_safe(); - events->logv(thread, format, ap); + if (thread != NULL) { + events->logv(thread, format, ap); + } } } diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp index 90e7df47ecc9aebc00a11c170c3e8c5815226aee..1941160ed945d44df465318e1dfddab5ebd78282 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp @@ -547,16 +547,17 @@ C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, jcla // This is a name from a signature. Strip off the trimmings. // Call recursive to keep scope of strippedsym. 
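The jfrEmergencyDump hunk above fixes a real bug: on allocation failure the old code logged the error but still called write_repository_files() and os::free() on a NULL block. A minimal restatement of the corrected control flow, with plain malloc/free standing in for os::malloc/os::free:

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

void dump_with_copy_block(std::size_t block_size) {
  char* copy_block = static_cast<char*>(std::malloc(block_size));
  if (copy_block == nullptr) {
    std::fprintf(stderr, "unable to allocate copy block; skipping dump\n");
  } else {
    // ... stream repository files through copy_block ...
    std::free(copy_block);   // freed only on the path that allocated it
  }
}
```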
TempNewSymbol strippedsym = Signature::strip_envelope(class_name); - resolved_klass = SystemDictionary::find(strippedsym, class_loader, protection_domain, CHECK_NULL); + resolved_klass = SystemDictionary::find_instance_klass(strippedsym, + class_loader, + protection_domain); } else if (Signature::is_array(class_name)) { SignatureStream ss(class_name, false); int ndim = ss.skip_array_prefix(); if (ss.type() == T_OBJECT) { Symbol* strippedsym = ss.as_symbol(); - resolved_klass = SystemDictionary::find(strippedsym, - class_loader, - protection_domain, - CHECK_NULL); + resolved_klass = SystemDictionary::find_instance_klass(strippedsym, + class_loader, + protection_domain); if (!resolved_klass.is_null()) { resolved_klass = resolved_klass->array_klass(ndim, CHECK_NULL); } @@ -564,7 +565,9 @@ C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, jcla resolved_klass = TypeArrayKlass::cast(Universe::typeArrayKlassObj(ss.type()))->array_klass(ndim, CHECK_NULL); } } else { - resolved_klass = SystemDictionary::find(class_name, class_loader, protection_domain, CHECK_NULL); + resolved_klass = SystemDictionary::find_instance_klass(class_name, + class_loader, + protection_domain); } } JVMCIObject result = JVMCIENV->get_jvmci_type(resolved_klass, JVMCI_CHECK_NULL); @@ -1230,7 +1233,7 @@ C2V_VMENTRY_NULL(jobject, iterateFrames, (JNIEnv* env, jobject compilerToVM, job jobjectArray methods = initial_methods; int frame_number = 0; - vframe* vf = vframe::new_vframe(fst.current(), fst.register_map(), thread); + vframe* vf = vframe::new_vframe(fst, thread); while (true) { // look for the given method @@ -1340,7 +1343,7 @@ C2V_VMENTRY_NULL(jobject, iterateFrames, (JNIEnv* env, jobject compilerToVM, job if (fst.current()->sp() != stack_pointer) { THROW_MSG_NULL(vmSymbols::java_lang_IllegalStateException(), "stack frame not found after deopt") } - vf = vframe::new_vframe(fst.current(), fst.register_map(), thread); + vf = vframe::new_vframe(fst, thread); if (!vf->is_compiled_frame()) { THROW_MSG_NULL(vmSymbols::java_lang_IllegalStateException(), "compiled stack frame expected") } @@ -1367,7 +1370,7 @@ C2V_VMENTRY_NULL(jobject, iterateFrames, (JNIEnv* env, jobject compilerToVM, job break; } fst.next(); - vf = vframe::new_vframe(fst.current(), fst.register_map(), thread); + vf = vframe::new_vframe(fst, thread); frame_number = 0; } // end of frame loop @@ -1575,91 +1578,22 @@ C2V_VMENTRY(void, materializeVirtualObjects, (JNIEnv* env, jobject, jobject _hs_ HotSpotJVMCI::HotSpotStackFrameReference::set_objectsMaterialized(JVMCIENV, hs_frame, JNI_TRUE); C2V_END -// Creates a scope where the current thread is attached and detached -// from HotSpot if it wasn't already attached when entering the scope. 
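The AttachDetach helper removed below reported attach/detach failures at most once per process by racing a CAS on a static flag; only the winning thread prints. The same idiom, standalone with std::atomic in place of HotSpot's Atomic::cmpxchg:

```cpp
#include <atomic>
#include <cstdio>

void report_attach_failure(int res) {
  static std::atomic<int> reported{0};
  int expected = 0;
  // Only the first failing thread wins the CAS and prints the warning.
  if (reported.compare_exchange_strong(expected, 1)) {
    std::fprintf(stderr,
                 "Warning: attaching current thread to VM failed with %d "
                 "(future attach errors are suppressed)\n", res);
  }
}
```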
-extern "C" int jio_printf(const char *fmt, ...); -class AttachDetach : public StackObj { - public: - bool _attached; - AttachDetach(JNIEnv* env, JavaThread* current_thread) { - if (current_thread == NULL) { - extern struct JavaVM_ main_vm; - JNIEnv* hotspotEnv; - jint res = main_vm.AttachCurrentThread((void**)&hotspotEnv, NULL); - _attached = res == JNI_OK; - static volatile int report_attach_error = 0; - if (res != JNI_OK && report_attach_error == 0 && Atomic::cmpxchg(&report_attach_error, 0, 1) == 0) { - // Only report an attach error once - jio_printf("Warning: attaching current thread to VM failed with %d (future attach errors are suppressed)\n", res); - } - } else { - _attached = false; - } - } - ~AttachDetach() { - if (_attached && get_current_thread() != NULL) { - extern struct JavaVM_ main_vm; - jint res = main_vm.DetachCurrentThread(); - static volatile int report_detach_error = 0; - if (res != JNI_OK && report_detach_error == 0 && Atomic::cmpxchg(&report_detach_error, 0, 1) == 0) { - // Only report an attach error once - jio_printf("Warning: detaching current thread from VM failed with %d (future attach errors are suppressed)\n", res); - } - } - } -}; - -C2V_VMENTRY_PREFIX(jint, writeDebugOutput, (JNIEnv* env, jobject, jbyteArray bytes, jint offset, jint length, bool flush, bool can_throw)) - AttachDetach ad(env, thread); - bool use_tty = true; - if (thread == NULL) { - if (!ad._attached) { - // Can only use tty if the current thread is attached - JVMCI_event_1("Cannot write to tty on unattached thread"); - return 0; - } - thread = get_current_thread(); - } - JVMCITraceMark jtm("writeDebugOutput"); - C2V_BLOCK(void, writeDebugOutput, (JNIEnv* env, jobject, jbyteArray bytes, jint offset, jint length)) - if (bytes == NULL) { - if (can_throw) { - JVMCI_THROW_0(NullPointerException); - } - return -1; - } - JVMCIPrimitiveArray array = JVMCIENV->wrap(bytes); - - // Check if offset and length are non negative. - if (offset < 0 || length < 0) { - if (can_throw) { - JVMCI_THROW_0(ArrayIndexOutOfBoundsException); - } - return -2; - } - // Check if the range is valid. - int array_length = JVMCIENV->get_length(array); - if ((((unsigned int) length + (unsigned int) offset) > (unsigned int) array_length)) { - if (can_throw) { - JVMCI_THROW_0(ArrayIndexOutOfBoundsException); - } - return -2; - } - jbyte buffer[O_BUFLEN]; - while (length > 0) { - int copy_len = MIN2(length, (jint)O_BUFLEN); - JVMCIENV->copy_bytes_to(array, buffer, offset, copy_len); - tty->write((char*) buffer, copy_len); - length -= O_BUFLEN; - offset += O_BUFLEN; +// Use of tty does not require the current thread to be attached to the VM +// so no need for a full C2V_VMENTRY transition. +C2V_VMENTRY_PREFIX(void, writeDebugOutput, (JNIEnv* env, jobject, jlong buffer, jint length, bool flush)) + if (length <= 8) { + tty->write((char*) &buffer, length); + } else { + tty->write((char*) buffer, length); } if (flush) { tty->flush(); } - return 0; C2V_END -C2V_VMENTRY(void, flushDebugOutput, (JNIEnv* env, jobject)) +// Use of tty does not require the current thread to be attached to the VM +// so no need for a full C2V_VMENTRY transition. 
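The replacement writeDebugOutput above takes the payload as a jlong plus a length: for up to 8 bytes the caller packs the bytes into the jlong itself and the callee reads them from &buffer, while longer payloads pass a real address in the same parameter. A self-contained sketch of that dual use (illustrative names, 64-bit pointers assumed):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

void write_debug(int64_t buffer, int32_t length) {
  if (length <= 8) {
    // Small payload: the bytes live inside the 'buffer' word itself.
    std::fwrite(&buffer, 1, (std::size_t)length, stdout);
  } else {
    // Large payload: 'buffer' is really a pointer to external storage.
    std::fwrite((const char*)(intptr_t)buffer, 1, (std::size_t)length, stdout);
  }
}

int main() {
  int64_t packed = 0;
  std::memcpy(&packed, "hi\n", 3);                  // pack 3 bytes into the word
  write_debug(packed, 3);

  const char* big = "a longer message\n";
  write_debug((int64_t)(intptr_t)big, (int32_t)std::strlen(big));
}
```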
+C2V_VMENTRY_PREFIX(void, flushDebugOutput, (JNIEnv* env, jobject)) tty->flush(); C2V_END @@ -2793,7 +2727,7 @@ JNINativeMethod CompilerToVM::methods[] = { {CC "iterateFrames", CC "([" RESOLVED_METHOD "[" RESOLVED_METHOD "I" INSPECTED_FRAME_VISITOR ")" OBJECT, FN_PTR(iterateFrames)}, {CC "materializeVirtualObjects", CC "(" HS_STACK_FRAME_REF "Z)V", FN_PTR(materializeVirtualObjects)}, {CC "shouldDebugNonSafepoints", CC "()Z", FN_PTR(shouldDebugNonSafepoints)}, - {CC "writeDebugOutput", CC "([BIIZZ)I", FN_PTR(writeDebugOutput)}, + {CC "writeDebugOutput", CC "(JIZ)V", FN_PTR(writeDebugOutput)}, {CC "flushDebugOutput", CC "()V", FN_PTR(flushDebugOutput)}, {CC "methodDataProfileDataSize", CC "(JI)I", FN_PTR(methodDataProfileDataSize)}, {CC "getFingerprint", CC "(J)J", FN_PTR(getFingerprint)}, diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp index c4278a9f231a69d7b27cabe83aa9d504eb72a843..3d817a5d129ecb7932358e924b66204cfd17432d 100644 --- a/src/hotspot/share/jvmci/jvmciRuntime.cpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp @@ -1160,9 +1160,9 @@ Klass* JVMCIRuntime::get_klass_by_name_impl(Klass*& accessing_klass, ttyUnlocker ttyul; // release tty lock to avoid ordering problems MutexLocker ml(Compile_lock); if (!require_local) { - found_klass = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, CHECK_NULL); + found_klass = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, THREAD); } else { - found_klass = SystemDictionary::find_instance_or_array_klass(sym, loader, domain, CHECK_NULL); + found_klass = SystemDictionary::find_instance_or_array_klass(sym, loader, domain); } } @@ -1627,7 +1627,7 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV, debug_info, dependencies, code_buffer, frame_words, oop_map_set, handler_table, implicit_exception_table, - compiler, comp_level, GrowableArrayView::EMPTY, + compiler, comp_level, GrowableArrayView::EMPTY, speculations, speculations_len, nmethod_mirror_index, nmethod_mirror_name, failed_speculations); diff --git a/src/hotspot/share/jvmci/metadataHandles.cpp b/src/hotspot/share/jvmci/metadataHandles.cpp index da1ad7b61a08894b7f5c7640d7a16c9307ed918e..bcf66721f52ce9edbaa91a651b1c3cad3ecc67e8 100644 --- a/src/hotspot/share/jvmci/metadataHandles.cpp +++ b/src/hotspot/share/jvmci/metadataHandles.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,6 +22,7 @@ */ #include "precompiled.hpp" +#include "classfile/classLoaderData.hpp" #include "jvmci/metadataHandles.hpp" #include "runtime/atomic.hpp" diff --git a/src/hotspot/share/logging/log.hpp b/src/hotspot/share/logging/log.hpp index 3d9a268218286862a17a3b3b316781eafcc4eae3..b0d6d83434ecbc9a8bb306ddae6c0cf8a349a352 100644 --- a/src/hotspot/share/logging/log.hpp +++ b/src/hotspot/share/logging/log.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,16 +72,16 @@ class LogMessageBuffer; // Log class for more advanced logging scenarios. 
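The JNINativeMethod table entry has to track the native signature, which is why the registration string becomes "(JIZ)V": J = jlong, I = jint, Z = jboolean, V = void return. A minimal standalone RegisterNatives example with the same descriptor shape (class and method names hypothetical):

```cpp
#include <jni.h>

// Matches descriptor "(JIZ)V": (jlong, jint, jboolean) -> void.
static void JNICALL writeDemo(JNIEnv*, jobject, jlong buf, jint len,
                              jboolean flush) {
  // ... write 'len' bytes described by 'buf', optionally flush ...
}

static const JNINativeMethod kMethods[] = {
  { (char*)"writeDemo", (char*)"(JIZ)V", (void*)writeDemo },
};

jint register_natives(JNIEnv* env, jclass cls) {
  return env->RegisterNatives(cls, kMethods, 1);
}
```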
diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp
index c4278a9f231a69d7b27cabe83aa9d504eb72a843..3d817a5d129ecb7932358e924b66204cfd17432d 100644
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp
@@ -1160,9 +1160,9 @@ Klass* JVMCIRuntime::get_klass_by_name_impl(Klass*& accessing_klass,
     ttyUnlocker ttyul;  // release tty lock to avoid ordering problems
     MutexLocker ml(Compile_lock);
     if (!require_local) {
-      found_klass = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, CHECK_NULL);
+      found_klass = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader, THREAD);
     } else {
-      found_klass = SystemDictionary::find_instance_or_array_klass(sym, loader, domain, CHECK_NULL);
+      found_klass = SystemDictionary::find_instance_or_array_klass(sym, loader, domain);
     }
   }
 
@@ -1627,7 +1627,7 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
                                        debug_info, dependencies, code_buffer,
                                        frame_words, oop_map_set,
                                        handler_table, implicit_exception_table,
-                                       compiler, comp_level, GrowableArrayView::EMPTY,
+                                       compiler, comp_level, GrowableArrayView::EMPTY,
                                        speculations, speculations_len,
                                        nmethod_mirror_index, nmethod_mirror_name,
                                        failed_speculations);
diff --git a/src/hotspot/share/jvmci/metadataHandles.cpp b/src/hotspot/share/jvmci/metadataHandles.cpp
index da1ad7b61a08894b7f5c7640d7a16c9307ed918e..bcf66721f52ce9edbaa91a651b1c3cad3ecc67e8 100644
--- a/src/hotspot/share/jvmci/metadataHandles.cpp
+++ b/src/hotspot/share/jvmci/metadataHandles.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
 */
 
 #include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
 #include "jvmci/metadataHandles.hpp"
 #include "runtime/atomic.hpp"
diff --git a/src/hotspot/share/logging/log.hpp b/src/hotspot/share/logging/log.hpp
index 3d9a268218286862a17a3b3b316781eafcc4eae3..b0d6d83434ecbc9a8bb306ddae6c0cf8a349a352 100644
--- a/src/hotspot/share/logging/log.hpp
+++ b/src/hotspot/share/logging/log.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -72,16 +72,16 @@ class LogMessageBuffer;
 // Log class for more advanced logging scenarios.
 // Has printf-style member functions for each log level (trace(), debug(), etc).
 //
-// Also has outputStream compatible API for the different log-levels.
-// The streams are resource allocated when requested and are accessed through
-// calls to _stream() functions (trace_stream(), debug_stream(), etc).
+// The (trace(), debug(), etc) functions can also be used along with the LogStream
+// class to obtain an outputStream object, to be passed to various printing
+// functions that accept an outputStream:
 //
 // Example usage:
-//   Log(logging) log;
+//   Log(codecache, sweep) log;
 //   if (log.is_debug()) {
-//     ...
 //     log.debug("result = %d", result).trace(" tracing info");
-//     obj->print_on(log.debug_stream());
+//     LogStream ls(log.debug());
+//     CodeCache::print_summary(&ls, false);
 //   }
 //
 #define Log(...) LogImpl<LOG_TAGS(__VA_ARGS__)>
@@ -93,13 +93,11 @@ class LogMessageBuffer;
 // so that redundant specification of tags or levels can be avoided.
 //
 // Example usage:
-//   LogTarget(Debug, gc) out;
+//   LogTarget(Debug, codecache, sweep) out;
 //   if (out.is_enabled()) {
-//     ...
-//     out.print("Worker: %u", i);
-//     out.print(" data: %d", x);
-//     ...
-//     print_stats(out.stream());
+//     out.print("result = %d", result);
+//     LogStream ls(out);
+//     CodeCache::print_summary(&ls, false);
 //   }
 //
 #define LogTarget(level, ...) LogTargetImpl<LogLevel::level, LOG_TAGS(__VA_ARGS__)>
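For readers updating call sites, a minimal sketch of the idiom the rewritten comments describe (the tag set, message, and CodeCache call are illustrative):

    // Assumes logging/log.hpp and logging/logStream.hpp.
    Log(codecache, sweep) log;
    if (log.is_debug()) {
      log.debug("result = %d", result);  // printf-style, cheap when the level is disabled
      LogStream ls(log.debug());         // adapts the debug target to an outputStream
      CodeCache::print_summary(&ls, false);
    }

The is_debug() guard matters: it skips both the LogStream construction and the potentially expensive print_summary() call whenever debug logging is not enabled for the tags.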
diff --git a/src/hotspot/share/memory/allocation.cpp b/src/hotspot/share/memory/allocation.cpp
index a4c55edac87aa672b986e98fbd72744e2d92c4f8..c3024bcb3725b6cbaab174312cd40deb11a1ee6e 100644
--- a/src/hotspot/share/memory/allocation.cpp
+++ b/src/hotspot/share/memory/allocation.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/arena.hpp"
-#include "memory/metaspaceShared.hpp"
+#include "memory/metaspace.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/os.hpp"
 #include "runtime/task.hpp"
diff --git a/src/hotspot/share/memory/archiveBuilder.cpp b/src/hotspot/share/memory/archiveBuilder.cpp
index a4361c4ddeafcbea8aca043b646a601aaebd7c02..c84b860450a950c560ca635d2ba748e1ad16ccf1 100644
--- a/src/hotspot/share/memory/archiveBuilder.cpp
+++ b/src/hotspot/share/memory/archiveBuilder.cpp
@@ -27,6 +27,7 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/vmClasses.hpp"
+#include "interpreter/abstractInterpreter.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allStatic.hpp"
@@ -44,32 +45,13 @@
 #include "runtime/thread.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "utilities/formatBuffer.hpp"
 #include "utilities/hashtable.inline.hpp"
 
 ArchiveBuilder* ArchiveBuilder::_current = NULL;
 
-class AdapterHandlerEntry;
-
-class MethodTrampolineInfo {
-  address _c2i_entry_trampoline;
-  AdapterHandlerEntry** _adapter_trampoline;
-public:
-  address c2i_entry_trampoline() { return _c2i_entry_trampoline; }
-  AdapterHandlerEntry** adapter_trampoline() { return _adapter_trampoline; }
-  void set_c2i_entry_trampoline(address addr) { _c2i_entry_trampoline = addr; }
-  void set_adapter_trampoline(AdapterHandlerEntry** entry) { _adapter_trampoline = entry; }
-};
-
-class AdapterToTrampoline : public ResourceHashtable<
-  AdapterHandlerEntry*, MethodTrampolineInfo,
-  primitive_hash<AdapterHandlerEntry*>,
-  primitive_equals<AdapterHandlerEntry*>,
-  941,  // prime number
-  ResourceObj::C_HEAP> {};
-
-static AdapterToTrampoline* _adapter_to_trampoline = NULL;
 
 ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
-  char* newtop = ArchiveBuilder::current()->_ro_region->top();
+  char* newtop = ArchiveBuilder::current()->_ro_region.top();
   ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
 }
 
@@ -159,37 +141,38 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
   _ptrmap.iterate(&relocator, start, end);
 }
 
-ArchiveBuilder::ArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region)
-  : _rw_src_objs(), _ro_src_objs(), _src_obj_table(INITIAL_TABLE_SIZE) {
-  assert(_current == NULL, "must be");
-  _current = this;
-
+ArchiveBuilder::ArchiveBuilder() :
+  _current_dump_space(NULL),
+  _buffer_bottom(NULL),
+  _last_verified_top(NULL),
+  _num_dump_regions_used(0),
+  _other_region_used_bytes(0),
+  _requested_static_archive_bottom(NULL),
+  _requested_static_archive_top(NULL),
+  _requested_dynamic_archive_bottom(NULL),
+  _requested_dynamic_archive_top(NULL),
+  _mapped_static_archive_bottom(NULL),
+  _mapped_static_archive_top(NULL),
+  _buffer_to_requested_delta(0),
+  _rw_region("rw", MAX_SHARED_DELTA),
+  _ro_region("ro", MAX_SHARED_DELTA),
+  _rw_src_objs(),
+  _ro_src_objs(),
+  _src_obj_table(INITIAL_TABLE_SIZE),
+  _num_instance_klasses(0),
+  _num_obj_array_klasses(0),
+  _num_type_array_klasses(0),
+  _total_closed_heap_region_size(0),
+  _total_open_heap_region_size(0),
+  _estimated_metaspaceobj_bytes(0),
+  _estimated_hashtable_bytes(0)
+{
   _klasses = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
   _symbols = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
   _special_refs = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<SpecialRefInfo>(24 * K, mtClassShared);
 
-  _num_instance_klasses = 0;
-  _num_obj_array_klasses = 0;
-  _num_type_array_klasses = 0;
-  _alloc_stats = new (ResourceObj::C_HEAP, mtClassShared) DumpAllocStats;
-
-  _mc_region = mc_region;
-  _rw_region = rw_region;
-  _ro_region = ro_region;
-
-  _num_dump_regions_used = 0;
-
-  _estimated_metaspaceobj_bytes = 0;
-  _estimated_hashtable_bytes = 0;
-  _estimated_trampoline_bytes = 0;
-
-  _requested_static_archive_bottom = NULL;
-  _requested_static_archive_top = NULL;
-  _mapped_static_archive_bottom = NULL;
-  _mapped_static_archive_top = NULL;
-  _requested_dynamic_archive_bottom = NULL;
-  _requested_dynamic_archive_top = NULL;
-  _buffer_to_requested_delta = 0;
+  assert(_current == NULL, "must be");
+  _current = this;
 }
 
 ArchiveBuilder::~ArchiveBuilder() {
@@ -205,7 +188,10 @@ ArchiveBuilder::~ArchiveBuilder() {
   delete _klasses;
   delete _symbols;
   delete _special_refs;
-  delete _alloc_stats;
+}
+
+bool ArchiveBuilder::is_dumping_full_module_graph() {
+  return DumpSharedSpaces && MetaspaceShared::use_full_module_graph();
 }
 
 class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
@@ -261,7 +247,7 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
   GatherKlassesAndSymbols doit(this);
   iterate_roots(&doit, /*is_relocating_pointers=*/false);
 #if INCLUDE_CDS_JAVA_HEAP
-  if (DumpSharedSpaces && MetaspaceShared::use_full_module_graph()) {
+  if (is_dumping_full_module_graph()) {
     ClassLoaderDataShared::iterate_symbols(&doit);
   }
 #endif
@@ -335,13 +321,10 @@ size_t ArchiveBuilder::estimate_archive_size() {
   size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
   _estimated_hashtable_bytes = symbol_table_est + dictionary_est;
 
-  _estimated_trampoline_bytes = allocate_method_trampoline_info();
-
   size_t total = 0;
 
   total += _estimated_metaspaceobj_bytes;
   total += _estimated_hashtable_bytes;
-  total += _estimated_trampoline_bytes;
 
   // allow fragmentation at the end of each dump region
   total += _total_dump_regions * reserve_alignment();
@@ -349,7 +332,6 @@ size_t ArchiveBuilder::estimate_archive_size() {
   log_info(cds)("_estimated_hashtable_bytes = " SIZE_FORMAT " + " SIZE_FORMAT " = " SIZE_FORMAT,
                 symbol_table_est, dictionary_est, _estimated_hashtable_bytes);
   log_info(cds)("_estimated_metaspaceobj_bytes = " SIZE_FORMAT, _estimated_metaspaceobj_bytes);
-  log_info(cds)("_estimated_trampoline_bytes = " SIZE_FORMAT, _estimated_trampoline_bytes);
   log_info(cds)("total estimate bytes = " SIZE_FORMAT, total);
 
   return align_up(total, reserve_alignment());
@@ -366,18 +348,18 @@ address ArchiveBuilder::reserve_buffer() {
   // buffer_bottom is the lowest address of the 3 core regions (mc, rw, ro) when
   // we are copying the class metadata into the buffer.
   address buffer_bottom = (address)rs.base();
-  log_info(cds)("Reserved output buffer space at : " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
+  log_info(cds)("Reserved output buffer space at " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
                 p2i(buffer_bottom), buffer_size);
-  MetaspaceShared::set_shared_rs(rs);
+  _shared_rs = rs;
 
-  MetaspaceShared::init_shared_dump_space(_mc_region);
   _buffer_bottom = buffer_bottom;
   _last_verified_top = buffer_bottom;
-  _current_dump_space = _mc_region;
+  _current_dump_space = &_rw_region;
   _num_dump_regions_used = 1;
   _other_region_used_bytes = 0;
+  _current_dump_space->init(&_shared_rs, &_shared_vs);
 
-  ArchivePtrMarker::initialize(&_ptrmap, (address*)_mc_region->base(), (address*)_mc_region->top());
+  ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);
 
   // The bottom of the static archive should be mapped at this address by default.
   _requested_static_archive_bottom = (address)MetaspaceShared::requested_base_address();
@@ -413,6 +395,12 @@ address ArchiveBuilder::reserve_buffer() {
     vm_direct_exit(0);
   }
 
+  if (DumpSharedSpaces) {
+    // We don't want any valid object to be at the very bottom of the archive.
+    // See ArchivePtrMarker::mark_pointer().
+    rw_region()->allocate(16);
+  }
+
   return buffer_bottom;
 }
 
@@ -520,6 +508,7 @@ void ArchiveBuilder::remember_embedded_pointer_in_copied_obj(MetaspaceClosure::R
 void ArchiveBuilder::gather_source_objs() {
   ResourceMark rm;
   log_info(cds)("Gathering all archivable objects ... ");
+  gather_klasses_and_symbols();
   GatherSortedSourceObjs doit(this);
   iterate_sorted_roots(&doit, /*is_relocating_pointers=*/false);
   doit.finish();
@@ -565,16 +554,60 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
   }
 }
 
-void ArchiveBuilder::dump_rw_region() {
+void ArchiveBuilder::start_dump_space(DumpRegion* next) {
+  address bottom = _last_verified_top;
+  address top = (address)(current_dump_space()->top());
+  _other_region_used_bytes += size_t(top - bottom);
+
+  current_dump_space()->pack(next);
+  _current_dump_space = next;
+  _num_dump_regions_used ++;
+
+  _last_verified_top = (address)(current_dump_space()->top());
+}
+
+void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) {
+  address bottom = _last_verified_top;
+  address top = (address)(current_dump_space()->top());
+  size_t used = size_t(top - bottom) + _other_region_used_bytes;
+  int diff = int(estimate) - int(used);
+
+  log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
+  assert(diff >= 0, "Estimate is too small");
+
+  _last_verified_top = top;
+  _other_region_used_bytes = 0;
+}
+
+void ArchiveBuilder::dump_rw_metadata() {
   ResourceMark rm;
   log_info(cds)("Allocating RW objects ... ");
-  make_shallow_copies(_rw_region, &_rw_src_objs);
+  make_shallow_copies(&_rw_region, &_rw_src_objs);
+
+#if INCLUDE_CDS_JAVA_HEAP
+  if (is_dumping_full_module_graph()) {
+    // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders
+    char* start = rw_region()->top();
+    ClassLoaderDataShared::allocate_archived_tables();
+    alloc_stats()->record_modules(rw_region()->top() - start, /*read_only*/false);
+  }
+#endif
 }
 
-void ArchiveBuilder::dump_ro_region() {
+void ArchiveBuilder::dump_ro_metadata() {
   ResourceMark rm;
   log_info(cds)("Allocating RO objects ... ");
"); - make_shallow_copies(_ro_region, &_ro_src_objs); + + start_dump_space(&_ro_region); + make_shallow_copies(&_ro_region, &_ro_src_objs); + +#if INCLUDE_CDS_JAVA_HEAP + if (is_dumping_full_module_graph()) { + char* start = ro_region()->top(); + ClassLoaderDataShared::init_archived_tables(); + alloc_stats()->record_modules(ro_region()->top() - start, /*read_only*/true); + } +#endif } void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region, @@ -619,7 +652,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes); src_info->set_dumped_addr((address)dest); - _alloc_stats->record(ref->msotype(), int(newtop - oldtop), src_info->read_only()); + _alloc_stats.record(ref->msotype(), int(newtop - oldtop), src_info->read_only()); } address ArchiveBuilder::get_dumped_addr(address src_obj) const { @@ -821,6 +854,8 @@ class RelocateBufferToRequested : public BitMapClosure { void ArchiveBuilder::relocate_to_requested() { + ro_region()->pack(); + size_t my_archive_size = buffer_top() - buffer_bottom(); if (DumpSharedSpaces) { @@ -989,14 +1024,8 @@ public: write_header(mapinfo); write_data(header, header_end, 0); - DumpRegion* mc_region = builder->_mc_region; - DumpRegion* rw_region = builder->_rw_region; - DumpRegion* ro_region = builder->_ro_region; - - address mc = address(mc_region->base()); - address mc_end = address(mc_region->end()); - write_dump_region("mc region", mc_region); - write_data(mc, mc_end, mc + buffer_to_runtime_delta()); + DumpRegion* rw_region = &builder->_rw_region; + DumpRegion* ro_region = &builder->_ro_region; write_dump_region("rw region", rw_region); write_objects(rw_region, &builder->_rw_src_objs); @@ -1019,18 +1048,8 @@ public: } }; -void ArchiveBuilder::write_cds_map_to_log(FileMapInfo* mapinfo, - GrowableArray *closed_heap_regions, - GrowableArray *open_heap_regions, - char* bitmap, size_t bitmap_size_in_bytes) { - if (log_is_enabled(Info, cds, map)) { - CDSMapLogger::write(this, mapinfo, closed_heap_regions, open_heap_regions, - bitmap, bitmap_size_in_bytes); - } -} - -void ArchiveBuilder::print_stats(int ro_all, int rw_all, int mc_all) { - _alloc_stats->print_stats(ro_all, rw_all, mc_all); +void ArchiveBuilder::print_stats() { + _alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used())); } void ArchiveBuilder::clean_up_src_obj_table() { @@ -1038,92 +1057,116 @@ void ArchiveBuilder::clean_up_src_obj_table() { _src_obj_table.iterate(&cleaner); } -void ArchiveBuilder::allocate_method_trampolines_for(InstanceKlass* ik) { - if (ik->methods() != NULL) { - for (int j = 0; j < ik->methods()->length(); j++) { - // Walk the methods in a deterministic order so that the trampolines are - // created in a deterministic order. 
-      Method* m = ik->methods()->at(j);
-      AdapterHandlerEntry* ent = m->adapter(); // different methods can share the same AdapterHandlerEntry
-      MethodTrampolineInfo* info = _adapter_to_trampoline->get(ent);
-      if (info->c2i_entry_trampoline() == NULL) {
-        info->set_c2i_entry_trampoline(
-          (address)MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size()));
-        info->set_adapter_trampoline(
-          (AdapterHandlerEntry**)MetaspaceShared::misc_code_space_alloc(sizeof(AdapterHandlerEntry*)));
-      }
-    }
+void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
+                                   GrowableArray<MemRegion>* closed_heap_regions,
+                                   GrowableArray<MemRegion>* open_heap_regions,
+                                   GrowableArray<ArchiveHeapOopmapInfo>* closed_heap_oopmaps,
+                                   GrowableArray<ArchiveHeapOopmapInfo>* open_heap_oopmaps) {
+  // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
+  // MetaspaceShared::n_regions (internal to hotspot).
+  assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");
+
+  write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
+  write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
+
+  size_t bitmap_size_in_bytes;
+  char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_heap_oopmaps, open_heap_oopmaps,
+                                              bitmap_size_in_bytes);
+
+  if (closed_heap_regions != NULL) {
+    _total_closed_heap_region_size = mapinfo->write_archive_heap_regions(
+                                        closed_heap_regions,
+                                        closed_heap_oopmaps,
+                                        MetaspaceShared::first_closed_archive_heap_region,
+                                        MetaspaceShared::max_closed_archive_heap_region);
+    _total_open_heap_region_size = mapinfo->write_archive_heap_regions(
+                                        open_heap_regions,
+                                        open_heap_oopmaps,
+                                        MetaspaceShared::first_open_archive_heap_region,
+                                        MetaspaceShared::max_open_archive_heap_region);
   }
-}
 
-void ArchiveBuilder::allocate_method_trampolines() {
-  for (int i = 0; i < _klasses->length(); i++) {
-    Klass* k = _klasses->at(i);
-    if (k->is_instance_klass()) {
-      InstanceKlass* ik = InstanceKlass::cast(k);
-      allocate_method_trampolines_for(ik);
-    }
-  }
-}
+  print_region_stats(mapinfo, closed_heap_regions, open_heap_regions);
 
-// Allocate MethodTrampolineInfo for all Methods that will be archived. Also
-// return the total number of bytes needed by the method trampolines in the MC
-// region.
-size_t ArchiveBuilder::allocate_method_trampoline_info() {
-  size_t total = 0;
-  size_t each_method_bytes =
-    align_up(SharedRuntime::trampoline_size(), BytesPerWord) +
-    align_up(sizeof(AdapterHandlerEntry*), BytesPerWord);
+  mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
+  mapinfo->set_header_crc(mapinfo->compute_header_crc());
+  mapinfo->write_header();
+  mapinfo->close();
 
-  if (_adapter_to_trampoline == NULL) {
-    _adapter_to_trampoline = new (ResourceObj::C_HEAP, mtClass)AdapterToTrampoline();
+  if (log_is_enabled(Info, cds)) {
+    print_stats();
   }
-  int count = 0;
-  for (int i = 0; i < _klasses->length(); i++) {
-    Klass* k = _klasses->at(i);
-    if (k->is_instance_klass()) {
-      InstanceKlass* ik = InstanceKlass::cast(k);
-      if (ik->methods() != NULL) {
-        for (int j = 0; j < ik->methods()->length(); j++) {
-          Method* m = ik->methods()->at(j);
-          AdapterHandlerEntry* ent = m->adapter(); // different methods can share the same AdapterHandlerEntry
-          bool is_created = false;
-          MethodTrampolineInfo* info = _adapter_to_trampoline->put_if_absent(ent, &is_created);
-          if (is_created) {
-            count++;
-          }
-        }
-      }
-    }
+
+  if (log_is_enabled(Info, cds, map)) {
+    CDSMapLogger::write(this, mapinfo, closed_heap_regions, open_heap_regions,
+                        bitmap, bitmap_size_in_bytes);
   }
-  if (count == 0) {
-    // We have nothing to archive, but let's avoid having an empty region.
-    total = SharedRuntime::trampoline_size();
-  } else {
-    total = count * each_method_bytes;
+
+  FREE_C_HEAP_ARRAY(char, bitmap);
+}
+
+void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
+  mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
+}
+
+void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
+                                        GrowableArray<MemRegion>* closed_heap_regions,
+                                        GrowableArray<MemRegion>* open_heap_regions) {
+  // Print statistics of all the regions
+  const size_t bitmap_used = mapinfo->space_at(MetaspaceShared::bm)->used();
+  const size_t bitmap_reserved = mapinfo->space_at(MetaspaceShared::bm)->used_aligned();
+  const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
+                                bitmap_reserved +
+                                _total_closed_heap_region_size +
+                                _total_open_heap_region_size;
+  const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
+                             bitmap_used +
+                             _total_closed_heap_region_size +
+                             _total_open_heap_region_size;
+  const double total_u_perc = percent_of(total_bytes, total_reserved);
+
+  _rw_region.print(total_reserved);
+  _ro_region.print(total_reserved);
+
+  print_bitmap_region_stats(bitmap_used, total_reserved);
+
+  if (closed_heap_regions != NULL) {
+    print_heap_region_stats(closed_heap_regions, "ca", total_reserved);
+    print_heap_region_stats(open_heap_regions, "oa", total_reserved);
  }
-  return align_up(total, SharedSpaceObjectAlignment);
+
+  log_debug(cds)("total   : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
+                 total_bytes, total_reserved, total_u_perc);
 }
 
-void ArchiveBuilder::update_method_trampolines() {
-  for (int i = 0; i < klasses()->length(); i++) {
-    Klass* k = klasses()->at(i);
-    if (k->is_instance_klass()) {
-      InstanceKlass* ik = InstanceKlass::cast(k);
-      Array<Method*>* methods = ik->methods();
-      for (int j = 0; j < methods->length(); j++) {
-        Method* m = methods->at(j);
-        AdapterHandlerEntry* ent = m->adapter();
-        MethodTrampolineInfo* info = _adapter_to_trampoline->get(ent);
-        // m is the "copy" of the original Method, but its adapter() field is still valid because
-        // we haven't called make_klasses_shareable() yet.
-        m->set_from_compiled_entry(info->c2i_entry_trampoline());
-        m->set_adapter_trampoline(info->adapter_trampoline());
-      }
-    }
+void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
+  log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
+                 size, size/double(total_size)*100.0, size);
+}
+
+void ArchiveBuilder::print_heap_region_stats(GrowableArray<MemRegion>* heap_mem,
+                                             const char* name, size_t total_size) {
+  int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
+  for (int i = 0; i < arr_len; i++) {
+    char* start = (char*)heap_mem->at(i).start();
+    size_t size = heap_mem->at(i).byte_size();
+    char* top = start + size;
+    log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
+                   name, i, size, size/double(total_size)*100.0, size, p2i(start));
   }
 }
 
+void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
+  // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space.
+  // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes
+  // or so.
+  _rw_region.print_out_of_space_msg(name, needed_bytes);
+  _ro_region.print_out_of_space_msg(name, needed_bytes);
+
+  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
+                                "Please reduce the number of shared classes.");
+}
+
+
 #ifndef PRODUCT
 void ArchiveBuilder::assert_is_vm_thread() {
   assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
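Taken together, these archiveBuilder.cpp changes collapse dump-time setup into a builder-owned sequence. A condensed sketch of the order of operations, as implied by the new code and by DynamicArchiveBuilder::doit() later in this patch (not a verbatim call site):

    DynamicArchiveBuilder builder;  // ArchiveBuilder() registers itself as _current
    builder.init_header();
    builder.gather_source_objs();   // now calls gather_klasses_and_symbols() itself
    builder.reserve_buffer();       // rw becomes the first (and current) dump space
    builder.dump_rw_metadata();     // writable copies, plus archived module tables
    builder.dump_ro_metadata();     // packs rw via start_dump_space(), then fills ro
    builder.relocate_metaspaceobj_embedded_pointers();
    builder.relocate_to_requested();  // packs ro; pointers now match the requested base

The mc region and its trampoline bookkeeping simply have no step left in this sequence, which is why the estimate, logging, and write-out code above all lose their mc terms.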
diff --git a/src/hotspot/share/memory/archiveBuilder.hpp b/src/hotspot/share/memory/archiveBuilder.hpp
index 0859b6a14b1763da812290f93f608d383c926476..2fbc1b34b15096e6f58756e1d7f766dec8cf2809 100644
--- a/src/hotspot/share/memory/archiveBuilder.hpp
+++ b/src/hotspot/share/memory/archiveBuilder.hpp
@@ -26,7 +26,9 @@
 #define SHARE_MEMORY_ARCHIVEBUILDER_HPP
 
 #include "memory/archiveUtils.hpp"
+#include "memory/dumpAllocStats.hpp"
 #include "memory/metaspaceClosure.hpp"
+#include "oops/array.hpp"
 #include "oops/klass.hpp"
 #include "runtime/os.hpp"
 #include "utilities/bitMap.hpp"
@@ -34,33 +36,35 @@
 #include "utilities/hashtable.hpp"
 #include "utilities/resourceHash.hpp"
 
+struct ArchiveHeapOopmapInfo;
 class CHeapBitMap;
-class DumpAllocStats;
 class FileMapInfo;
 class Klass;
 class MemRegion;
 class Symbol;
 
+// Metaspace::allocate() requires that all blocks must be aligned with KlassAlignmentInBytes.
+// We enforce the same alignment rule in blocks allocated from the shared space.
+const int SharedSpaceObjectAlignment = KlassAlignmentInBytes;
+
 // Overview of CDS archive creation (for both static and dynamic dump):
 //
 // [1] Load all classes (static dump: from the classlist, dynamic dump: as part of app execution)
 // [2] Allocate "output buffer"
-// [3] Copy contents of the 3 "core" regions (mc/rw/ro) into the output buffer.
-//     - mc region:
-//         allocate_method_trampolines();
-//         allocate the cpp vtables (static dump only)
+// [3] Copy contents of the 2 "core" regions (rw/ro) into the output buffer.
+//       - allocate the cpp vtables in rw (static dump only)
 //       - memcpy the MetaspaceObjs into rw/ro:
 //         dump_rw_region();
 //         dump_ro_region();
 //       - fix all the pointers in the MetaspaceObjs to point to the copies
 //         relocate_metaspaceobj_embedded_pointers()
 // [4] Copy symbol table, dictionary, etc, into the ro region
-// [5] Relocate all the pointers in mc/rw/ro, so that the archive can be mapped to
+// [5] Relocate all the pointers in rw/ro, so that the archive can be mapped to
 //     the "requested" location without runtime relocation. See relocate_to_requested()
 class ArchiveBuilder : public StackObj {
 protected:
   DumpRegion* _current_dump_space;
-  address _buffer_bottom;                      // for writing the contents of mc/rw/ro regions
+  address _buffer_bottom;                      // for writing the contents of rw/ro regions
   address _last_verified_top;
   int _num_dump_regions_used;
   size_t _other_region_used_bytes;
@@ -186,9 +190,11 @@ private:
   static const int INITIAL_TABLE_SIZE = 15889;
   static const int MAX_TABLE_SIZE     = 1000000;
 
-  DumpRegion* _mc_region;
-  DumpRegion* _rw_region;
-  DumpRegion* _ro_region;
+  ReservedSpace _shared_rs;
+  VirtualSpace _shared_vs;
+
+  DumpRegion _rw_region;
+  DumpRegion _ro_region;
   CHeapBitMap _ptrmap;    // bitmap used by ArchivePtrMarker
 
   SourceObjList _rw_src_objs;                 // objs to put in rw region
@@ -202,7 +208,16 @@ private:
   int _num_instance_klasses;
   int _num_obj_array_klasses;
   int _num_type_array_klasses;
-  DumpAllocStats* _alloc_stats;
+  DumpAllocStats _alloc_stats;
+  size_t _total_closed_heap_region_size;
+  size_t _total_open_heap_region_size;
+
+  void print_region_stats(FileMapInfo *map_info,
+                          GrowableArray<MemRegion>* closed_heap_regions,
+                          GrowableArray<MemRegion>* open_heap_regions);
+  void print_bitmap_region_stats(size_t size, size_t total_size);
+  void print_heap_region_stats(GrowableArray<MemRegion>* heap_mem,
+                               const char *name, size_t total_size);
 
   // For global access.
   static ArchiveBuilder* _current;
@@ -215,12 +230,13 @@ public:
     char* _oldtop;
   public:
    OtherROAllocMark() {
-      _oldtop = _current->_ro_region->top();
+      _oldtop = _current->_ro_region.top();
    }
    ~OtherROAllocMark();
  };
 
 private:
+  bool is_dumping_full_module_graph();
   FollowMode get_follow_mode(MetaspaceClosure::Ref *ref);
 
   void iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers);
@@ -244,9 +260,8 @@ protected:
   // Conservative estimate for number of bytes needed for:
   size_t _estimated_metaspaceobj_bytes;   // all archived MetaspaceObj's.
   size_t _estimated_hashtable_bytes;     // symbol table and dictionaries
-  size_t _estimated_trampoline_bytes;    // method entry trampolines
 
-  static const int _total_dump_regions = 3;
+  static const int _total_dump_regions = 2;
 
   size_t estimate_archive_size();
 
@@ -254,8 +269,10 @@ protected:
     return os::vm_allocation_granularity();
   }
 
+  void start_dump_space(DumpRegion* next);
+  void verify_estimate_size(size_t estimate, const char* which);
+
 public:
-  void set_current_dump_space(DumpRegion* r) { _current_dump_space = r; }
   address reserve_buffer();
 
   address buffer_bottom()                    const { return _buffer_bottom;                        }
@@ -317,7 +334,7 @@ public:
   static void assert_is_vm_thread() PRODUCT_RETURN;
 
 public:
-  ArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region);
+  ArchiveBuilder();
   ~ArchiveBuilder();
 
   void gather_klasses_and_symbols();
@@ -327,17 +344,52 @@ public:
   void add_special_ref(MetaspaceClosure::SpecialRef type, address src_obj, size_t field_offset);
   void remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref, MetaspaceClosure::Ref* ref);
 
-  void dump_rw_region();
-  void dump_ro_region();
+  DumpRegion* rw_region() { return &_rw_region; }
+  DumpRegion* ro_region() { return &_ro_region; }
+
+  static char* rw_region_alloc(size_t num_bytes) {
+    return current()->rw_region()->allocate(num_bytes);
+  }
+  static char* ro_region_alloc(size_t num_bytes) {
+    return current()->ro_region()->allocate(num_bytes);
+  }
+
+  template <typename T>
+  static Array<T>* new_ro_array(int length) {
+    size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
+    Array<T>* array = (Array<T>*)ro_region_alloc(byte_size);
+    array->initialize(length);
+    return array;
+  }
+
+  template <typename T>
+  static Array<T>* new_rw_array(int length) {
+    size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
+    Array<T>* array = (Array<T>*)rw_region_alloc(byte_size);
+    array->initialize(length);
+    return array;
+  }
+
+  template <typename T>
+  static size_t ro_array_bytesize(int length) {
+    size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
+    return align_up(byte_size, SharedSpaceObjectAlignment);
+  }
+
+  void dump_rw_metadata();
+  void dump_ro_metadata();
   void relocate_metaspaceobj_embedded_pointers();
   void relocate_roots();
   void relocate_vm_classes();
   void make_klasses_shareable();
   void relocate_to_requested();
-  void write_cds_map_to_log(FileMapInfo* mapinfo,
-                            GrowableArray<MemRegion> *closed_heap_regions,
-                            GrowableArray<MemRegion> *open_heap_regions,
-                            char* bitmap, size_t bitmap_size_in_bytes);
+  void write_archive(FileMapInfo* mapinfo,
+                     GrowableArray<MemRegion>* closed_heap_regions,
+                     GrowableArray<MemRegion>* open_heap_regions,
+                     GrowableArray<ArchiveHeapOopmapInfo>* closed_heap_oopmaps,
+                     GrowableArray<ArchiveHeapOopmapInfo>* open_heap_oopmaps);
+  void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
+                    bool read_only, bool allow_exec);
 
   address get_dumped_addr(address src_obj) const;
 
@@ -356,7 +408,15 @@ public:
   }
 
   static DumpAllocStats* alloc_stats() {
-    return current()->_alloc_stats;
+    return &(current()->_alloc_stats);
+  }
+
+  static CompactHashtableStats* symbol_stats() {
+    return alloc_stats()->symbol_stats();
+  }
+
+  static CompactHashtableStats* string_stats() {
+    return alloc_stats()->string_stats();
   }
 
   void relocate_klass_ptr(oop o);
@@ -371,13 +431,8 @@ public:
     return (Symbol*)current()->get_dumped_addr((address)orig_symbol);
   }
 
-  void print_stats(int ro_all, int rw_all, int mc_all);
-
-  // Method trampolines related functions
-  void allocate_method_trampolines();
-  void allocate_method_trampolines_for(InstanceKlass* ik);
-  size_t allocate_method_trampoline_info();
-  void update_method_trampolines();
+  void print_stats();
+  void report_out_of_space(const char* name, size_t needed_bytes);
 };
 
 #endif // SHARE_MEMORY_ARCHIVEBUILDER_HPP
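The new static helpers make the target region explicit at each allocation site. Example usage (element type and length are illustrative):

    // A read-only archived array, e.g. a table that is immutable at runtime:
    Array<u4>* ro_table = ArchiveBuilder::new_ro_array<u4>(num_entries);

    // Its writable counterpart, for data patched during dump or at runtime:
    Array<u4>* rw_table = ArchiveBuilder::new_rw_array<u4>(num_entries);

    // Raw bytes in ro, sized with the same alignment the allocator will apply:
    char* p = ArchiveBuilder::ro_region_alloc(ArchiveBuilder::ro_array_bytesize<u4>(num_entries));

Every helper funnels into DumpRegion::allocate() on the builder's own _rw_region or _ro_region, so all allocations are covered by the regions' commit-on-demand and overflow checks.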
diff --git a/src/hotspot/share/memory/archiveUtils.cpp b/src/hotspot/share/memory/archiveUtils.cpp
index 5a3bb5cf461cfac08f19bdf29631baf9a25c9408..7f701e1e87fcdc47a4fcc20620d5bd525c40d11e 100644
--- a/src/hotspot/share/memory/archiveUtils.cpp
+++ b/src/hotspot/share/memory/archiveUtils.cpp
@@ -33,21 +33,20 @@
 #include "memory/dynamicArchive.hpp"
 #include "memory/filemap.hpp"
 #include "memory/heapShared.inline.hpp"
-#include "memory/metaspace.hpp"
 #include "memory/metaspaceShared.hpp"
+#include "memory/metaspaceUtils.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 
 CHeapBitMap* ArchivePtrMarker::_ptrmap = NULL;
-address* ArchivePtrMarker::_ptr_base;
-address* ArchivePtrMarker::_ptr_end;
+VirtualSpace* ArchivePtrMarker::_vs;
+
 bool ArchivePtrMarker::_compacted;
 
-void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end) {
+void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
   assert(_ptrmap == NULL, "initialize only once");
-  _ptr_base = ptr_base;
-  _ptr_end = ptr_end;
+  _vs = vs;
   _compacted = false;
   _ptrmap = ptrmap;
 
@@ -66,17 +65,17 @@ void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
   assert(_ptrmap != NULL, "not initialized");
   assert(!_compacted, "cannot mark anymore");
 
-  if (_ptr_base <= ptr_loc && ptr_loc < _ptr_end) {
+  if (ptr_base() <= ptr_loc && ptr_loc < ptr_end()) {
     address value = *ptr_loc;
     // We don't want any pointer that points to very bottom of the archive, otherwise when
     // MetaspaceShared::default_base_address()==0, we can't distinguish between a pointer
     // to nothing (NULL) vs a pointer to an object that happens to be at the very bottom
     // of the archive.
-    assert(value != (address)_ptr_base, "don't point to the bottom of the archive");
+    assert(value != (address)ptr_base(), "don't point to the bottom of the archive");
 
     if (value != NULL) {
       assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
-      size_t idx = ptr_loc - _ptr_base;
+      size_t idx = ptr_loc - ptr_base();
       if (_ptrmap->size() <= idx) {
         _ptrmap->resize((idx + 1) * 2);
       }
@@ -91,9 +90,9 @@ void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
   assert(_ptrmap != NULL, "not initialized");
   assert(!_compacted, "cannot clear anymore");
 
-  assert(_ptr_base <= ptr_loc && ptr_loc < _ptr_end, "must be");
+  assert(ptr_base() <= ptr_loc && ptr_loc < ptr_end(), "must be");
   assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
-  size_t idx = ptr_loc - _ptr_base;
+  size_t idx = ptr_loc - ptr_base();
   assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked");
   _ptrmap->clear_bit(idx);
   //tty->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
@@ -132,7 +131,7 @@ public:
 
 void ArchivePtrMarker::compact(address relocatable_base, address relocatable_end) {
   assert(!_compacted, "cannot compact again");
-  ArchivePtrBitmapCleaner cleaner(_ptrmap, _ptr_base, relocatable_base, relocatable_end);
+  ArchivePtrBitmapCleaner cleaner(_ptrmap, ptr_base(), relocatable_base, relocatable_end);
   _ptrmap->iterate(&cleaner);
   compact(cleaner.max_non_null_offset());
 }
@@ -147,16 +146,16 @@ char* DumpRegion::expand_top_to(char* newtop) {
   assert(is_allocatable(), "must be initialized and not packed");
   assert(newtop >= _top, "must not grow backwards");
   if (newtop > _end) {
-    MetaspaceShared::report_out_of_space(_name, newtop - _top);
+    ArchiveBuilder::current()->report_out_of_space(_name, newtop - _top);
     ShouldNotReachHere();
   }
 
-  MetaspaceShared::commit_to(_rs, _vs, newtop);
+  commit_to(newtop);
   _top = newtop;
 
-  if (_rs == MetaspaceShared::shared_rs()) {
+  if (_max_delta > 0) {
     uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop-1));
-    if (delta > ArchiveBuilder::MAX_SHARED_DELTA) {
+    if (delta > _max_delta) {
       // This is just a sanity check and should not appear in any real world usage. This
       // happens only if you allocate more than 2GB of shared objects and would require
       // millions of shared classes.
@@ -168,6 +167,39 @@ char* DumpRegion::expand_top_to(char* newtop) {
   return _top;
 }
 
+void DumpRegion::commit_to(char* newtop) {
+  Arguments::assert_is_dumping_archive();
+  char* base = _rs->base();
+  size_t need_committed_size = newtop - base;
+  size_t has_committed_size = _vs->committed_size();
+  if (need_committed_size < has_committed_size) {
+    return;
+  }
+
+  size_t min_bytes = need_committed_size - has_committed_size;
+  size_t preferred_bytes = 1 * M;
+  size_t uncommitted = _vs->reserved_size() - has_committed_size;
+
+  size_t commit = MAX2(min_bytes, preferred_bytes);
+  commit = MIN2(commit, uncommitted);
+  assert(commit <= uncommitted, "sanity");
+
+  if (!_vs->expand_by(commit, false)) {
+    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
+  }
+
+  const char* which;
+  if (_rs->base() == (char*)MetaspaceShared::symbol_rs_base()) {
+    which = "symbol";
+  } else {
+    which = "shared";
+  }
+  log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
+                 which, commit, _vs->actual_committed_size(), _vs->high());
+}
+
+
 char* DumpRegion::allocate(size_t num_bytes) {
   char* p = (char*)align_up(_top, (size_t)SharedSpaceObjectAlignment);
   char* newtop = p + align_up(num_bytes, (size_t)SharedSpaceObjectAlignment);
@@ -204,8 +236,7 @@ void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t neede
 void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
   _rs = rs;
   _vs = vs;
-  // Start with 0 committed bytes. The memory will be committed as needed by
-  // MetaspaceShared::commit_to().
+  // Start with 0 committed bytes. The memory will be committed as needed.
   if (!_vs->initialize(*_rs, 0)) {
     fatal("Unable to allocate memory for shared space");
   }
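A worked example of the sizing policy in DumpRegion::commit_to() (values are illustrative):

    size_t min_bytes       = 40 * K;  // shortfall: needed beyond what is committed
    size_t preferred_bytes = 1 * M;   // grow in 1 MB steps to keep expand_by() calls rare
    size_t uncommitted     = 3 * M;   // reserved address space not yet committed

    size_t commit = MAX2(min_bytes, preferred_bytes);  // 1 MB: the preferred step wins
    commit        = MIN2(commit, uncommitted);         // still 1 MB here

Near the end of the reservation the MIN2 clamp takes over, and a request that cannot be satisfied at all fails inside expand_by(), which exits the VM with the "Failed to expand shared space" message.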
diff --git a/src/hotspot/share/memory/archiveUtils.hpp b/src/hotspot/share/memory/archiveUtils.hpp
index d61f974135df8786e7167f1aaa4a7e6d4e94ca95..4121b955e4300d527b0e53c7abeb64083938d7cb 100644
--- a/src/hotspot/share/memory/archiveUtils.hpp
+++ b/src/hotspot/share/memory/archiveUtils.hpp
@@ -27,6 +27,7 @@
 
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
+#include "memory/virtualspace.hpp"
 #include "runtime/arguments.hpp"
 #include "utilities/bitMap.hpp"
 
@@ -39,15 +40,18 @@ class VirtualSpace;
 // mark_pointer(/*ptr_loc=*/&k->_name). It's required that (_ptr_base <= ptr_loc < _ptr_end). _ptr_base is
 // fixed, but _ptr_end can be expanded as more objects are dumped.
 class ArchivePtrMarker : AllStatic {
-  static CHeapBitMap* _ptrmap;
-  static address* _ptr_base;
-  static address* _ptr_end;
+  static CHeapBitMap*  _ptrmap;
+  static VirtualSpace* _vs;
 
   // Once _ptrmap is compacted, we don't allow bit marking anymore. This is to
   // avoid unintentional copy operations after the bitmap has been finalized and written.
   static bool _compacted;
+
+  static address* ptr_base() { return (address*)_vs->low();  } // committed lower bound (inclusive)
+  static address* ptr_end()  { return (address*)_vs->high(); } // committed upper bound (exclusive)
+
 public:
-  static void initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end);
+  static void initialize(CHeapBitMap* ptrmap, VirtualSpace* vs);
   static void mark_pointer(address* ptr_loc);
   static void clear_pointer(address* ptr_loc);
   static void compact(address relocatable_base, address relocatable_end);
@@ -64,11 +68,6 @@ public:
     mark_pointer(ptr_loc);
   }
 
-  static void expand_ptr_end(address *new_ptr_end) {
-    assert(_ptr_end <= new_ptr_end, "must be");
-    _ptr_end = new_ptr_end;
-  }
-
   static CHeapBitMap* ptrmap() {
     return _ptrmap;
   }
@@ -128,12 +127,17 @@ private:
   char* _base;
   char* _top;
   char* _end;
+  uintx _max_delta;
   bool _is_packed;
   ReservedSpace* _rs;
   VirtualSpace* _vs;
 
+  void commit_to(char* newtop);
+
 public:
-  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
+  DumpRegion(const char* name, uintx max_delta = 0)
+    : _name(name), _base(NULL), _top(NULL), _end(NULL),
+      _max_delta(max_delta), _is_packed(false) {}
 
   char* expand_top_to(char* newtop);
   char* allocate(size_t num_bytes);
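With _max_delta a per-region property, the 2 GB encodability check now follows the region instead of comparing _rs against the global shared ReservedSpace. Construction mirrors the new ArchiveBuilder constructor; the symbol region shown here is an assumption based on the "symbol" case in commit_to():

    DumpRegion rw("rw", MAX_SHARED_DELTA);  // core region: offsets checked in expand_top_to()
    DumpRegion ro("ro", MAX_SHARED_DELTA);
    DumpRegion symbols("symbols");          // default max_delta == 0: no offset check

Only a region created with a non-zero max_delta runs the buffer_to_offset() sanity check when its top is advanced.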
diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp
index e7939d6f29e3af7de84dd4f06a61e4103f5014ed..8388f68c3592e3d1dc026e781a122763a1f9a1ab 100644
--- a/src/hotspot/share/memory/arena.cpp
+++ b/src/hotspot/share/memory/arena.cpp
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
-#include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/os.hpp"
 #include "runtime/task.hpp"
diff --git a/src/hotspot/share/memory/classLoaderMetaspace.cpp b/src/hotspot/share/memory/classLoaderMetaspace.cpp
index 6ec474e6d9765da610ecd4c00d904e7b13a41824..a876f9e7adf49989c792058ac5f8050ed82e2270 100644
--- a/src/hotspot/share/memory/classLoaderMetaspace.cpp
+++ b/src/hotspot/share/memory/classLoaderMetaspace.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -27,6 +27,7 @@
 #include "logging/log.hpp"
 #include "memory/classLoaderMetaspace.hpp"
 #include "memory/metaspace.hpp"
+#include "memory/metaspaceUtils.hpp"
 #include "memory/metaspace/chunkManager.hpp"
 #include "memory/metaspace/internalStats.hpp"
 #include "memory/metaspace/metaspaceArena.hpp"
diff --git a/src/hotspot/share/memory/cppVtables.cpp b/src/hotspot/share/memory/cppVtables.cpp
index 617c4f4c8ef0a6412f9c272c2cb665735dfa8561..0b791184d9e26f766040a4b215118d313b8d7411 100644
--- a/src/hotspot/share/memory/cppVtables.cpp
+++ b/src/hotspot/share/memory/cppVtables.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "logging/log.hpp"
 #include "memory/archiveUtils.hpp"
+#include "memory/archiveBuilder.hpp"
 #include "memory/cppVtables.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
@@ -100,7 +101,7 @@ template <class T>
 CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {
   int n = get_vtable_length(name);
   CppVtableInfo* info =
-      (CppVtableInfo*)MetaspaceShared::misc_code_dump_space()->allocate(CppVtableInfo::byte_size(n));
+      (CppVtableInfo*)ArchiveBuilder::current()->rw_region()->allocate(CppVtableInfo::byte_size(n));
   info->set_vtable_size(n);
   initialize(name, info);
   return info;
@@ -211,13 +212,16 @@ void CppVtableCloner<T>::init_orig_cpp_vtptr(int kind) {
 //      _index[InstanceKlass_Kind]->cloned_vtable() == ((intptr_t**)ik)[0]
 CppVtableInfo** CppVtables::_index = NULL;
 
-char* CppVtables::dumptime_init() {
+char* CppVtables::dumptime_init(ArchiveBuilder* builder) {
   assert(DumpSharedSpaces, "must");
   size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(CppVtableInfo*);
-  _index = (CppVtableInfo**)MetaspaceShared::misc_code_dump_space()->allocate(vtptrs_bytes);
+  _index = (CppVtableInfo**)builder->rw_region()->allocate(vtptrs_bytes);
 
   CPP_VTABLE_TYPES_DO(ALLOCATE_AND_INITIALIZE_VTABLE);
 
+  size_t cpp_tables_size = builder->rw_region()->top() - builder->rw_region()->base();
+  builder->alloc_stats()->record_cpp_vtables((int)cpp_tables_size);
+
   return (char*)_index;
 }
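The vtable cloning now draws from the builder's rw region rather than the retired misc-code space. A sketch of the dump-time call under the new signature (the surrounding static-dump context is assumed, not shown in this hunk):

    ArchiveBuilder* builder = ArchiveBuilder::current();
    char* cloned_vtables = CppVtables::dumptime_init(builder);  // allocates in rw

Note the bookkeeping choice: dumptime_init() measures rw_region()->top() - rw_region()->base() after cloning, so it attributes everything allocated in rw up to that point to the new CppVTables statistics line, which is accurate as long as the vtables are the first substantial rw allocation.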
diff --git a/src/hotspot/share/memory/cppVtables.hpp b/src/hotspot/share/memory/cppVtables.hpp
index c476d67575519ebee67a9b388f5bab9b80a73dee..dbfe639cd6dfd197515f765a7ef5c3257b82d1ed 100644
--- a/src/hotspot/share/memory/cppVtables.hpp
+++ b/src/hotspot/share/memory/cppVtables.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "memory/allStatic.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+class ArchiveBuilder;
 class Method;
 class SerializeClosure;
 class CppVtableInfo;
@@ -37,7 +38,7 @@ class CppVtableInfo;
 class CppVtables : AllStatic {
   static CppVtableInfo** _index;
 public:
-  static char* dumptime_init();
+  static char* dumptime_init(ArchiveBuilder* builder);
   static void zero_archived_vtables();
   static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
   static void serialize(SerializeClosure* sc);
diff --git a/src/hotspot/share/memory/dumpAllocStats.cpp b/src/hotspot/share/memory/dumpAllocStats.cpp
index 23264ca11bf017c913f55a80ab41b25ec0511794..ffec46cd7f57c5e2fe2c8a5ac4b4aec1ddd2b102 100644
--- a/src/hotspot/share/memory/dumpAllocStats.cpp
+++ b/src/hotspot/share/memory/dumpAllocStats.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,29 +26,21 @@
 #include "logging/log.hpp"
 #include "logging/logMessage.hpp"
 #include "memory/dumpAllocStats.hpp"
-#include "memory/metaspaceShared.hpp"
-
-void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all) {
-  // Calculate size of data that was not allocated by Metaspace::allocate()
-  MetaspaceSharedStats *stats = MetaspaceShared::stats();
+void DumpAllocStats::print_stats(int ro_all, int rw_all) {
 
   // symbols
-  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
-  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;
+  _counts[RO][SymbolHashentryType] = _symbol_stats.hashentry_count;
+  _bytes [RO][SymbolHashentryType] = _symbol_stats.hashentry_bytes;
 
-  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
-  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;
+  _counts[RO][SymbolBucketType] = _symbol_stats.bucket_count;
+  _bytes [RO][SymbolBucketType] = _symbol_stats.bucket_bytes;
 
   // strings
-  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
-  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;
-
-  _counts[RO][StringBucketType] = stats->string.bucket_count;
-  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;
+  _counts[RO][StringHashentryType] = _string_stats.hashentry_count;
+  _bytes [RO][StringHashentryType] = _string_stats.hashentry_bytes;
 
-  // TODO: count things like dictionary, vtable, etc
-  _bytes[RW][OtherType] += mc_all;
-  rw_all += mc_all; // mc is mapped Read/Write
+  _counts[RO][StringBucketType] = _string_stats.bucket_count;
+  _bytes [RO][StringBucketType] = _string_stats.bucket_bytes;
 
   // prevent divide-by-zero
   if (ro_all < 1) {
@@ -70,7 +62,7 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all)
 
   LogMessage(cds) msg;
 
-  msg.debug("Detailed metadata info (excluding st regions; rw stats include mc regions):");
+  msg.debug("Detailed metadata info (excluding heap regions):");
   msg.debug("%s", hdr);
   msg.debug("%s", sep);
   for (int type = 0; type < int(_number_of_types); type ++) {
@@ -115,4 +107,3 @@
 
 #undef fmt_stats
 }
-
diff --git a/src/hotspot/share/memory/dumpAllocStats.hpp b/src/hotspot/share/memory/dumpAllocStats.hpp
index bb1e20b1191399aa26a49e5658e067945e5711d4..5bf7b9371a920042cf2ef420d5f1e69597fd1e1e 100644
--- a/src/hotspot/share/memory/dumpAllocStats.hpp
+++ b/src/hotspot/share/memory/dumpAllocStats.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #define SHARE_MEMORY_DUMPALLOCSTATS_HPP
 
 #include "memory/allocation.hpp"
+#include "classfile/compactHashtable.hpp"
 
 // This is for dumping detailed statistics for the allocations
 // in the shared spaces.
@@ -40,6 +41,7 @@ public:
   f(StringHashentry) \
   f(StringBucket) \
   f(ModulesNatives) \
+  f(CppVTables) \
   f(Other)
 
 enum Type {
@@ -57,17 +59,23 @@ public:
     }
   }
 
-public:
-  enum { RO = 0, RW = 1 };
+  CompactHashtableStats _symbol_stats;
+  CompactHashtableStats _string_stats;
 
   int _counts[2][_number_of_types];
   int _bytes [2][_number_of_types];
 
+public:
+  enum { RO = 0, RW = 1 };
+
   DumpAllocStats() {
     memset(_counts, 0, sizeof(_counts));
     memset(_bytes, 0, sizeof(_bytes));
   };
 
+  CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
+  CompactHashtableStats* string_stats() { return &_string_stats; }
+
   void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
     assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
     int which = (read_only) ? RO : RW;
@@ -84,7 +92,12 @@ public:
     int which = (read_only) ? RO : RW;
     _bytes [which][OtherType] += byte_size;
   }
-  void print_stats(int ro_all, int rw_all, int mc_all);
+
+  void record_cpp_vtables(int byte_size) {
+    _bytes[RW][CppVTablesType] += byte_size;
+  }
+
+  void print_stats(int ro_all, int rw_all);
 };
 
 #endif // SHARE_MEMORY_DUMPALLOCSTATS_HPP
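Symbol and string statistics now live inside DumpAllocStats instead of the global MetaspaceShared::stats(). A sketch of how a compact-table writer is wired to them under this scheme (the writer API is the pre-existing one from classfile/compactHashtable.hpp; the table and count are illustrative):

    CompactHashtableWriter writer(num_symbols, ArchiveBuilder::symbol_stats());
    // ... writer.add(hash, value) for each archived symbol ...
    writer.dump(&shared_symbol_table, "symbol");  // fills hashentry/bucket counts and bytes

print_stats() then reads those numbers directly from _symbol_stats and _string_stats, which is what lets the old MetaspaceSharedStats plumbing disappear.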
diff --git a/src/hotspot/share/memory/dynamicArchive.cpp b/src/hotspot/share/memory/dynamicArchive.cpp
index dc23c869de5839561d2fb447aaaae5f2759541b4..54a457561eb415d383a526db6dc1507b6e7942d8 100644
--- a/src/hotspot/share/memory/dynamicArchive.cpp
+++ b/src/hotspot/share/memory/dynamicArchive.cpp
@@ -92,35 +92,7 @@ public:
   void write_archive(char* serialized_data);
 
 public:
-  DynamicArchiveBuilder() : ArchiveBuilder(MetaspaceShared::misc_code_dump_space(),
-                                           MetaspaceShared::read_write_dump_space(),
-                                           MetaspaceShared::read_only_dump_space()) {
-  }
-
-  void start_dump_space(DumpRegion* next) {
-    address bottom = _last_verified_top;
-    address top = (address)(current_dump_space()->top());
-    _other_region_used_bytes += size_t(top - bottom);
-
-    MetaspaceShared::pack_dump_space(current_dump_space(), next, MetaspaceShared::shared_rs());
-    _current_dump_space = next;
-    _num_dump_regions_used ++;
-
-    _last_verified_top = (address)(current_dump_space()->top());
-  }
-
-  void verify_estimate_size(size_t estimate, const char* which) {
-    address bottom = _last_verified_top;
-    address top = (address)(current_dump_space()->top());
-    size_t used = size_t(top - bottom) + _other_region_used_bytes;
-    int diff = int(estimate) - int(used);
-
-    log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
-    assert(diff >= 0, "Estimate is too small");
-
-    _last_verified_top = top;
-    _other_region_used_bytes = 0;
-  }
+  DynamicArchiveBuilder() : ArchiveBuilder() { }
 
   // Do this before and after the archive dump to see if any corruption
   // is caused by dynamic dumping.
@@ -140,28 +112,14 @@ public:
     DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
     SystemDictionaryShared::check_excluded_classes();
 
-    gather_klasses_and_symbols();
-
-    // mc space starts ...
-    reserve_buffer();
     init_header();
-
-    allocate_method_trampolines();
-    verify_estimate_size(_estimated_trampoline_bytes, "Trampolines");
-
     gather_source_objs();
-    // rw space starts ...
-    start_dump_space(MetaspaceShared::read_write_dump_space());
+    reserve_buffer();
 
     log_info(cds, dynamic)("Copying %d klasses and %d symbols",
                            klasses()->length(), symbols()->length());
-
-    dump_rw_region();
-
-    // ro space starts ...
-    DumpRegion* ro_space = MetaspaceShared::read_only_dump_space();
-    start_dump_space(ro_space);
-    dump_ro_region();
+    dump_rw_metadata();
+    dump_ro_metadata();
     relocate_metaspaceobj_embedded_pointers();
     relocate_roots();
 
@@ -173,19 +131,20 @@ public:
       // Note that these tables still point to the *original* objects, so
      // they would need to call DynamicArchive::original_to_target() to
      // get the correct addresses.
-      assert(current_dump_space() == ro_space, "Must be RO space");
+      assert(current_dump_space() == ro_region(), "Must be RO space");
       SymbolTable::write_to_archive(symbols());
+
+      ArchiveBuilder::OtherROAllocMark mark;
       SystemDictionaryShared::write_to_archive(false);
 
-      serialized_data = ro_space->top();
-      WriteClosure wc(ro_space);
+      serialized_data = ro_region()->top();
+      WriteClosure wc(ro_region());
       SymbolTable::serialize_shared_table_header(&wc, false);
       SystemDictionaryShared::serialize_dictionary_headers(&wc, false);
     }
 
     verify_estimate_size(_estimated_hashtable_bytes, "Hashtables");
 
-    update_method_trampolines();
     sort_methods();
 
     log_info(cds)("Make classes shareable");
@@ -333,9 +292,6 @@ void DynamicArchiveBuilder::remark_pointers_for_instance_klass(InstanceKlass* k,
 }
 
 void DynamicArchiveBuilder::write_archive(char* serialized_data) {
-  int num_klasses = klasses()->length();
-  int num_symbols = symbols()->length();
-
   Array<u8>* table = FileMapInfo::saved_shared_path_table().table();
   SharedPathTable runtime_table(table, FileMapInfo::shared_path_table().size());
   _header->set_shared_path_table(runtime_table);
@@ -344,19 +300,8 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
   FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
   assert(dynamic_info != NULL, "Sanity");
 
-  // Now write the archived data including the file offsets.
-  const char* archive_name = Arguments::GetSharedDynamicArchivePath();
-  dynamic_info->open_for_write(archive_name);
-  size_t bitmap_size_in_bytes;
-  char* bitmap = MetaspaceShared::write_core_archive_regions(dynamic_info, NULL, NULL, bitmap_size_in_bytes);
-  dynamic_info->set_requested_base((char*)MetaspaceShared::requested_base_address());
-  dynamic_info->set_header_crc(dynamic_info->compute_header_crc());
-  dynamic_info->write_header();
-  dynamic_info->close();
-
-  write_cds_map_to_log(dynamic_info, NULL, NULL,
-                       bitmap, bitmap_size_in_bytes);
-  FREE_C_HEAP_ARRAY(char, bitmap);
+  dynamic_info->open_for_write(Arguments::GetSharedDynamicArchivePath());
+  ArchiveBuilder::write_archive(dynamic_info, NULL, NULL, NULL, NULL);
 
   address base = _requested_dynamic_archive_bottom;
   address top  = _requested_dynamic_archive_top;
@@ -366,13 +311,13 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
                          " [" SIZE_FORMAT " bytes header, " SIZE_FORMAT " bytes total]",
                          p2i(base), p2i(top), _header->header_size(), file_size);
 
-  log_info(cds, dynamic)("%d klasses; %d symbols", num_klasses, num_symbols);
+  log_info(cds, dynamic)("%d klasses; %d symbols", klasses()->length(), symbols()->length());
 }
 
 class VM_PopulateDynamicDumpSharedSpace: public VM_GC_Sync_Operation {
-  DynamicArchiveBuilder* _builder;
+  DynamicArchiveBuilder builder;
 public:
-  VM_PopulateDynamicDumpSharedSpace(DynamicArchiveBuilder* builder) : VM_GC_Sync_Operation(), _builder(builder) {}
+  VM_PopulateDynamicDumpSharedSpace() : VM_GC_Sync_Operation() {}
   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
   void doit() {
     ResourceMark rm;
@@ -386,7 +331,7 @@ public:
     }
     FileMapInfo::check_nonempty_dir_in_shared_path_table();
 
-    _builder->doit();
+    builder.doit();
   }
 };
 
@@ -397,8 +342,7 @@ void DynamicArchive::dump() {
     return;
   }
 
-  DynamicArchiveBuilder builder;
-  VM_PopulateDynamicDumpSharedSpace op(&builder);
+  VM_PopulateDynamicDumpSharedSpace op;
   VMThread::execute(&op);
 }
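The VM operation now embeds the builder by value, so the builder's lifetime is exactly the lifetime of the safepoint operation:

    VM_PopulateDynamicDumpSharedSpace op;  // constructs the DynamicArchiveBuilder member
    VMThread::execute(&op);                // builder.doit() runs inside the safepoint

Because ArchiveBuilder is a StackObj whose constructor asserts _current == NULL before registering itself, this arrangement also enforces that at most one archive dump is in flight at a time.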
"" : cpe->name(); - set_name(name, THREAD); + set_name(name, CHECK); } void SharedClassPathEntry::set_name(const char* name, TRAPS) { size_t len = strlen(name) + 1; - _name = MetadataFactory::new_array(ClassLoaderData::the_null_class_loader_data(), (int)len, THREAD); + _name = MetadataFactory::new_array(ClassLoaderData::the_null_class_loader_data(), (int)len, CHECK); strcpy(_name->data(), name); } @@ -358,12 +357,12 @@ void SharedClassPathEntry::copy_from(SharedClassPathEntry* ent, ClassLoaderData* _timestamp = ent->_timestamp; _filesize = ent->_filesize; _from_class_path_attr = ent->_from_class_path_attr; - set_name(ent->name(), THREAD); + set_name(ent->name(), CHECK); if (ent->is_jar() && !ent->is_signed() && ent->manifest() != NULL) { Array* buf = MetadataFactory::new_array(loader_data, ent->manifest_size(), - THREAD); + CHECK); char* p = (char*)(buf->data()); memcpy(p, ent->manifest(), ent->manifest_size()); set_manifest(buf); @@ -449,7 +448,7 @@ void SharedPathTable::metaspace_pointers_do(MetaspaceClosure* it) { } } -void SharedPathTable::dumptime_init(ClassLoaderData* loader_data, Thread* THREAD) { +void SharedPathTable::dumptime_init(ClassLoaderData* loader_data, TRAPS) { size_t entry_size = sizeof(SharedClassPathEntry); int num_entries = 0; num_entries += ClassLoader::num_boot_classpath_entries(); @@ -458,7 +457,7 @@ void SharedPathTable::dumptime_init(ClassLoaderData* loader_data, Thread* THREAD num_entries += FileMapInfo::num_non_existent_class_paths(); size_t bytes = entry_size * num_entries; - _table = MetadataFactory::new_array(loader_data, (int)bytes, THREAD); + _table = MetadataFactory::new_array(loader_data, (int)bytes, CHECK); _size = num_entries; } @@ -466,44 +465,43 @@ void SharedPathTable::dumptime_init(ClassLoaderData* loader_data, Thread* THREAD // It is needed because some Java code continues to execute after dynamic dump has finished. // However, during dynamic dump, we have modified FileMapInfo::_shared_path_table so // FileMapInfo::shared_path(i) returns incorrect information in ClassLoader::record_result(). -void FileMapInfo::copy_shared_path_table(ClassLoaderData* loader_data, Thread* THREAD) { +void FileMapInfo::copy_shared_path_table(ClassLoaderData* loader_data, TRAPS) { size_t entry_size = sizeof(SharedClassPathEntry); size_t bytes = entry_size * _shared_path_table.size(); - _saved_shared_path_table = SharedPathTable(MetadataFactory::new_array(loader_data, (int)bytes, THREAD), - _shared_path_table.size()); + Array* array = MetadataFactory::new_array(loader_data, (int)bytes, CHECK); + _saved_shared_path_table = SharedPathTable(array, _shared_path_table.size()); for (int i = 0; i < _shared_path_table.size(); i++) { - _saved_shared_path_table.path_at(i)->copy_from(shared_path(i), loader_data, THREAD); + _saved_shared_path_table.path_at(i)->copy_from(shared_path(i), loader_data, CHECK); } } -void FileMapInfo::allocate_shared_path_table() { +void FileMapInfo::allocate_shared_path_table(TRAPS) { Arguments::assert_is_dumping_archive(); - EXCEPTION_MARK; // The following calls should never throw, but would exit VM on error. ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data(); ClassPathEntry* jrt = ClassLoader::get_jrt_entry(); assert(jrt != NULL, "No modular java runtime image present when allocating the CDS classpath entry table"); - _shared_path_table.dumptime_init(loader_data, THREAD); + _shared_path_table.dumptime_init(loader_data, CHECK); // 1. 
boot class path
   int i = 0;
-  i = add_shared_classpaths(i, "boot", jrt, THREAD);
-  i = add_shared_classpaths(i, "app", ClassLoader::app_classpath_entries(), THREAD);
-  i = add_shared_classpaths(i, "module", ClassLoader::module_path_entries(), THREAD);
+  i = add_shared_classpaths(i, "boot", jrt, CHECK);
+  i = add_shared_classpaths(i, "app", ClassLoader::app_classpath_entries(), CHECK);
+  i = add_shared_classpaths(i, "module", ClassLoader::module_path_entries(), CHECK);
   for (int x = 0; x < num_non_existent_class_paths(); x++, i++) {
     const char* path = _non_existent_class_paths->at(x);
-    shared_path(i)->init_as_non_existent(path, THREAD);
+    shared_path(i)->init_as_non_existent(path, CHECK);
   }
   assert(i == _shared_path_table.size(), "number of shared path entry mismatch");
-  copy_shared_path_table(loader_data, THREAD);
+  copy_shared_path_table(loader_data, CHECK);
 }
 int FileMapInfo::add_shared_classpaths(int i, const char* which, ClassPathEntry *cpe, TRAPS) {
@@ -513,9 +511,9 @@ int FileMapInfo::add_shared_classpaths(int i, const char* which, ClassPathEntry
     const char* type = (is_jrt ? "jrt" : (cpe->is_jar_file() ? "jar" : "dir"));
     log_info(class, path)("add %s shared path (%s) %s", which, type, cpe->name());
     SharedClassPathEntry* ent = shared_path(i);
-    ent->init(is_jrt, is_module_path, cpe, THREAD);
+    ent->init(is_jrt, is_module_path, cpe, CHECK_0);
     if (cpe->is_jar_file()) {
-      update_jar_manifest(cpe, ent, THREAD);
+      update_jar_manifest(cpe, ent, CHECK_0);
     }
     if (is_jrt) {
       cpe = ClassLoader::get_next_boot_classpath_entry(cpe);
@@ -670,7 +668,7 @@ void FileMapInfo::update_jar_manifest(ClassPathEntry *cpe, SharedClassPathEntry*
   manifest = ClassLoaderExt::read_raw_manifest(cpe, &manifest_size, CHECK);
   Array<u1>* buf = MetadataFactory::new_array<u1>(loader_data, manifest_size,
-                                                  THREAD);
+                                                  CHECK);
   char* p = (char*)(buf->data());
   memcpy(p, manifest, manifest_size);
   ent->set_manifest(buf);
@@ -1272,7 +1270,7 @@ void FileMapRegion::init(int region_index, size_t mapping_offset, size_t size, b
 static const char* region_name(int region_index) {
   static const char* names[] = {
-    "mc", "rw", "ro", "bm", "ca0", "ca1", "oa0", "oa1"
+    "rw", "ro", "bm", "ca0", "ca1", "oa0", "oa1"
   };
   const int num_regions = sizeof(names)/sizeof(names[0]);
   assert(0 <= region_index && region_index < num_regions, "sanity");
@@ -1533,7 +1531,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
 }
 // Memory map a region in the address space.
-static const char* shared_region_name[] = { "MiscCode", "ReadWrite", "ReadOnly", "Bitmap",
+static const char* shared_region_name[] = { "ReadWrite", "ReadOnly", "Bitmap",
                                             "String1", "String2", "OpenArchive1", "OpenArchive2" };
 MapArchiveResult FileMapInfo::map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs) {
@@ -1678,7 +1676,7 @@ char* FileMapInfo::map_bitmap_region() {
 }
 // This is called when we cannot map the archive at the requested base address (usually 0x800000000).
-// We relocate all pointers in the 3 core regions (mc, ro, rw).
+// We relocate all pointers in the 2 core regions (ro, rw).
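// [Editor's note] Context for the relocation comment above and the function
// that follows: the relocation is bitmap-driven. At dump time,
// ArchivePtrMarker records, one bit per pointer-sized slot, which words of the
// core regions hold archived pointers; at run time, if the archive cannot be
// mapped at the requested base, every marked slot is shifted by addr_delta.
// A minimal standalone sketch of that idea follows. relocate_marked_slots and
// the std::vector<bool> ptrmap are illustrative stand-ins, not HotSpot API
// (HotSpot walks a CHeapBitMap with a BitMapClosure instead).

#include <cstddef>
#include <cstdint>
#include <vector>

// Shift every marked, non-null pointer slot in the mapped core regions.
static void relocate_marked_slots(uintptr_t* region_base,
                                  const std::vector<bool>& ptrmap,
                                  intptr_t addr_delta) {
  for (std::size_t i = 0; i < ptrmap.size(); i++) {
    if (ptrmap[i] && region_base[i] != 0) {
      region_base[i] += addr_delta;  // rebase the archived pointer
    }
  }
}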
bool FileMapInfo::relocate_pointers_in_core_regions(intx addr_delta) { log_debug(cds, reloc)("runtime archive relocation start"); char* bitmap_base = map_bitmap_region(); @@ -2173,9 +2171,9 @@ char* FileMapInfo::region_addr(int idx) { } } -// The 3 core spaces are MC->RW->RO +// The 2 core spaces are RW->RO FileMapRegion* FileMapInfo::first_core_space() const { - return space_at(MetaspaceShared::mc); + return space_at(MetaspaceShared::rw); } FileMapRegion* FileMapInfo::last_core_space() const { @@ -2281,8 +2279,7 @@ bool FileMapInfo::validate_header() { // Check if a given address is within one of the shared regions bool FileMapInfo::is_in_shared_region(const void* p, int idx) { assert(idx == MetaspaceShared::ro || - idx == MetaspaceShared::rw || - idx == MetaspaceShared::mc, "invalid region index"); + idx == MetaspaceShared::rw, "invalid region index"); char* base = region_addr(idx); if (p >= base && p < base + space_at(idx)->used()) { return true; @@ -2364,7 +2361,8 @@ ClassFileStream* FileMapInfo::open_stream_for_jvmti(InstanceKlass* ik, Handle cl name->utf8_length()); ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader()); ClassFileStream* cfs = cpe->open_stream_for_loader(file_name, loader_data, THREAD); - assert(cfs != NULL, "must be able to read the classfile data of shared classes for built-in loaders."); + assert(!HAS_PENDING_EXCEPTION && + cfs != NULL, "must be able to read the classfile data of shared classes for built-in loaders."); log_debug(cds, jvmti)("classfile data for %s [%d: %s] = %d bytes", class_name, path_index, cfs->source(), cfs->length()); return cfs; diff --git a/src/hotspot/share/memory/filemap.hpp b/src/hotspot/share/memory/filemap.hpp index b402f9955903581efd83562b3b11f60b9e9231ce..457ce4459142ecacbf18761d909f8919159b6f2d 100644 --- a/src/hotspot/share/memory/filemap.hpp +++ b/src/hotspot/share/memory/filemap.hpp @@ -25,25 +25,23 @@ #ifndef SHARE_MEMORY_FILEMAP_HPP #define SHARE_MEMORY_FILEMAP_HPP -#include "classfile/classLoader.hpp" #include "include/cds.h" #include "memory/metaspaceShared.hpp" -#include "memory/metaspace.hpp" #include "oops/compressedOops.hpp" #include "utilities/align.hpp" -// Layout of the file: -// header: dump of archive instance plus versioning info, datestamp, etc. -// [magic # = 0xF00BABA2] -// ... padding to align on page-boundary -// read-write space -// read-only space -// misc data (block offset table, string table, symbols, dictionary, etc.) 
-// tag(666) +// To understand the layout of the CDS archive file: +// +// java -Xlog:cds+map=info:file=cds.map:none:filesize=0 +// java -Xlog:cds+map=debug:file=cds.map:none:filesize=0 +// java -Xlog:cds+map=trace:file=cds.map:none:filesize=0 static const int JVM_IDENT_MAX = 256; class CHeapBitMap; +class ClassFileStream; +class ClassLoaderData; +class ClassPathEntry; class outputStream; class SharedClassPathEntry { @@ -117,7 +115,7 @@ public: SharedPathTable() : _table(NULL), _size(0) {} SharedPathTable(Array* table, int size) : _table(table), _size(size) {} - void dumptime_init(ClassLoaderData* loader_data, Thread* THREAD); + void dumptime_init(ClassLoaderData* loader_data, TRAPS); void metaspace_pointers_do(MetaspaceClosure* it); int size() { @@ -201,7 +199,6 @@ class FileMapHeader: private CDSFileMapHeaderBase { bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers size_t _cloned_vtables_offset; // The address of the first cloned vtable size_t _serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize() - size_t _i2i_entry_code_buffers_offset; address _heap_end; // heap end at dump time. bool _base_archive_is_default; // indicates if the base archive is the system default one @@ -265,7 +262,6 @@ public: address narrow_klass_base() const { return (address)mapped_base_address(); } char* cloned_vtables() const { return from_mapped_offset(_cloned_vtables_offset); } char* serialized_data() const { return from_mapped_offset(_serialized_data_offset); } - address i2i_entry_code_buffers() const { return (address)from_mapped_offset(_i2i_entry_code_buffers_offset); } address heap_end() const { return _heap_end; } bool base_archive_is_default() const { return _base_archive_is_default; } const char* jvm_ident() const { return _jvm_ident; } @@ -292,9 +288,6 @@ public: void set_ptrmap_size_in_bits(size_t s) { _ptrmap_size_in_bits = s; } void set_mapped_base_address(char* p) { _mapped_base_address = p; } void set_heap_obj_roots(narrowOop r) { _heap_obj_roots = r; } - void set_i2i_entry_code_buffers(address p) { - set_as_offset((char*)p, &_i2i_entry_code_buffers_offset); - } void set_shared_path_table(SharedPathTable table) { set_as_offset((char*)table.table(), &_shared_path_table_offset); @@ -409,11 +402,6 @@ public: bool is_file_position_aligned() const; void align_file_position(); - address i2i_entry_code_buffers() const { return header()->i2i_entry_code_buffers(); } - void set_i2i_entry_code_buffers(address addr) const { - header()->set_i2i_entry_code_buffers(addr); - } - bool is_static() const { return _is_static; } bool is_mapped() const { return _is_mapped; } void set_is_mapped(bool v) { _is_mapped = v; } @@ -498,8 +486,8 @@ public: // Stop CDS sharing and unmap CDS regions. 
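// [Editor's note] The declarations just below switch from "Thread* THREAD"
// parameters to TRAPS, and the filemap.cpp hunks earlier switch call sites
// from passing THREAD to CHECK (or CHECK_0 in int-returning callers). Both
// macros come from utilities/exceptions.hpp: TRAPS declares the
// current-thread parameter, and CHECK expands to THREAD followed by an early
// return if the callee left a pending exception, so allocation failures now
// propagate instead of being silently ignored. A hedged sketch of the idiom
// (store_name_example is invented for illustration and is not part of this
// patch; assumes memory/metadataFactory.hpp and utilities/exceptions.hpp):

static void store_name_example(const char* name, TRAPS) {
  // new_array<char> may throw (e.g. OutOfMemoryError). With THREAD we would
  // fall through and dereference a NULL array; CHECK returns to our caller.
  Array<char>* a = MetadataFactory::new_array<char>(
      ClassLoaderData::the_null_class_loader_data(), (int)strlen(name) + 1, CHECK);
  strcpy(a->data(), name);
}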
  static void stop_sharing_and_unmap(const char* msg);
-  static void allocate_shared_path_table();
-  static void copy_shared_path_table(ClassLoaderData* loader_data, Thread* THREAD);
+  static void allocate_shared_path_table(TRAPS);
+  static void copy_shared_path_table(ClassLoaderData* loader_data, TRAPS);
   static int add_shared_classpaths(int i, const char* which, ClassPathEntry *cpe, TRAPS);
   static void check_nonempty_dir_in_shared_path_table();
   bool validate_shared_path_table();
diff --git a/src/hotspot/share/memory/heap.cpp b/src/hotspot/share/memory/heap.cpp
index 098095fb29353df2116276c940a4e4f16fcc2ab5..fa9de51598078464108af82d41c6b1cc6526f7a7 100644
--- a/src/hotspot/share/memory/heap.cpp
+++ b/src/hotspot/share/memory/heap.cpp
@@ -207,17 +207,12 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
   _log2_segment_size = exact_log2(segment_size);
   // Reserve and initialize space for _memory.
-  size_t page_size = os::vm_page_size();
-  if (os::can_execute_large_page_memory()) {
-    const size_t min_pages = 8;
-    page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
-                     os::page_size_for_region_aligned(rs.size(), min_pages));
-  }
-
+  const size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
   const size_t granularity = os::vm_allocation_granularity();
   const size_t c_size = align_up(committed_size, page_size);
+  assert(c_size <= rs.size(), "alignment made committed size too large");
-  os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
+  os::trace_page_sizes(_name, c_size, rs.size(), page_size,
                        rs.base(), rs.size());
   if (!_memory.initialize(rs, c_size)) {
     return false;
diff --git a/src/hotspot/share/memory/heapShared.cpp b/src/hotspot/share/memory/heapShared.cpp
index 3fd2c228079ba51d59dd3c4f5e4dce792b83c7a8..b2471029c644b6ea466f9d2a48b47b217e81c0bc 100644
--- a/src/hotspot/share/memory/heapShared.cpp
+++ b/src/hotspot/share/memory/heapShared.cpp
@@ -298,7 +298,7 @@ oop HeapShared::archive_heap_object(oop obj) {
 }
 void HeapShared::archive_klass_objects() {
-  GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
+  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
   assert(klasses != NULL, "sanity");
   for (int i = 0; i < klasses->length(); i++) {
     Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));
@@ -573,7 +573,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
     int num_entry_fields = entry_fields->length();
     assert(num_entry_fields % 2 == 0, "sanity");
     _entry_field_records =
-      MetaspaceShared::new_ro_array<int>(num_entry_fields);
+      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
     for (int i = 0 ; i < num_entry_fields; i++) {
       _entry_field_records->at_put(i, entry_fields->at(i));
     }
@@ -584,7 +584,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
   if (subgraph_object_klasses != NULL) {
     int num_subgraphs_klasses = subgraph_object_klasses->length();
     _subgraph_object_klasses =
-      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
+      ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
     for (int i = 0; i < num_subgraphs_klasses; i++) {
       Klass* subgraph_k = subgraph_object_klasses->at(i);
       if (log_is_enabled(Info, cds, heap)) {
@@ -610,7 +610,7 @@ struct CopyKlassSubGraphInfoToArchive : StackObj {
   bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
     if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
       ArchivedKlassSubGraphInfoRecord* record = -
(ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord)); + (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord)); record->init(&info); unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)klass); diff --git a/src/hotspot/share/memory/metadataFactory.hpp b/src/hotspot/share/memory/metadataFactory.hpp index d18f1301120b871f2659acf84bd535bc4830b6f5..45237efd6780bb46568f78a14694f4feb5e6300e 100644 --- a/src/hotspot/share/memory/metadataFactory.hpp +++ b/src/hotspot/share/memory/metadataFactory.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "classfile/classLoaderData.hpp" #include "memory/classLoaderMetaspace.hpp" -#include "oops/array.hpp" +#include "oops/array.inline.hpp" #include "utilities/exceptions.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp index 4462b681680dbb4dc45bbc8c213476a9fc4a89f0..87a0e5764b8469fb716a036a1c0fd346a8c5ef81 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -43,6 +43,7 @@ #include "memory/metaspace/virtualSpaceList.hpp" #include "memory/metaspaceShared.hpp" #include "memory/metaspaceTracer.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/compressedOops.hpp" diff --git a/src/hotspot/share/memory/metaspace.hpp b/src/hotspot/share/memory/metaspace.hpp index 182660f01125d8cb62ecb04976b955be8b6b90cd..1309a2fe3adaff4a539e048909f46d5a2370555c 100644 --- a/src/hotspot/share/memory/metaspace.hpp +++ b/src/hotspot/share/memory/metaspace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,9 +25,6 @@ #define SHARE_MEMORY_METASPACE_HPP #include "memory/allocation.hpp" -#include "memory/memRegion.hpp" -#include "memory/metaspaceChunkFreeListSummary.hpp" -#include "memory/virtualspace.hpp" #include "runtime/globals.hpp" #include "utilities/exceptions.hpp" #include "utilities/globalDefinitions.hpp" @@ -37,6 +34,7 @@ class MetaspaceShared; class MetaspaceTracer; class Mutex; class outputStream; +class ReservedSpace; namespace metaspace { class MetaspaceSizesSnapshot; @@ -152,115 +150,5 @@ public: }; -////////////////// MetaspaceGC /////////////////////// - -// Metaspace are deallocated when their class loader are GC'ed. -// This class implements a policy for inducing GC's to recover -// Metaspaces. 
- -class MetaspaceGCThresholdUpdater : public AllStatic { - public: - enum Type { - ComputeNewSize, - ExpandAndAllocate, - Last - }; - - static const char* to_string(MetaspaceGCThresholdUpdater::Type updater) { - switch (updater) { - case ComputeNewSize: - return "compute_new_size"; - case ExpandAndAllocate: - return "expand_and_allocate"; - default: - assert(false, "Got bad updater: %d", (int) updater); - return NULL; - }; - } -}; - -class MetaspaceGC : public AllStatic { - - // The current high-water-mark for inducing a GC. - // When committed memory of all metaspaces reaches this value, - // a GC is induced and the value is increased. Size is in bytes. - static volatile size_t _capacity_until_GC; - static uint _shrink_factor; - - static size_t shrink_factor() { return _shrink_factor; } - void set_shrink_factor(uint v) { _shrink_factor = v; } - - public: - - static void initialize(); - static void post_initialize(); - - static size_t capacity_until_GC(); - static bool inc_capacity_until_GC(size_t v, - size_t* new_cap_until_GC = NULL, - size_t* old_cap_until_GC = NULL, - bool* can_retry = NULL); - static size_t dec_capacity_until_GC(size_t v); - - // The amount to increase the high-water-mark (_capacity_until_GC) - static size_t delta_capacity_until_GC(size_t bytes); - - // Tells if we have can expand metaspace without hitting set limits. - static bool can_expand(size_t words, bool is_class); - - // Returns amount that we can expand without hitting a GC, - // measured in words. - static size_t allowed_expansion(); - - // Calculate the new high-water mark at which to induce - // a GC. - static void compute_new_size(); -}; - -class MetaspaceUtils : AllStatic { -public: - - // Committed space actually in use by Metadata - static size_t used_words(); - static size_t used_words(Metaspace::MetadataType mdtype); - - // Space committed for Metaspace - static size_t committed_words(); - static size_t committed_words(Metaspace::MetadataType mdtype); - - // Space reserved for Metaspace - static size_t reserved_words(); - static size_t reserved_words(Metaspace::MetadataType mdtype); - - // _bytes() variants for convenience... - static size_t used_bytes() { return used_words() * BytesPerWord; } - static size_t used_bytes(Metaspace::MetadataType mdtype) { return used_words(mdtype) * BytesPerWord; } - static size_t committed_bytes() { return committed_words() * BytesPerWord; } - static size_t committed_bytes(Metaspace::MetadataType mdtype) { return committed_words(mdtype) * BytesPerWord; } - static size_t reserved_bytes() { return reserved_words() * BytesPerWord; } - static size_t reserved_bytes(Metaspace::MetadataType mdtype) { return reserved_words(mdtype) * BytesPerWord; } - - // (See JDK-8251342). Implement or Consolidate. - static MetaspaceChunkFreeListSummary chunk_free_list_summary(Metaspace::MetadataType mdtype) { - return MetaspaceChunkFreeListSummary(0,0,0,0,0,0,0,0); - } - - // Log change in used metadata. - static void print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values); - - // This will print out a basic metaspace usage report but - // unlike print_report() is guaranteed not to lock or to walk the CLDG. - static void print_basic_report(outputStream* st, size_t scale = 0); - - // Prints a report about the current metaspace state. - // Function will walk the CLDG and will lock the expand lock; if that is not - // convenient, use print_basic_report() instead. 
- static void print_report(outputStream* out, size_t scale = 0); - - static void print_on(outputStream * out); - - DEBUG_ONLY(static void verify();) - -}; #endif // SHARE_MEMORY_METASPACE_HPP diff --git a/src/hotspot/share/memory/metaspace/commitLimiter.cpp b/src/hotspot/share/memory/metaspace/commitLimiter.cpp index 94bffb0492a81a4b8187763a8618e8928f527e5f..8887804b030d11d2be117d01aae63cf262b67194 100644 --- a/src/hotspot/share/memory/metaspace/commitLimiter.cpp +++ b/src/hotspot/share/memory/metaspace/commitLimiter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,6 +26,7 @@ #include "precompiled.hpp" #include "memory/metaspace.hpp" #include "memory/metaspace/commitLimiter.hpp" +#include "memory/metaspaceUtils.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp b/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp index 7e0e0969f7c9e0ef992533204132a801fcb59e27..d54c9d236b904edefbec88b553c1ad6d18c672e8 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -24,9 +24,12 @@ */ #include "precompiled.hpp" +#include "memory/metaspace.hpp" #include "memory/metaspace/metaspaceDCmd.hpp" #include "memory/metaspace/metaspaceReporter.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" +#include "runtime/vmOperations.hpp" #include "services/diagnosticCommand.hpp" #include "services/nmtCommon.hpp" diff --git a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp index d772d07162530e20751842b4649688b142a6e758..f055f78730526640f2ea907c020ea7e0da9cf7bf 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -37,6 +37,7 @@ #include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp" #include "memory/metaspace/runningCounters.hpp" #include "memory/metaspace/virtualSpaceList.hpp" +#include "memory/metaspaceUtils.hpp" #include "runtime/os.hpp" namespace metaspace { @@ -95,7 +96,8 @@ static void print_vs(outputStream* out, size_t scale) { static void print_settings(outputStream* out, size_t scale) { out->print("MaxMetaspaceSize: "); - if (MaxMetaspaceSize >= (max_uintx) - (2 * os::vm_page_size())) { + // See Metaspace::ergo_initialize() for how MaxMetaspaceSize is rounded + if (MaxMetaspaceSize >= align_down(max_uintx, Metaspace::commit_alignment())) { // aka "very big". Default is max_uintx, but due to rounding in arg parsing the real // value is smaller. 
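// [Editor's note] Worked example for the new test above, assuming a 64-bit VM
// and a hypothetical commit alignment of 64K: max_uintx is 2^64 - 1, and
// Metaspace::ergo_initialize() rounds MaxMetaspaceSize down to the commit
// alignment, so the default becomes align_down(2^64 - 1, 64K) = 2^64 - 64K.
// That rounded value still satisfies ">= align_down(max_uintx,
// Metaspace::commit_alignment())" and is reported as "unlimited", whereas the
// old "max_uintx - 2 * os::vm_page_size()" threshold (2^64 - 1 - 8K with 4K
// pages) would miss it whenever the commit alignment exceeds two pages.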
out->print("unlimited"); @@ -106,8 +108,18 @@ static void print_settings(outputStream* out, size_t scale) { if (Metaspace::using_class_space()) { out->print("CompressedClassSpaceSize: "); print_human_readable_size(out, CompressedClassSpaceSize, scale); + } else { + out->print("No class space"); } out->cr(); + out->print("Initial GC threshold: "); + print_human_readable_size(out, MetaspaceSize, scale); + out->cr(); + out->print("Current GC threshold: "); + print_human_readable_size(out, MetaspaceGC::capacity_until_GC(), scale); + out->cr(); + out->print_cr("CDS: %s", (UseSharedSpaces ? "on" : (DumpSharedSpaces ? "dump" : "off"))); + out->print_cr("MetaspaceReclaimPolicy: %s", MetaspaceReclaimPolicy); Settings::print_on(out); } diff --git a/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp b/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp index 26e07441504d22daf00d700ac5fcfdf93cefd524..b1d7269c629c868b2b36b407101fde6eb1129291 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2019, Twitter, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,6 +26,7 @@ #include "precompiled.hpp" #include "memory/metaspace.hpp" #include "memory/metaspace/metaspaceSizesSnapshot.hpp" +#include "memory/metaspaceUtils.hpp" namespace metaspace { diff --git a/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.cpp b/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.cpp index b100bf957c0276fdcb57b39ebff5833da583bff5..f852a5782ef92d012bd15b5a97f2d659cc47e3ff 100644 --- a/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.cpp +++ b/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure.cpp @@ -24,7 +24,6 @@ */ #include "precompiled.hpp" #include "memory/metaspace/printMetaspaceInfoKlassClosure.hpp" -#include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "oops/klass.hpp" #include "oops/reflectionAccessorImplKlassHelper.hpp" diff --git a/src/hotspot/share/memory/metaspaceCounters.cpp b/src/hotspot/share/memory/metaspaceCounters.cpp index 09e0f47c4addd6c5f640ae4ed2193abc0b9adbfd..34f2011f2e624b4f744259211c8647d8b27cd243 100644 --- a/src/hotspot/share/memory/metaspaceCounters.cpp +++ b/src/hotspot/share/memory/metaspaceCounters.cpp @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "memory/metaspace.hpp" #include "memory/metaspaceCounters.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "runtime/globals.hpp" #include "runtime/perfData.hpp" diff --git a/src/hotspot/share/memory/metaspaceShared.cpp b/src/hotspot/share/memory/metaspaceShared.cpp index 7603b4a4a5740f3272b1acbecfcf68bb66df2671..cdadceaf345c2820c9a28bde0cb5c4a62ec76de4 100644 --- a/src/hotspot/share/memory/metaspaceShared.cpp +++ b/src/hotspot/share/memory/metaspaceShared.cpp @@ -40,18 +40,15 @@ #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "gc/shared/gcVMOperations.hpp" -#include "interpreter/abstractInterpreter.hpp" #include "interpreter/bytecodeStream.hpp" #include "interpreter/bytecodes.hpp" #include "logging/log.hpp" #include "logging/logMessage.hpp" #include "memory/archiveBuilder.hpp" -#include "memory/archiveUtils.inline.hpp" #include "memory/cppVtables.hpp" #include 
"memory/dumpAllocStats.hpp" -#include "memory/dynamicArchive.hpp" #include "memory/filemap.hpp" -#include "memory/heapShared.inline.hpp" +#include "memory/heapShared.hpp" #include "memory/metaspace.hpp" #include "memory/metaspaceClosure.hpp" #include "memory/metaspaceShared.hpp" @@ -75,20 +72,15 @@ #include "utilities/bitMap.inline.hpp" #include "utilities/ostream.hpp" #include "utilities/defaultStream.hpp" -#include "utilities/hashtable.inline.hpp" #if INCLUDE_G1GC #include "gc/g1/g1CollectedHeap.inline.hpp" #endif -ReservedSpace MetaspaceShared::_shared_rs; -VirtualSpace MetaspaceShared::_shared_vs; ReservedSpace MetaspaceShared::_symbol_rs; VirtualSpace MetaspaceShared::_symbol_vs; -MetaspaceSharedStats MetaspaceShared::_stats; bool MetaspaceShared::_has_error_classes; bool MetaspaceShared::_archive_loading_failed = false; bool MetaspaceShared::_remapped_readwrite = false; -address MetaspaceShared::_i2i_entry_code_buffers = NULL; void* MetaspaceShared::_shared_metaspace_static_top = NULL; intx MetaspaceShared::_relocation_delta; char* MetaspaceShared::_requested_base_address; @@ -96,7 +88,6 @@ bool MetaspaceShared::_use_optimized_module_handling = true; bool MetaspaceShared::_use_full_module_graph = true; // The CDS archive is divided into the following regions: -// mc - misc code (the method entry trampolines, c++ vtables) // rw - read-write metadata // ro - read-only metadata and read-only tables // @@ -105,63 +96,30 @@ bool MetaspaceShared::_use_full_module_graph = true; // oa0 - open archive heap space #0 // oa1 - open archive heap space #1 (may be empty) // -// The mc, rw, and ro regions are linearly allocated, starting from -// SharedBaseAddress, in the order of mc->rw->ro. The size of these 3 regions -// are page-aligned, and there's no gap between any consecutive regions. +// bm - bitmap for relocating the above 7 regions. // -// These 3 regions are populated in the following steps: -// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are -// temporarily allocated outside of the shared regions. Only the method entry -// trampolines are written into the mc region. -// [2] C++ vtables are copied into the mc region. +// The rw, and ro regions are linearly allocated, in the order of rw->ro. +// These regions are aligned with MetaspaceShared::reserved_space_alignment(). +// +// These 2 regions are populated in the following steps: +// [0] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are +// temporarily allocated outside of the shared regions. +// [1] We enter a safepoint and allocate a buffer for the rw/ro regions. +// [2] C++ vtables are copied into the rw region. // [3] ArchiveBuilder copies RW metadata into the rw region. // [4] ArchiveBuilder copies RO metadata into the ro region. // [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data // are copied into the ro region as read-only tables. // -// The s0/s1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects. -// Their layout is independent of the other 4 regions. 
- -static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols"); -static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0; - -void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space) { - first_space->init(&_shared_rs, &_shared_vs); -} - -DumpRegion* MetaspaceShared::misc_code_dump_space() { - return &_mc_region; -} - -DumpRegion* MetaspaceShared::read_write_dump_space() { - return &_rw_region; -} - -DumpRegion* MetaspaceShared::read_only_dump_space() { - return &_ro_region; -} +// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects. +// Their layout is independent of the rw/ro regions. -void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next, - ReservedSpace* rs) { - current->pack(next); -} +static DumpRegion _symbol_region("symbols"); char* MetaspaceShared::symbol_space_alloc(size_t num_bytes) { return _symbol_region.allocate(num_bytes); } -char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) { - return _mc_region.allocate(num_bytes); -} - -char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) { - return _ro_region.allocate(num_bytes); -} - -char* MetaspaceShared::read_write_space_alloc(size_t num_bytes) { - return _rw_region.allocate(num_bytes); -} - size_t MetaspaceShared::reserved_space_alignment() { return os::vm_allocation_granularity(); } static bool shared_base_valid(char* shared_base) { @@ -316,39 +274,6 @@ void MetaspaceShared::read_extra_data(const char* filename, TRAPS) { } } -void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) { - Arguments::assert_is_dumping_archive(); - char* base = rs->base(); - size_t need_committed_size = newtop - base; - size_t has_committed_size = vs->committed_size(); - if (need_committed_size < has_committed_size) { - return; - } - - size_t min_bytes = need_committed_size - has_committed_size; - size_t preferred_bytes = 1 * M; - size_t uncommitted = vs->reserved_size() - has_committed_size; - - size_t commit =MAX2(min_bytes, preferred_bytes); - commit = MIN2(commit, uncommitted); - assert(commit <= uncommitted, "sanity"); - - bool result = vs->expand_by(commit, false); - if (rs == &_shared_rs) { - ArchivePtrMarker::expand_ptr_end((address*)vs->high()); - } - - if (!result) { - vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes", - need_committed_size)); - } - - assert(rs == &_shared_rs || rs == &_symbol_rs, "must be"); - const char* which = (rs == &_shared_rs) ? "shared" : "symbol"; - log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]", - which, commit, vs->actual_committed_size(), vs->high()); -} - // Read/write a data stream for restoring/preserving metadata pointers and // miscellaneous data from/to the shared archive file. @@ -395,30 +320,6 @@ void MetaspaceShared::serialize(SerializeClosure* soc) { soc->do_tag(666); } -void MetaspaceShared::init_misc_code_space() { - // We don't want any valid object to be at the very bottom of the archive. - // See ArchivePtrMarker::mark_pointer(). 
- MetaspaceShared::misc_code_space_alloc(16); - - size_t trampoline_size = SharedRuntime::trampoline_size(); - size_t buf_size = (size_t)AbstractInterpreter::number_of_method_entries * trampoline_size; - _i2i_entry_code_buffers = (address)misc_code_space_alloc(buf_size); -} - -address MetaspaceShared::i2i_entry_code_buffers() { - assert(DumpSharedSpaces || UseSharedSpaces, "must be"); - assert(_i2i_entry_code_buffers != NULL, "must already been initialized"); - return _i2i_entry_code_buffers; -} - -// Global object for holding classes that have been loaded. Since this -// is run at a safepoint just before exit, this is the entire set of classes. -static GrowableArray* _global_klass_objects; - -GrowableArray* MetaspaceShared::collected_klasses() { - return _global_klass_objects; -} - static void rewrite_nofast_bytecode(const methodHandle& method) { BytecodeStream bcs(method); while (!bcs.is_last_bytecode()) { @@ -459,7 +360,7 @@ private: GrowableArray *_closed_archive_heap_oopmaps; GrowableArray *_open_archive_heap_oopmaps; - void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN; + void dump_java_heap_objects(GrowableArray* klasses) NOT_CDS_JAVA_HEAP_RETURN; void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN; void dump_archive_heap_oopmaps(GrowableArray* regions, GrowableArray* oopmaps); @@ -468,10 +369,6 @@ private: SymbolTable::write_to_archive(symbols); } char* dump_read_only_tables(); - void print_region_stats(FileMapInfo* map_info); - void print_bitmap_region_stats(size_t size, size_t total_size); - void print_heap_region_stats(GrowableArray *heap_mem, - const char *name, size_t total_size); public: @@ -488,8 +385,7 @@ public: class StaticArchiveBuilder : public ArchiveBuilder { public: - StaticArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region) - : ArchiveBuilder(mc_region, rw_region, ro_region) {} + StaticArchiveBuilder() : ArchiveBuilder() {} virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) { FileMapInfo::metaspace_pointers_do(it, false); @@ -516,8 +412,9 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() { SystemDictionaryShared::write_to_archive(); // Write the other data to the output array. - char* start = _ro_region.top(); - WriteClosure wc(&_ro_region); + DumpRegion* ro_region = ArchiveBuilder::current()->ro_region(); + char* start = ro_region->top(); + WriteClosure wc(ro_region); MetaspaceShared::serialize(&wc); // Write the bitmaps for patching the archive heap regions @@ -557,111 +454,52 @@ void VM_PopulateDumpSharedSpace::doit() { // that so we don't have to walk the SystemDictionary again. 
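// [Editor's note] The doit() rewrite in the hunks below replaces the old
// hand-packed mc->rw->ro flow with a single ArchiveBuilder pipeline. The
// builder method names are taken from this patch; the skeleton is a
// simplified reading of the new code, with heap-object dumping, logging and
// read-only-table setup elided:
//
//   StaticArchiveBuilder builder;
//   builder.gather_source_objs();                     // collect metadata to copy
//   builder.reserve_buffer();                         // one dump-time buffer, no mc region
//   char* cloned_vtables = CppVtables::dumptime_init(&builder);  // vtables land in rw now
//   builder.dump_rw_metadata();                       // step [3] of the region comment
//   builder.dump_ro_metadata();                       // step [4]
//   builder.relocate_metaspaceobj_embedded_pointers();
//   builder.make_klasses_shareable();
//   builder.relocate_to_requested();                  // patch for the requested base address
//   builder.write_archive(mapinfo, ...);              // regions, bitmap, header in one call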
SystemDictionaryShared::check_excluded_classes(); - StaticArchiveBuilder builder(&_mc_region, &_rw_region, &_ro_region); - builder.gather_klasses_and_symbols(); - builder.reserve_buffer(); - _global_klass_objects = builder.klasses(); - + StaticArchiveBuilder builder; builder.gather_source_objs(); + builder.reserve_buffer(); - MetaspaceShared::init_misc_code_space(); - builder.allocate_method_trampoline_info(); - builder.allocate_method_trampolines(); - - char* cloned_vtables = CppVtables::dumptime_init(); + char* cloned_vtables = CppVtables::dumptime_init(&builder); - { - _mc_region.pack(&_rw_region); - builder.set_current_dump_space(&_rw_region); - builder.dump_rw_region(); -#if INCLUDE_CDS_JAVA_HEAP - if (MetaspaceShared::use_full_module_graph()) { - // Archive the ModuleEntry's and PackageEntry's of the 3 built-in loaders - char* start = _rw_region.top(); - ClassLoaderDataShared::allocate_archived_tables(); - ArchiveBuilder::alloc_stats()->record_modules(_rw_region.top() - start, /*read_only*/false); - } -#endif - } - { - _rw_region.pack(&_ro_region); - builder.set_current_dump_space(&_ro_region); - builder.dump_ro_region(); -#if INCLUDE_CDS_JAVA_HEAP - if (MetaspaceShared::use_full_module_graph()) { - char* start = _ro_region.top(); - ClassLoaderDataShared::init_archived_tables(); - ArchiveBuilder::alloc_stats()->record_modules(_ro_region.top() - start, /*read_only*/true); - } -#endif - } + builder.dump_rw_metadata(); + builder.dump_ro_metadata(); builder.relocate_metaspaceobj_embedded_pointers(); // Dump supported java heap objects _closed_archive_heap_regions = NULL; _open_archive_heap_regions = NULL; - dump_java_heap_objects(); + dump_java_heap_objects(builder.klasses()); builder.relocate_roots(); dump_shared_symbol_table(builder.symbols()); builder.relocate_vm_classes(); - log_info(cds)("Update method trampolines"); - builder.update_method_trampolines(); - log_info(cds)("Make classes shareable"); builder.make_klasses_shareable(); char* serialized_data = dump_read_only_tables(); - _ro_region.pack(); SystemDictionaryShared::adjust_lambda_proxy_class_dictionary(); // The vtable clones contain addresses of the current process. - // We don't want to write these addresses into the archive. Same for i2i buffer. + // We don't want to write these addresses into the archive. CppVtables::zero_archived_vtables(); // relocate the data so that it can be mapped to MetaspaceShared::requested_base_address() // without runtime relocation. builder.relocate_to_requested(); - // Create and write the archive file that maps the shared spaces. 
- + // Write the archive file FileMapInfo* mapinfo = new FileMapInfo(true); mapinfo->populate_header(os::vm_allocation_granularity()); mapinfo->set_serialized_data(serialized_data); mapinfo->set_cloned_vtables(cloned_vtables); - mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers()); mapinfo->open_for_write(); - size_t bitmap_size_in_bytes; - char* bitmap = MetaspaceShared::write_core_archive_regions(mapinfo, _closed_archive_heap_oopmaps, - _open_archive_heap_oopmaps, - bitmap_size_in_bytes); - _total_closed_archive_region_size = mapinfo->write_archive_heap_regions( - _closed_archive_heap_regions, - _closed_archive_heap_oopmaps, - MetaspaceShared::first_closed_archive_heap_region, - MetaspaceShared::max_closed_archive_heap_region); - _total_open_archive_region_size = mapinfo->write_archive_heap_regions( - _open_archive_heap_regions, - _open_archive_heap_oopmaps, - MetaspaceShared::first_open_archive_heap_region, - MetaspaceShared::max_open_archive_heap_region); - - mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address()); - mapinfo->set_header_crc(mapinfo->compute_header_crc()); - mapinfo->write_header(); - print_region_stats(mapinfo); - mapinfo->close(); - - builder.write_cds_map_to_log(mapinfo, _closed_archive_heap_regions, _open_archive_heap_regions, - bitmap, bitmap_size_in_bytes); - FREE_C_HEAP_ARRAY(char, bitmap); - - if (log_is_enabled(Info, cds)) { - builder.print_stats(int(_ro_region.used()), int(_rw_region.used()), int(_mc_region.used())); - } + builder.write_archive(mapinfo, + _closed_archive_heap_regions, + _open_archive_heap_regions, + _closed_archive_heap_oopmaps, + _open_archive_heap_oopmaps); if (PrintSystemDictionaryAtExit) { SystemDictionary::print(); @@ -678,73 +516,6 @@ void VM_PopulateDumpSharedSpace::doit() { vm_direct_exit(0); } -void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) { - // Print statistics of all the regions - const size_t bitmap_used = map_info->space_at(MetaspaceShared::bm)->used(); - const size_t bitmap_reserved = map_info->space_at(MetaspaceShared::bm)->used_aligned(); - const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() + - _mc_region.reserved() + - bitmap_reserved + - _total_closed_archive_region_size + - _total_open_archive_region_size; - const size_t total_bytes = _ro_region.used() + _rw_region.used() + - _mc_region.used() + - bitmap_used + - _total_closed_archive_region_size + - _total_open_archive_region_size; - const double total_u_perc = percent_of(total_bytes, total_reserved); - - _mc_region.print(total_reserved); - _rw_region.print(total_reserved); - _ro_region.print(total_reserved); - print_bitmap_region_stats(bitmap_used, total_reserved); - print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved); - print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved); - - log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]", - total_bytes, total_reserved, total_u_perc); -} - -void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) { - log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]", - size, size/double(total_size)*100.0, size); -} - -void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray *heap_mem, - const char *name, size_t total_size) { - int arr_len = heap_mem == NULL ? 
0 : heap_mem->length(); - for (int i = 0; i < arr_len; i++) { - char* start = (char*)heap_mem->at(i).start(); - size_t size = heap_mem->at(i).byte_size(); - char* top = start + size; - log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT, - name, i, size, size/double(total_size)*100.0, size, p2i(start)); - - } -} - -char* MetaspaceShared::write_core_archive_regions(FileMapInfo* mapinfo, - GrowableArray* closed_oopmaps, - GrowableArray* open_oopmaps, - size_t& bitmap_size_in_bytes) { - // Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with - // MetaspaceShared::n_regions (internal to hotspot). - assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity"); - - // mc contains the trampoline code for method entries, which are patched at run time, - // so it needs to be read/write. - write_region(mapinfo, mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true); - write_region(mapinfo, rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false); - write_region(mapinfo, ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false); - - return mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_oopmaps, open_oopmaps, - bitmap_size_in_bytes); -} - -void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) { - mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec); -} - static GrowableArray* _loaded_cld = NULL; class CollectCLDClosure : public CLDClosure { @@ -816,8 +587,15 @@ void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) { } void MetaspaceShared::prepare_for_dumping() { + Arguments::assert_is_dumping_archive(); Arguments::check_unsupported_dumping_properties(); - ClassLoader::initialize_shared_path(); + + EXCEPTION_MARK; + ClassLoader::initialize_shared_path(THREAD); + if (HAS_PENDING_EXCEPTION) { + java_lang_Throwable::print(PENDING_EXCEPTION, tty); + vm_exit_during_initialization("ClassLoader::initialize_shared_path() failed unexpectedly"); + } } // Preload classes from a list, populate the shared spaces and dump to a @@ -980,7 +758,7 @@ bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) { } #if INCLUDE_CDS_JAVA_HEAP -void VM_PopulateDumpSharedSpace::dump_java_heap_objects() { +void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray* klasses) { if(!HeapShared::is_heap_object_archiving_allowed()) { log_info(cds)( "Archived java heap is not supported as UseG1GC, " @@ -992,8 +770,8 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects() { } // Find all the interned strings that should be dumped. 
int i; - for (i = 0; i < _global_klass_objects->length(); i++) { - Klass* k = _global_klass_objects->at(i); + for (i = 0; i < klasses->length(); i++) { + Klass* k = klasses->at(i); if (k->is_instance_klass()) { InstanceKlass* ik = InstanceKlass::cast(k); ik->constants()->add_dumped_interned_strings(); @@ -1059,13 +837,6 @@ bool MetaspaceShared::is_in_shared_region(const void* p, int idx) { return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx); } -bool MetaspaceShared::is_in_trampoline_frame(address addr) { - if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) { - return true; - } - return false; -} - bool MetaspaceShared::is_shared_dynamic(void* p) { if ((p < MetaspaceObj::shared_metaspace_top()) && (p >= _shared_metaspace_static_top)) { @@ -1518,10 +1289,8 @@ void MetaspaceShared::release_reserved_spaces(ReservedSpace& total_space_rs, } } -static int archive_regions[] = {MetaspaceShared::mc, - MetaspaceShared::rw, - MetaspaceShared::ro}; -static int archive_regions_count = 3; +static int archive_regions[] = { MetaspaceShared::rw, MetaspaceShared::ro }; +static int archive_regions_count = 2; MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) { assert(UseSharedSpaces, "must be runtime"); @@ -1568,7 +1337,6 @@ void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) { void MetaspaceShared::initialize_shared_spaces() { FileMapInfo *static_mapinfo = FileMapInfo::current_info(); - _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers(); // Verify various attributes of the archive, plus initialize the // shared string/symbol tables @@ -1633,18 +1401,6 @@ bool MetaspaceShared::remap_shared_readonly_as_readwrite() { return true; } -void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) { - // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space. - // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes - // or so. 
- _mc_region.print_out_of_space_msg(name, needed_bytes); - _rw_region.print_out_of_space_msg(name, needed_bytes); - _ro_region.print_out_of_space_msg(name, needed_bytes); - - vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name), - "Please reduce the number of shared classes."); -} - bool MetaspaceShared::use_full_module_graph() { #if INCLUDE_CDS_JAVA_HEAP if (ClassLoaderDataShared::is_full_module_graph_loaded()) { @@ -1662,24 +1418,16 @@ bool MetaspaceShared::use_full_module_graph() { } void MetaspaceShared::print_on(outputStream* st) { - if (UseSharedSpaces || DumpSharedSpaces) { + if (UseSharedSpaces) { st->print("CDS archive(s) mapped at: "); - address base; - address top; - if (UseSharedSpaces) { // Runtime - base = (address)MetaspaceObj::shared_metaspace_base(); - address static_top = (address)_shared_metaspace_static_top; - top = (address)MetaspaceObj::shared_metaspace_top(); - st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top)); - } else if (DumpSharedSpaces) { // Dump Time - base = (address)_shared_rs.base(); - top = (address)_shared_rs.end(); - st->print("[" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(top)); - } + address base = (address)MetaspaceObj::shared_metaspace_base(); + address static_top = (address)_shared_metaspace_static_top; + address top = (address)MetaspaceObj::shared_metaspace_top(); + st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top)); st->print("size " SIZE_FORMAT ", ", top - base); st->print("SharedBaseAddress: " PTR_FORMAT ", ArchiveRelocationMode: %d.", SharedBaseAddress, (int)ArchiveRelocationMode); } else { - st->print("CDS disabled."); + st->print("CDS archive(s) not mapped"); } st->cr(); } diff --git a/src/hotspot/share/memory/metaspaceShared.hpp b/src/hotspot/share/memory/metaspaceShared.hpp index 215a7f2a37c66d9c3dc858fe54046ad04d1a0b64..481264b6437b98f34d530e14e85935cc079ce634 100644 --- a/src/hotspot/share/memory/metaspaceShared.hpp +++ b/src/hotspot/share/memory/metaspaceShared.hpp @@ -25,7 +25,6 @@ #ifndef SHARE_MEMORY_METASPACESHARED_HPP #define SHARE_MEMORY_METASPACESHARED_HPP -#include "classfile/compactHashtable.hpp" #include "memory/allocation.hpp" #include "memory/memRegion.hpp" #include "memory/virtualspace.hpp" @@ -33,15 +32,10 @@ #include "utilities/macros.hpp" #include "utilities/resourceHash.hpp" -// Metaspace::allocate() requires that all blocks must be aligned with KlassAlignmentInBytes. -// We enforce the same alignment rule in blocks allocated from the shared space. -const int SharedSpaceObjectAlignment = KlassAlignmentInBytes; - -class outputStream; -class CHeapBitMap; class FileMapInfo; -class DumpRegion; -struct ArchiveHeapOopmapInfo; +class outputStream; + +template class GrowableArray; enum MapArchiveResult { MAP_ARCHIVE_SUCCESS, @@ -49,32 +43,13 @@ enum MapArchiveResult { MAP_ARCHIVE_OTHER_FAILURE }; -class MetaspaceSharedStats { -public: - MetaspaceSharedStats() { - memset(this, 0, sizeof(*this)); - } - CompactHashtableStats symbol; - CompactHashtableStats string; -}; - // Class Data Sharing Support class MetaspaceShared : AllStatic { - - // CDS support - - // Note: _shared_rs and _symbol_rs are only used at dump time. 
- static ReservedSpace _shared_rs; - static VirtualSpace _shared_vs; - static ReservedSpace _symbol_rs; - static VirtualSpace _symbol_vs; - static int _max_alignment; - static MetaspaceSharedStats _stats; + static ReservedSpace _symbol_rs; // used only during -Xshare:dump + static VirtualSpace _symbol_vs; // used only during -Xshare:dump static bool _has_error_classes; static bool _archive_loading_failed; static bool _remapped_readwrite; - static address _i2i_entry_code_buffers; - static size_t _core_spaces_size; static void* _shared_metaspace_static_top; static intx _relocation_delta; static char* _requested_base_address; @@ -83,12 +58,11 @@ class MetaspaceShared : AllStatic { public: enum { // core archive spaces - mc = 0, // miscellaneous code for method trampolines - rw = 1, // read-write shared space in the heap - ro = 2, // read-only shared space in the heap - bm = 3, // relocation bitmaps (freed after file mapping is finished) - num_core_region = 3, - num_non_heap_spaces = 4, + rw = 0, // read-write shared space in the heap + ro = 1, // read-only shared space in the heap + bm = 2, // relocation bitmaps (freed after file mapping is finished) + num_core_region = 2, // rw and ro + num_non_heap_spaces = 3, // rw and ro and bm // mapped java heap regions first_closed_archive_heap_region = bm + 1, @@ -107,22 +81,10 @@ class MetaspaceShared : AllStatic { static int preload_classes(const char * class_list_path, TRAPS) NOT_CDS_RETURN_(0); - static GrowableArray* collected_klasses(); - - static ReservedSpace* shared_rs() { - CDS_ONLY(return &_shared_rs); - NOT_CDS(return NULL); - } - static Symbol* symbol_rs_base() { return (Symbol*)_symbol_rs.base(); } - static void set_shared_rs(ReservedSpace rs) { - CDS_ONLY(_shared_rs = rs); - } - - static void commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) NOT_CDS_RETURN; static void initialize_for_static_dump() NOT_CDS_RETURN; static void initialize_runtime_shared_and_meta_spaces() NOT_CDS_RETURN; static void post_initialize(TRAPS) NOT_CDS_RETURN; @@ -132,10 +94,6 @@ class MetaspaceShared : AllStatic { static void set_archive_loading_failed() { _archive_loading_failed = true; } - static bool is_in_output_space(void* ptr) { - assert(DumpSharedSpaces, "must be"); - return shared_rs()->contains(ptr); - } static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false); static void initialize_shared_spaces() NOT_CDS_RETURN; @@ -155,18 +113,10 @@ class MetaspaceShared : AllStatic { // Return true if given address is in the shared region corresponding to the idx static bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false); - static bool is_in_trampoline_frame(address addr) NOT_CDS_RETURN_(false); - static bool is_shared_dynamic(void* p) NOT_CDS_RETURN_(false); static void serialize(SerializeClosure* sc) NOT_CDS_RETURN; - static MetaspaceSharedStats* stats() { - return &_stats; - } - - static void report_out_of_space(const char* name, size_t needed_bytes); - // JVM/TI RedefineClasses() support: // Remap the shared readonly space to shared readwrite, private if // sharing is enabled. 
Simply returns true if sharing is not enabled @@ -184,51 +134,12 @@ class MetaspaceShared : AllStatic { #if INCLUDE_CDS static size_t reserved_space_alignment(); - static void init_shared_dump_space(DumpRegion* first_space); - static DumpRegion* misc_code_dump_space(); - static DumpRegion* read_write_dump_space(); - static DumpRegion* read_only_dump_space(); - static void pack_dump_space(DumpRegion* current, DumpRegion* next, - ReservedSpace* rs); - static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik); #endif // Allocate a block of memory from the temporary "symbol" region. static char* symbol_space_alloc(size_t num_bytes); - // Allocate a block of memory from the "mc" or "ro" regions. - static char* misc_code_space_alloc(size_t num_bytes); - static char* read_only_space_alloc(size_t num_bytes); - static char* read_write_space_alloc(size_t num_bytes); - - template - static Array* new_ro_array(int length) { - size_t byte_size = Array::byte_sizeof(length, sizeof(T)); - Array* array = (Array*)read_only_space_alloc(byte_size); - array->initialize(length); - return array; - } - - template - static Array* new_rw_array(int length) { - size_t byte_size = Array::byte_sizeof(length, sizeof(T)); - Array* array = (Array*)read_write_space_alloc(byte_size); - array->initialize(length); - return array; - } - - template - static size_t ro_array_bytesize(int length) { - size_t byte_size = Array::byte_sizeof(length, sizeof(T)); - return align_up(byte_size, SharedSpaceObjectAlignment); - } - - static void init_misc_code_space(); - static address i2i_entry_code_buffers(); - - static void initialize_ptr_marker(CHeapBitMap* ptrmap); - // This is the base address as specified by -XX:SharedBaseAddress during -Xshare:dump. // Both the base/top archives are written using this as their base address. // @@ -254,13 +165,6 @@ class MetaspaceShared : AllStatic { return is_windows; } - // Returns the bitmap region which is allocated from C heap. - // Caller must free it with FREE_C_HEAP_ARRAY() - static char* write_core_archive_regions(FileMapInfo* mapinfo, - GrowableArray* closed_oopmaps, - GrowableArray* open_oopmaps, - size_t& bitmap_size_in_bytes); - // Can we skip some expensive operations related to modules? static bool use_optimized_module_handling() { return NOT_CDS(false) CDS_ONLY(_use_optimized_module_handling); } static void disable_optimized_module_handling() { _use_optimized_module_handling = false; } @@ -270,10 +174,6 @@ class MetaspaceShared : AllStatic { static void disable_full_module_graph() { _use_full_module_graph = false; } private: -#if INCLUDE_CDS - static void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, - bool read_only, bool allow_exec); -#endif static void read_extra_data(const char* filename, TRAPS) NOT_CDS_RETURN; static FileMapInfo* open_static_archive(); static FileMapInfo* open_dynamic_archive(); diff --git a/src/hotspot/share/memory/metaspaceTracer.hpp b/src/hotspot/share/memory/metaspaceTracer.hpp index 9f878451580dfe78bb45c3713759ba15fe25afd5..cafd6b5ee951d6dacbe16e8514ba3fba98614f9e 100644 --- a/src/hotspot/share/memory/metaspaceTracer.hpp +++ b/src/hotspot/share/memory/metaspaceTracer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "memory/metaspace.hpp" +#include "memory/metaspaceUtils.hpp" class ClassLoaderData; diff --git a/src/hotspot/share/memory/metaspaceUtils.hpp b/src/hotspot/share/memory/metaspaceUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ccdd1aca059f33a4e6ea6b1b5906f2903b9ee431 --- /dev/null +++ b/src/hotspot/share/memory/metaspaceUtils.hpp @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#ifndef SHARE_MEMORY_METASPACEUTILS_HPP +#define SHARE_MEMORY_METASPACEUTILS_HPP + +#include "memory/metaspace.hpp" +#include "memory/metaspaceChunkFreeListSummary.hpp" + +class outputStream; + +// Metaspace are deallocated when their class loader are GC'ed. +// This class implements a policy for inducing GC's to recover +// Metaspaces. + +class MetaspaceGCThresholdUpdater : public AllStatic { + public: + enum Type { + ComputeNewSize, + ExpandAndAllocate, + Last + }; + + static const char* to_string(MetaspaceGCThresholdUpdater::Type updater) { + switch (updater) { + case ComputeNewSize: + return "compute_new_size"; + case ExpandAndAllocate: + return "expand_and_allocate"; + default: + assert(false, "Got bad updater: %d", (int) updater); + return NULL; + }; + } +}; + +class MetaspaceGC : public AllStatic { + + // The current high-water-mark for inducing a GC. + // When committed memory of all metaspaces reaches this value, + // a GC is induced and the value is increased. Size is in bytes. + static volatile size_t _capacity_until_GC; + static uint _shrink_factor; + + static size_t shrink_factor() { return _shrink_factor; } + void set_shrink_factor(uint v) { _shrink_factor = v; } + + public: + + static void initialize(); + static void post_initialize(); + + static size_t capacity_until_GC(); + static bool inc_capacity_until_GC(size_t v, + size_t* new_cap_until_GC = NULL, + size_t* old_cap_until_GC = NULL, + bool* can_retry = NULL); + static size_t dec_capacity_until_GC(size_t v); + + // The amount to increase the high-water-mark (_capacity_until_GC) + static size_t delta_capacity_until_GC(size_t bytes); + + // Tells if we have can expand metaspace without hitting set limits. + static bool can_expand(size_t words, bool is_class); + + // Returns amount that we can expand without hitting a GC, + // measured in words. + static size_t allowed_expansion(); + + // Calculate the new high-water mark at which to induce + // a GC. 
+ static void compute_new_size(); +}; + +class MetaspaceUtils : AllStatic { +public: + + // Committed space actually in use by Metadata + static size_t used_words(); + static size_t used_words(Metaspace::MetadataType mdtype); + + // Space committed for Metaspace + static size_t committed_words(); + static size_t committed_words(Metaspace::MetadataType mdtype); + + // Space reserved for Metaspace + static size_t reserved_words(); + static size_t reserved_words(Metaspace::MetadataType mdtype); + + // _bytes() variants for convenience... + static size_t used_bytes() { return used_words() * BytesPerWord; } + static size_t used_bytes(Metaspace::MetadataType mdtype) { return used_words(mdtype) * BytesPerWord; } + static size_t committed_bytes() { return committed_words() * BytesPerWord; } + static size_t committed_bytes(Metaspace::MetadataType mdtype) { return committed_words(mdtype) * BytesPerWord; } + static size_t reserved_bytes() { return reserved_words() * BytesPerWord; } + static size_t reserved_bytes(Metaspace::MetadataType mdtype) { return reserved_words(mdtype) * BytesPerWord; } + + // (See JDK-8251342). Implement or Consolidate. + static MetaspaceChunkFreeListSummary chunk_free_list_summary(Metaspace::MetadataType mdtype) { + return MetaspaceChunkFreeListSummary(0,0,0,0,0,0,0,0); + } + + // Log change in used metadata. + static void print_metaspace_change(const metaspace::MetaspaceSizesSnapshot& pre_meta_values); + + // This will print out a basic metaspace usage report but + // unlike print_report() is guaranteed not to lock or to walk the CLDG. + static void print_basic_report(outputStream* st, size_t scale = 0); + + // Prints a report about the current metaspace state. + // Function will walk the CLDG and will lock the expand lock; if that is not + // convenient, use print_basic_report() instead. + static void print_report(outputStream* out, size_t scale = 0); + + static void print_on(outputStream * out); + + DEBUG_ONLY(static void verify();) + +}; + +#endif // SHARE_MEMORY_METASPACEUTILS_HPP diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp index 06a33bdf86a5670b124b9d5a96e394eabd67a774..417de7a2db254a3e81eb57062c4b72b1a9145c4a 100644 --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -48,6 +48,7 @@ #include "memory/metaspaceClosure.hpp" #include "memory/metaspaceCounters.hpp" #include "memory/metaspaceShared.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" diff --git a/src/hotspot/share/memory/universe.hpp b/src/hotspot/share/memory/universe.hpp index 70fc2269ec474e5cf08e2502696b08c75ad18bd9..bb91bb4a2a1cfcbbee34e0fc6c117db31a30a7fe 100644 --- a/src/hotspot/share/memory/universe.hpp +++ b/src/hotspot/share/memory/universe.hpp @@ -42,6 +42,7 @@ class CollectedHeap; class DeferredObjAllocEvent; class OopStorage; +class ReservedHeapSpace; // A helper class for caching a Method* when the user of the cache // only cares about the latest version of the Method*. 
This cache safely diff --git a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp index 0d16ffcd37d415668a8d45dd19641b209feb5dab..28015d76cea9bc948141aef8d42ac12c8f15329e 100644 --- a/src/hotspot/share/memory/virtualspace.cpp +++ b/src/hotspot/share/memory/virtualspace.cpp @@ -1086,343 +1086,4 @@ void VirtualSpace::print() { print_on(tty); } -/////////////// Unit tests /////////////// - -#ifndef PRODUCT - -class TestReservedSpace : AllStatic { - public: - static void small_page_write(void* addr, size_t size) { - size_t page_size = os::vm_page_size(); - - char* end = (char*)addr + size; - for (char* p = (char*)addr; p < end; p += page_size) { - *p = 1; - } - } - - static void release_memory_for_test(ReservedSpace rs) { - if (rs.special()) { - guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail"); - } else { - guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail"); - } - } - - static void test_reserved_space1(size_t size, size_t alignment) { - assert(is_aligned(size, alignment), "Incorrect input parameters"); - - ReservedSpace rs(size, // size - alignment, // alignment - UseLargePages, // large - (char *)NULL); // requested_address - - assert(rs.base() != NULL, "Must be"); - assert(rs.size() == size, "Must be"); - - assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses"); - assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses"); - - if (rs.special()) { - small_page_write(rs.base(), size); - } - - release_memory_for_test(rs); - } - - static void test_reserved_space2(size_t size) { - assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned"); - - ReservedSpace rs(size); - - assert(rs.base() != NULL, "Must be"); - assert(rs.size() == size, "Must be"); - - if (rs.special()) { - small_page_write(rs.base(), size); - } - - release_memory_for_test(rs); - } - - static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) { - if (size < alignment) { - // Tests might set -XX:LargePageSizeInBytes= and cause unexpected input arguments for this test. 
- assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement"); - return; - } - - assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned"); - assert(is_aligned(size, alignment), "Must be at least aligned against alignment"); - - bool large = maybe_large && UseLargePages && size >= os::large_page_size(); - - ReservedSpace rs(size, alignment, large); - - assert(rs.base() != NULL, "Must be"); - assert(rs.size() == size, "Must be"); - - if (rs.special()) { - small_page_write(rs.base(), size); - } - - release_memory_for_test(rs); - } - - - static void test_reserved_space1() { - size_t size = 2 * 1024 * 1024; - size_t ag = os::vm_allocation_granularity(); - - test_reserved_space1(size, ag); - test_reserved_space1(size * 2, ag); - test_reserved_space1(size * 10, ag); - } - - static void test_reserved_space2() { - size_t size = 2 * 1024 * 1024; - size_t ag = os::vm_allocation_granularity(); - - test_reserved_space2(size * 1); - test_reserved_space2(size * 2); - test_reserved_space2(size * 10); - test_reserved_space2(ag); - test_reserved_space2(size - ag); - test_reserved_space2(size); - test_reserved_space2(size + ag); - test_reserved_space2(size * 2); - test_reserved_space2(size * 2 - ag); - test_reserved_space2(size * 2 + ag); - test_reserved_space2(size * 3); - test_reserved_space2(size * 3 - ag); - test_reserved_space2(size * 3 + ag); - test_reserved_space2(size * 10); - test_reserved_space2(size * 10 + size / 2); - } - - static void test_reserved_space3() { - size_t ag = os::vm_allocation_granularity(); - - test_reserved_space3(ag, ag , false); - test_reserved_space3(ag * 2, ag , false); - test_reserved_space3(ag * 3, ag , false); - test_reserved_space3(ag * 2, ag * 2, false); - test_reserved_space3(ag * 4, ag * 2, false); - test_reserved_space3(ag * 8, ag * 2, false); - test_reserved_space3(ag * 4, ag * 4, false); - test_reserved_space3(ag * 8, ag * 4, false); - test_reserved_space3(ag * 16, ag * 4, false); - - if (UseLargePages) { - size_t lp = os::large_page_size(); - - // Without large pages - test_reserved_space3(lp, ag * 4, false); - test_reserved_space3(lp * 2, ag * 4, false); - test_reserved_space3(lp * 4, ag * 4, false); - test_reserved_space3(lp, lp , false); - test_reserved_space3(lp * 2, lp , false); - test_reserved_space3(lp * 3, lp , false); - test_reserved_space3(lp * 2, lp * 2, false); - test_reserved_space3(lp * 4, lp * 2, false); - test_reserved_space3(lp * 8, lp * 2, false); - - // With large pages - test_reserved_space3(lp, ag * 4 , true); - test_reserved_space3(lp * 2, ag * 4, true); - test_reserved_space3(lp * 4, ag * 4, true); - test_reserved_space3(lp, lp , true); - test_reserved_space3(lp * 2, lp , true); - test_reserved_space3(lp * 3, lp , true); - test_reserved_space3(lp * 2, lp * 2, true); - test_reserved_space3(lp * 4, lp * 2, true); - test_reserved_space3(lp * 8, lp * 2, true); - } - } - - static void test_reserved_space() { - test_reserved_space1(); - test_reserved_space2(); - test_reserved_space3(); - } -}; - -void TestReservedSpace_test() { - TestReservedSpace::test_reserved_space(); -} - -#define assert_equals(actual, expected) \ - assert(actual == expected, \ - "Got " SIZE_FORMAT " expected " \ - SIZE_FORMAT, actual, expected); - -#define assert_ge(value1, value2) \ - assert(value1 >= value2, \ - "'" #value1 "': " SIZE_FORMAT " '" \ - #value2 "': " SIZE_FORMAT, value1, value2); - -#define assert_lt(value1, value2) \ - assert(value1 < value2, \ - "'" #value1 "': " SIZE_FORMAT " '" \ - #value2 "': 
" SIZE_FORMAT, value1, value2); - - -class TestVirtualSpace : AllStatic { - enum TestLargePages { - Default, - Disable, - Reserve, - Commit - }; - - static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) { - switch(mode) { - default: - case Default: - case Reserve: - return ReservedSpace(reserve_size_aligned); - case Disable: - case Commit: - return ReservedSpace(reserve_size_aligned, - os::vm_allocation_granularity(), - /* large */ false); - } - } - - static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) { - switch(mode) { - default: - case Default: - case Reserve: - return vs.initialize(rs, 0); - case Disable: - return vs.initialize_with_granularity(rs, 0, os::vm_page_size()); - case Commit: - return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1)); - } - } - - public: - static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size, - TestLargePages mode = Default) { - size_t granularity = os::vm_allocation_granularity(); - size_t reserve_size_aligned = align_up(reserve_size, granularity); - - ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode); - - assert(reserved.is_reserved(), "Must be"); - - VirtualSpace vs; - bool initialized = initialize_virtual_space(vs, reserved, mode); - assert(initialized, "Failed to initialize VirtualSpace"); - - vs.expand_by(commit_size, false); - - if (vs.special()) { - assert_equals(vs.actual_committed_size(), reserve_size_aligned); - } else { - assert_ge(vs.actual_committed_size(), commit_size); - // Approximate the commit granularity. - // Make sure that we don't commit using large pages - // if large pages has been disabled for this VirtualSpace. - size_t commit_granularity = (mode == Disable || !UseLargePages) ? 
- os::vm_page_size() : os::large_page_size(); - assert_lt(vs.actual_committed_size(), commit_size + commit_granularity); - } - - reserved.release(); - } - - static void test_virtual_space_actual_committed_space_one_large_page() { - if (!UseLargePages) { - return; - } - - size_t large_page_size = os::large_page_size(); - - ReservedSpace reserved(large_page_size, large_page_size, true); - - assert(reserved.is_reserved(), "Must be"); - - VirtualSpace vs; - bool initialized = vs.initialize(reserved, 0); - assert(initialized, "Failed to initialize VirtualSpace"); - - vs.expand_by(large_page_size, false); - - assert_equals(vs.actual_committed_size(), large_page_size); - - reserved.release(); - } - - static void test_virtual_space_actual_committed_space() { - test_virtual_space_actual_committed_space(4 * K, 0); - test_virtual_space_actual_committed_space(4 * K, 4 * K); - test_virtual_space_actual_committed_space(8 * K, 0); - test_virtual_space_actual_committed_space(8 * K, 4 * K); - test_virtual_space_actual_committed_space(8 * K, 8 * K); - test_virtual_space_actual_committed_space(12 * K, 0); - test_virtual_space_actual_committed_space(12 * K, 4 * K); - test_virtual_space_actual_committed_space(12 * K, 8 * K); - test_virtual_space_actual_committed_space(12 * K, 12 * K); - test_virtual_space_actual_committed_space(64 * K, 0); - test_virtual_space_actual_committed_space(64 * K, 32 * K); - test_virtual_space_actual_committed_space(64 * K, 64 * K); - test_virtual_space_actual_committed_space(2 * M, 0); - test_virtual_space_actual_committed_space(2 * M, 4 * K); - test_virtual_space_actual_committed_space(2 * M, 64 * K); - test_virtual_space_actual_committed_space(2 * M, 1 * M); - test_virtual_space_actual_committed_space(2 * M, 2 * M); - test_virtual_space_actual_committed_space(10 * M, 0); - test_virtual_space_actual_committed_space(10 * M, 4 * K); - test_virtual_space_actual_committed_space(10 * M, 8 * K); - test_virtual_space_actual_committed_space(10 * M, 1 * M); - test_virtual_space_actual_committed_space(10 * M, 2 * M); - test_virtual_space_actual_committed_space(10 * M, 5 * M); - test_virtual_space_actual_committed_space(10 * M, 10 * M); - } - - static void test_virtual_space_disable_large_pages() { - if (!UseLargePages) { - return; - } - // These test cases verify that if we force VirtualSpace to disable large pages - test_virtual_space_actual_committed_space(10 * M, 0, Disable); - test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable); - test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable); - test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable); - test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable); - test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable); - test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable); - - test_virtual_space_actual_committed_space(10 * M, 0, Reserve); - test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve); - test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve); - test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve); - test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve); - test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve); - test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve); - - test_virtual_space_actual_committed_space(10 * M, 0, Commit); - test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit); - test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit); - 
test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
-    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
-    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
-    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
-  }
-
-  static void test_virtual_space() {
-    test_virtual_space_actual_committed_space();
-    test_virtual_space_actual_committed_space_one_large_page();
-    test_virtual_space_disable_large_pages();
-  }
-};
-
-void TestVirtualSpace_test() {
-  TestVirtualSpace::test_virtual_space();
-}
-
-#endif // PRODUCT
-
 #endif
diff --git a/src/hotspot/share/oops/array.hpp b/src/hotspot/share/oops/array.hpp
index f383564ac8459cf3dc3974c3a815d18a39f148df..428c2e63384f64897460179b63e7e3fff2933d69 100644
--- a/src/hotspot/share/oops/array.hpp
+++ b/src/hotspot/share/oops/array.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,18 +25,18 @@
 #ifndef SHARE_OOPS_ARRAY_HPP
 #define SHARE_OOPS_ARRAY_HPP

-#include "memory/allocation.hpp"
-#include "memory/metaspace.hpp"
 #include "runtime/atomic.hpp"
 #include "utilities/align.hpp"
+#include "utilities/exceptions.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"

 // Array for metadata allocation
 template <typename T>
 class Array: public MetaspaceObj {
+  friend class ArchiveBuilder;
   friend class MetadataFactory;
-  friend class MetaspaceShared;
   friend class VMStructs;
   friend class JVMCIVMStructs;
   friend class MethodHandleCompiler;           // special case
@@ -52,11 +52,7 @@ protected:
 private:
   NONCOPYABLE(Array);

-  void* operator new(size_t size, ClassLoaderData* loader_data, int length, TRAPS) throw() {
-    size_t word_size = Array::size(length);
-    return (void*) Metaspace::allocate(loader_data, word_size,
-                                       MetaspaceObj::array_type(sizeof(T)), THREAD);
-  }
+  inline void* operator new(size_t size, ClassLoaderData* loader_data, int length, TRAPS) throw();

   static size_t byte_sizeof(int length, size_t elm_byte_size) {
     return sizeof(Array) + MAX2(length - 1, 0) * elm_byte_size;
diff --git a/src/hotspot/share/oops/array.inline.hpp b/src/hotspot/share/oops/array.inline.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..588955574e07aa418018e2b743fd2fcde9641bde
--- /dev/null
+++ b/src/hotspot/share/oops/array.inline.hpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_ARRAY_INLINE_HPP
+#define SHARE_OOPS_ARRAY_INLINE_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/metaspace.hpp"
+#include "oops/array.hpp"
+
+
+template <typename T>
+inline void* Array<T>::operator new(size_t size, ClassLoaderData* loader_data, int length, TRAPS) throw() {
+  size_t word_size = Array::size(length);
+  return (void*) Metaspace::allocate(loader_data, word_size,
+                                     MetaspaceObj::array_type(sizeof(T)), THREAD);
+}
+
+#endif // SHARE_OOPS_ARRAY_INLINE_HPP
diff --git a/src/hotspot/share/oops/compiledICHolder.hpp b/src/hotspot/share/oops/compiledICHolder.hpp
index a1932cba56c4a6b1669fbbf34b904cb2399e4c5d..b3d0c8715bf840f43017c34eaa48f3ee60271dab 100644
--- a/src/hotspot/share/oops/compiledICHolder.hpp
+++ b/src/hotspot/share/oops/compiledICHolder.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -71,16 +71,7 @@ class CompiledICHolder : public CHeapObj {
   CompiledICHolder* next() { return _next; }
   void set_next(CompiledICHolder* n) { _next = n; }

-  inline bool is_loader_alive() {
-    Klass* k = _is_metadata_method ? ((Method*)_holder_metadata)->method_holder() : (Klass*)_holder_metadata;
-    if (!k->is_loader_alive()) {
-      return false;
-    }
-    if (!_holder_klass->is_loader_alive()) {
-      return false;
-    }
-    return true;
-  }
+  inline bool is_loader_alive();

   // Verify
   void verify_on(outputStream* st);
diff --git a/src/hotspot/share/oops/compiledICHolder.inline.hpp b/src/hotspot/share/oops/compiledICHolder.inline.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4a5531531f0abbb6f553a9bf0b7877855f3976fb
--- /dev/null
+++ b/src/hotspot/share/oops/compiledICHolder.inline.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_COMPILEDICHOLDER_INLINE_HPP
+#define SHARE_OOPS_COMPILEDICHOLDER_INLINE_HPP
+
+#include "oops/compiledICHolder.hpp"
+#include "oops/klass.inline.hpp"
+
+inline bool CompiledICHolder::is_loader_alive() {
+  Klass* k = _is_metadata_method ?
((Method*)_holder_metadata)->method_holder() : (Klass*)_holder_metadata; + if (!k->is_loader_alive()) { + return false; + } + if (!_holder_klass->is_loader_alive()) { + return false; + } + return true; +} + +#endif // SHARE_OOPS_COMPILEDICHOLDER_INLINE_HPP diff --git a/src/hotspot/share/oops/compressedOops.cpp b/src/hotspot/share/oops/compressedOops.cpp index 7f7b2a06fc858c9d09c9d060eca00ad623576093..1f094ed49f52f427e50d4f8b134fec065f80eabc 100644 --- a/src/hotspot/share/oops/compressedOops.cpp +++ b/src/hotspot/share/oops/compressedOops.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "memory/memRegion.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include "memory/virtualspace.hpp" #include "oops/compressedOops.hpp" #include "gc/shared/collectedHeap.hpp" #include "runtime/arguments.hpp" diff --git a/src/hotspot/share/oops/constMethod.cpp b/src/hotspot/share/oops/constMethod.cpp index bd3fe91e1ebe497486bc839fe94d05578d73c44e..b70574b17a838ce4b0024be4f19f53c931bc61f9 100644 --- a/src/hotspot/share/oops/constMethod.cpp +++ b/src/hotspot/share/oops/constMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -421,16 +421,6 @@ void ConstMethod::metaspace_pointers_do(MetaspaceClosure* it) { it->push(default_annotations_addr()); } ConstMethod* this_ptr = this; - it->push_method_entry(&this_ptr, (intptr_t*)&_adapter_trampoline); -} - -void ConstMethod::set_adapter_trampoline(AdapterHandlerEntry** trampoline) { - Arguments::assert_is_dumping_archive(); - if (DumpSharedSpaces) { - assert(*trampoline == NULL, - "must be NULL during dump time, to be initialized at run time"); - } - _adapter_trampoline = trampoline; } // Printing diff --git a/src/hotspot/share/oops/constMethod.hpp b/src/hotspot/share/oops/constMethod.hpp index f841f1cc996352c3c925ebaaa783d7ff202872d4..dfa4a352c2dbffcec5e6644de248dc5b5f6a6640 100644 --- a/src/hotspot/share/oops/constMethod.hpp +++ b/src/hotspot/share/oops/constMethod.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -121,8 +121,6 @@ class MethodParametersElement { u2 flags; }; -class AdapterHandlerEntry; - // Class to collect the sizes of ConstMethod inline tables #define INLINE_TABLES_DO(do_element) \ do_element(localvariable_table_length) \ @@ -205,12 +203,6 @@ private: // Raw stackmap data for the method Array* _stackmap_data; - // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked. 
- union { - AdapterHandlerEntry* _adapter; - AdapterHandlerEntry** _adapter_trampoline; // see comments around Method::link_method() - }; - int _constMethod_size; u2 _flags; u1 _result_type; // BasicType of result @@ -285,26 +277,6 @@ public: void copy_stackmap_data(ClassLoaderData* loader_data, u1* sd, int length, TRAPS); bool has_stackmap_table() const { return _stackmap_data != NULL; } - // adapter - void set_adapter_entry(AdapterHandlerEntry* adapter) { - assert(!is_shared(), - "shared methods in archive have fixed adapter_trampoline"); - _adapter = adapter; - } - void set_adapter_trampoline(AdapterHandlerEntry** trampoline); - void update_adapter_trampoline(AdapterHandlerEntry* adapter) { - assert(is_shared(), "must be"); - *_adapter_trampoline = adapter; - assert(this->adapter() == adapter, "must be"); - } - AdapterHandlerEntry* adapter() { - if (is_shared()) { - return *_adapter_trampoline; - } else { - return _adapter; - } - } - void init_fingerprint() { const uint64_t initval = UCONST64(0x8000000000000000); _fingerprint = initval; diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp index 1dc0404c701927cf59fef0c39c954a0de7f05624..866b11ae597c442e7ec6f665961d213eee03235a 100644 --- a/src/hotspot/share/oops/constantPool.cpp +++ b/src/hotspot/share/oops/constantPool.cpp @@ -39,7 +39,6 @@ #include "memory/heapShared.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceClosure.hpp" -#include "memory/metaspaceShared.hpp" #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" @@ -583,7 +582,7 @@ Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int w oop protection_domain = this_cp->pool_holder()->protection_domain(); Handle h_prot (thread, protection_domain); Handle h_loader (thread, loader); - Klass* k = SystemDictionary::find(name, h_loader, h_prot, thread); + Klass* k = SystemDictionary::find_instance_klass(name, h_loader, h_prot); // Avoid constant pool verification at a safepoint, which takes the Module_lock. if (k != NULL && !SafepointSynchronize::is_at_safepoint()) { diff --git a/src/hotspot/share/oops/cpCache.cpp b/src/hotspot/share/oops/cpCache.cpp index 35a159919ce0cb28ef71ec28d3ccadf47f063267..fc3fc1a72ed644a0c14f209cd244f64ac2cc91d7 100644 --- a/src/hotspot/share/oops/cpCache.cpp +++ b/src/hotspot/share/oops/cpCache.cpp @@ -36,7 +36,6 @@ #include "memory/heapShared.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceClosure.hpp" -#include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "oops/access.inline.hpp" #include "oops/compressedOops.hpp" diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index 38aed251bf71223a064cf4f2d6d7467c0e8a2757..dcc79f1d605c54a551a4759fc95b8dc20835c185 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -1223,37 +1223,37 @@ void InstanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) } } -Klass* InstanceKlass::implementor() const { - Klass* volatile* k = adr_implementor(); - if (k == NULL) { +InstanceKlass* InstanceKlass::implementor() const { + InstanceKlass* volatile* ik = adr_implementor(); + if (ik == NULL) { return NULL; } else { // This load races with inserts, and therefore needs acquire. 
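The acquire on this load pairs with the release_store() in set_implementor() further down: a reader that sees a non-null implementor is also guaranteed to see the stores made before it was published. A self-contained sketch of that publish/observe pattern, with std::atomic standing in for HotSpot's Atomic wrappers and all names illustrative:

#include <atomic>

struct Payload { int data = 0; };
static std::atomic<Payload*> slot{nullptr};

void publish(Payload* p) {
  p->data = 42;                              // plain writes before the release...
  slot.store(p, std::memory_order_release);  // ...are published with the pointer
}

Payload* observe() {
  // Pairs with the release store above: a non-null result guarantees the
  // writes made before publication (data == 42) are visible to this thread.
  return slot.load(std::memory_order_acquire);
}

int main() {
  static Payload p;
  publish(&p);
  return observe() != nullptr ? 0 : 1;
}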
- Klass* kls = Atomic::load_acquire(k); - if (kls != NULL && !kls->is_loader_alive()) { + InstanceKlass* ikls = Atomic::load_acquire(ik); + if (ikls != NULL && !ikls->is_loader_alive()) { return NULL; // don't return unloaded class } else { - return kls; + return ikls; } } } -void InstanceKlass::set_implementor(Klass* k) { +void InstanceKlass::set_implementor(InstanceKlass* ik) { assert_locked_or_safepoint(Compile_lock); assert(is_interface(), "not interface"); - Klass* volatile* addr = adr_implementor(); + InstanceKlass* volatile* addr = adr_implementor(); assert(addr != NULL, "null addr"); if (addr != NULL) { - Atomic::release_store(addr, k); + Atomic::release_store(addr, ik); } } int InstanceKlass::nof_implementors() const { - Klass* k = implementor(); - if (k == NULL) { + InstanceKlass* ik = implementor(); + if (ik == NULL) { return 0; - } else if (k != this) { + } else if (ik != this) { return 1; } else { return 2; @@ -1269,29 +1269,29 @@ int InstanceKlass::nof_implementors() const { // self - more than one implementor // // The _implementor field only exists for interfaces. -void InstanceKlass::add_implementor(Klass* k) { +void InstanceKlass::add_implementor(InstanceKlass* ik) { if (Universe::is_fully_initialized()) { assert_lock_strong(Compile_lock); } assert(is_interface(), "not interface"); // Filter out my subinterfaces. // (Note: Interfaces are never on the subklass list.) - if (InstanceKlass::cast(k)->is_interface()) return; + if (ik->is_interface()) return; // Filter out subclasses whose supers already implement me. // (Note: CHA must walk subclasses of direct implementors // in order to locate indirect implementors.) - Klass* sk = k->super(); - if (sk != NULL && InstanceKlass::cast(sk)->implements_interface(this)) + InstanceKlass* super_ik = ik->java_super(); + if (super_ik != NULL && super_ik->implements_interface(this)) // We only need to check one immediate superclass, since the // implements_interface query looks at transitive_interfaces. // Any supers of the super have the same (or fewer) transitive_interfaces. return; - Klass* ik = implementor(); - if (ik == NULL) { - set_implementor(k); - } else if (ik != this && ik != k) { + InstanceKlass* iklass = implementor(); + if (iklass == NULL) { + set_implementor(ik); + } else if (iklass != this && iklass != ik) { // There is already an implementor. Use itself as an indicator of // more than one implementors. 
set_implementor(this); @@ -1299,7 +1299,7 @@ void InstanceKlass::add_implementor(Klass* k) { // The implementor also implements the transitive_interfaces for (int index = 0; index < local_interfaces()->length(); index++) { - InstanceKlass::cast(local_interfaces()->at(index))->add_implementor(k); + local_interfaces()->at(index)->add_implementor(ik); } } @@ -1314,7 +1314,7 @@ void InstanceKlass::process_interfaces() { // link this class into the implementors list of every interface it implements for (int i = local_interfaces()->length() - 1; i >= 0; i--) { assert(local_interfaces()->at(i)->is_klass(), "must be a klass"); - InstanceKlass* interf = InstanceKlass::cast(local_interfaces()->at(i)); + InstanceKlass* interf = local_interfaces()->at(i); assert(interf->is_interface(), "expected interface"); interf->add_implementor(this); } @@ -2344,11 +2344,11 @@ void InstanceKlass::clean_implementors_list() { assert (ClassUnloading, "only called for ClassUnloading"); for (;;) { // Use load_acquire due to competing with inserts - Klass* impl = Atomic::load_acquire(adr_implementor()); + InstanceKlass* impl = Atomic::load_acquire(adr_implementor()); if (impl != NULL && !impl->is_loader_alive()) { - // NULL this field, might be an unloaded klass or NULL - Klass* volatile* klass = adr_implementor(); - if (Atomic::cmpxchg(klass, impl, (Klass*)NULL) == impl) { + // NULL this field, might be an unloaded instance klass or NULL + InstanceKlass* volatile* iklass = adr_implementor(); + if (Atomic::cmpxchg(iklass, impl, (InstanceKlass*)NULL) == impl) { // Successfully unlinking implementor. if (log_is_enabled(Trace, class, unload)) { ResourceMark rm; @@ -2857,9 +2857,10 @@ void InstanceKlass::set_package(ClassLoaderData* loader_data, PackageEntry* pkg_ check_prohibited_package(name(), loader_data, CHECK); } - if (is_shared() && _package_entry == pkg_entry) { - if (MetaspaceShared::use_full_module_graph()) { + if (is_shared() && _package_entry != NULL) { + if (MetaspaceShared::use_full_module_graph() && _package_entry == pkg_entry) { // we can use the saved package + assert(MetaspaceShared::is_in_shared_metaspace(_package_entry), "must be"); return; } else { _package_entry = NULL; @@ -2934,7 +2935,7 @@ void InstanceKlass::set_package(ClassLoaderData* loader_data, PackageEntry* pkg_ // in an unnamed module. It is also used to indicate (for all packages whose // classes are loaded by the boot loader) that at least one of the package's // classes has been loaded. -void InstanceKlass::set_classpath_index(s2 path_index, TRAPS) { +void InstanceKlass::set_classpath_index(s2 path_index) { if (_package_entry != NULL) { DEBUG_ONLY(PackageEntryTable* pkg_entry_tbl = ClassLoaderData::the_null_class_loader_data()->packages();) assert(pkg_entry_tbl->lookup_only(_package_entry->name()) == _package_entry, "Should be same"); @@ -4284,3 +4285,24 @@ void InstanceKlass::log_to_classlist(const ClassFileStream* stream) const { } #endif // INCLUDE_CDS } + +// Make a step iterating over the class hierarchy under the root class. +// Skips subclasses if requested. 
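The step defined just below is ordinary depth-first pre-order traversal over a first-child/next-sibling tree, backtracking through parent links when a subtree is exhausted. A standalone model of the same step under hypothetical Node/step names (subklass, next_sibling and superklass are the HotSpot counterparts):

struct Node {
  Node* first_child;   // counterpart of subklass()
  Node* next_sibling;  // counterpart of next_sibling()
  Node* parent;        // counterpart of superklass()
};

// Returns the next node in pre-order, or nullptr when the walk is done.
Node* step(Node* root, Node* cur, bool visit_children) {
  if (visit_children && cur->first_child != nullptr) {
    return cur->first_child;                           // descend first
  }
  while (cur->next_sibling == nullptr && cur != root) {
    cur = cur->parent;                                 // backtrack
  }
  return (cur == root) ? nullptr : cur->next_sibling;  // done, or next sibling
}

int main() {
  Node root{nullptr, nullptr, nullptr};
  Node child{nullptr, nullptr, &root};
  root.first_child = &child;
  Node* cur = step(&root, &root, true);   // visits the child
  cur = step(&root, cur, true);           // walk finished, returns nullptr
  return cur == nullptr ? 0 : 1;
}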
+void ClassHierarchyIterator::next() { + assert(_current != NULL, "required"); + if (_visit_subclasses && _current->subklass() != NULL) { + _current = _current->subklass(); + return; // visit next subclass + } + _visit_subclasses = true; // reset + while (_current->next_sibling() == NULL && _current != _root) { + _current = _current->superklass(); // backtrack; no more sibling subclasses left + } + if (_current == _root) { + // Iteration is over (back at root after backtracking). Invalidate the iterator. + _current = NULL; + return; + } + _current = _current->next_sibling(); + return; // visit next sibling subclass +} diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index bf9ecc94733294d1e6dcf03a65f11a8910cf3270..809c7fe2b21a544bfc304b1fd9917bab5b07098f 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,6 @@ #ifndef SHARE_OOPS_INSTANCEKLASS_HPP #define SHARE_OOPS_INSTANCEKLASS_HPP -#include "classfile/classLoaderData.hpp" #include "memory/referenceType.hpp" #include "oops/annotations.hpp" #include "oops/constMethod.hpp" @@ -328,7 +327,7 @@ class InstanceKlass: public Klass { // embedded nonstatic oop-map blocks follows here // embedded implementor of this interface follows here // The embedded implementor only exists if the current klass is an - // iterface. The possible values of the implementor fall into following + // interface. The possible values of the implementor fall into following // three cases: // NULL: no implementor. // A Klass* that's not itself: one implementor. @@ -524,7 +523,7 @@ public: // packages returned by get_system_packages(). // For packages whose classes are loaded from the boot loader class path, the // classpath_index indicates which entry on the boot loader class path. - void set_classpath_index(s2 path_index, TRAPS); + void set_classpath_index(s2 path_index); bool is_same_class_package(const Klass* class2) const; bool is_same_class_package(oop other_class_loader, const Symbol* other_class_name) const; @@ -1017,10 +1016,10 @@ public: #endif // Access to the implementor of an interface. - Klass* implementor() const; - void set_implementor(Klass* k); + InstanceKlass* implementor() const; + void set_implementor(InstanceKlass* ik); int nof_implementors() const; - void add_implementor(Klass* k); // k is a new class that implements this interface + void add_implementor(InstanceKlass* ik); // ik is a new class that implements this interface void init_implementor(); // initialize // link this class into the implementors list of every interface it implements @@ -1088,7 +1087,7 @@ public: inline OopMapBlock* start_of_nonstatic_oop_maps() const; inline Klass** end_of_nonstatic_oop_maps() const; - inline Klass* volatile* adr_implementor() const; + inline InstanceKlass* volatile* adr_implementor() const; inline InstanceKlass** adr_unsafe_anonymous_host() const; inline address adr_fingerprint() const; @@ -1431,4 +1430,41 @@ class InnerClassesIterator : public StackObj { } }; +// Iterator over class hierarchy under a particular class. Implements depth-first pre-order traversal. 
+// Usage: +// for (ClassHierarchyIterator iter(root_klass); !iter.done(); iter.next()) { +// Klass* k = iter.klass(); +// ... +// } +class ClassHierarchyIterator : public StackObj { + private: + InstanceKlass* _root; + Klass* _current; + bool _visit_subclasses; + + public: + ClassHierarchyIterator(InstanceKlass* root) : _root(root), _current(root), _visit_subclasses(true) { + assert(!root->is_interface(), "no subclasses"); + assert(_root == _current, "required"); // initial state + } + + bool done() { + return (_current == NULL); + } + + // Make a step iterating over the class hierarchy under the root class. + // Skips subclasses if requested. + void next(); + + Klass* klass() { + assert(!done(), "sanity"); + return _current; + } + + // Skip subclasses of the current class. + void skip_subclasses() { + _visit_subclasses = false; + } +}; + #endif // SHARE_OOPS_INSTANCEKLASS_HPP diff --git a/src/hotspot/share/oops/instanceKlass.inline.hpp b/src/hotspot/share/oops/instanceKlass.inline.hpp index a1e40852a8e3e17cdce0fdc2c3c1cd500707e9bd..b5c0c168fc68bbb230aa18d3890f3ed33ac442d5 100644 --- a/src/hotspot/share/oops/instanceKlass.inline.hpp +++ b/src/hotspot/share/oops/instanceKlass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,9 +74,9 @@ inline Klass** InstanceKlass::end_of_nonstatic_oop_maps() const { nonstatic_oop_map_count()); } -inline Klass* volatile* InstanceKlass::adr_implementor() const { +inline InstanceKlass* volatile* InstanceKlass::adr_implementor() const { if (is_interface()) { - return (Klass* volatile*)end_of_nonstatic_oop_maps(); + return (InstanceKlass* volatile*)end_of_nonstatic_oop_maps(); } else { return NULL; } @@ -102,7 +102,7 @@ inline address InstanceKlass::adr_fingerprint() const { return (address)(adr_host + 1); } - Klass* volatile* adr_impl = adr_implementor(); + InstanceKlass* volatile* adr_impl = adr_implementor(); if (adr_impl != NULL) { return (address)(adr_impl + 1); } diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp index 143172dfa247f129299563ea6308c229caba0839..cb62cb5cd3932368e95321536cff7865c0ee8cc2 100644 --- a/src/hotspot/share/oops/klass.cpp +++ b/src/hotspot/share/oops/klass.cpp @@ -38,7 +38,6 @@ #include "memory/heapShared.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceClosure.hpp" -#include "memory/metaspaceShared.hpp" #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp index 6449eda3778505fd29d35eb0734b518bc9d70871..cfed2823f117de1d056dc628ae91209d91d302d7 100644 --- a/src/hotspot/share/oops/klass.hpp +++ b/src/hotspot/share/oops/klass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,6 @@ #ifndef SHARE_OOPS_KLASS_HPP #define SHARE_OOPS_KLASS_HPP -#include "classfile/classLoaderData.hpp" #include "memory/iterator.hpp" #include "memory/memRegion.hpp" #include "oops/markWord.hpp" @@ -67,6 +66,7 @@ const uint KLASS_ID_COUNT = 6; // Forward declarations. template class Array; template class GrowableArray; +class ClassLoaderData; class fieldDescriptor; class klassVtable; class ModuleEntry; @@ -508,11 +508,7 @@ protected: oop class_loader() const; - // This loads the klass's holder as a phantom. This is useful when a weak Klass - // pointer has been "peeked" and then must be kept alive before it may - // be used safely. All uses of klass_holder need to apply the appropriate barriers, - // except during GC. - oop klass_holder() const { return class_loader_data()->holder_phantom(); } + inline oop klass_holder() const; protected: virtual Klass* array_klass_impl(bool or_null, int rank, TRAPS); @@ -635,11 +631,11 @@ protected: void set_is_shared() { _access_flags.set_is_shared_class(); } bool is_hidden() const { return access_flags().is_hidden_class(); } void set_is_hidden() { _access_flags.set_is_hidden_class(); } - bool is_non_strong_hidden() const { return access_flags().is_hidden_class() && - class_loader_data()->has_class_mirror_holder(); } bool is_value_based() { return _access_flags.is_value_based_class(); } void set_is_value_based() { _access_flags.set_is_value_based_class(); } + inline bool is_non_strong_hidden() const; + bool is_cloneable() const; void set_is_cloneable(); @@ -672,10 +668,7 @@ protected: virtual void metaspace_pointers_do(MetaspaceClosure* iter); virtual MetaspaceObj::Type type() const { return ClassType; } - // Iff the class loader (or mirror for unsafe anonymous classes) is alive the - // Klass is considered alive. This is safe to call before the CLD is marked as - // unloading, and hence during concurrent class unloading. - bool is_loader_alive() const { return class_loader_data()->is_alive(); } + inline bool is_loader_alive() const; void clean_subklass(); diff --git a/src/hotspot/share/oops/klass.inline.hpp b/src/hotspot/share/oops/klass.inline.hpp index df4e8ded40086d0a4f40829feaa0690f97d63a36..6842363577c88ad0f47a9eb07c73b9dddeb5094f 100644 --- a/src/hotspot/share/oops/klass.inline.hpp +++ b/src/hotspot/share/oops/klass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,26 @@ #include "oops/klass.hpp" #include "oops/markWord.hpp" +// This loads the klass's holder as a phantom. This is useful when a weak Klass +// pointer has been "peeked" and then must be kept alive before it may +// be used safely. All uses of klass_holder need to apply the appropriate barriers, +// except during GC. +inline oop Klass::klass_holder() const { + return class_loader_data()->holder_phantom(); +} + +inline bool Klass::is_non_strong_hidden() const { + return access_flags().is_hidden_class() && + class_loader_data()->has_class_mirror_holder(); +} + +// Iff the class loader (or mirror for unsafe anonymous classes) is alive the +// Klass is considered alive. This is safe to call before the CLD is marked as +// unloading, and hence during concurrent class unloading. 
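Moving these bodies into klass.inline.hpp is the recurring pattern of this change: the class header keeps only a declaration, so files that merely include klass.hpp no longer drag in classLoaderData.hpp. A single-file model of the .hpp/.inline.hpp split, with every name illustrative rather than HotSpot's:

// Part that would live in the ".hpp": forward declaration + inline declaration.
class Loader;
class K {
  Loader* _loader;
 public:
  explicit K(Loader* l) : _loader(l) {}
  inline bool is_loader_alive() const;  // body intentionally defined elsewhere
};

// Part that would live in the ".inline.hpp": the heavy dependency + the body.
class Loader {
 public:
  bool is_alive() const { return true; }
};

inline bool K::is_loader_alive() const { return _loader->is_alive(); }

int main() {
  Loader l;
  K k(&l);
  return k.is_loader_alive() ? 0 : 1;
}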
+inline bool Klass::is_loader_alive() const { + return class_loader_data()->is_alive(); +} + inline void Klass::set_prototype_header(markWord header) { assert(!header.has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances"); _prototype_header = header; diff --git a/src/hotspot/share/oops/metadata.cpp b/src/hotspot/share/oops/metadata.cpp index b50a5a277a21ceb0004c4eea0436bf98ff4d501b..242fc89c4137e8a45a1f05ecfbdc7428de868bc6 100644 --- a/src/hotspot/share/oops/metadata.cpp +++ b/src/hotspot/share/oops/metadata.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "oops/metadata.hpp" -#include "memory/metaspace.hpp" #include "memory/resourceArea.hpp" #include "prims/jvmtiRedefineClasses.hpp" diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp index d785c8d34acb96ad8d773f4b1ecc94b20680f85b..71371edb15b3e6dae1b9d49ed6930799037db173 100644 --- a/src/hotspot/share/oops/method.cpp +++ b/src/hotspot/share/oops/method.cpp @@ -350,9 +350,6 @@ void Method::metaspace_pointers_do(MetaspaceClosure* it) { it->push(&_method_counters); Method* this_ptr = this; - it->push_method_entry(&this_ptr, (intptr_t*)&_i2i_entry); - it->push_method_entry(&this_ptr, (intptr_t*)&_from_compiled_entry); - it->push_method_entry(&this_ptr, (intptr_t*)&_from_interpreted_entry); } // Attempt to return method to original state. Clear any pointers @@ -493,6 +490,7 @@ bool Method::was_executed_more_than(int n) { } void Method::print_invocation_count() { + //---< compose+print method return type, klass, name, and signature >--- if (is_static()) tty->print("static "); if (is_final()) tty->print("final "); if (is_synchronized()) tty->print("synchronized "); @@ -507,12 +505,22 @@ void Method::print_invocation_count() { } tty->cr(); - tty->print_cr (" interpreter_invocation_count: %8d ", interpreter_invocation_count()); - tty->print_cr (" invocation_counter: %8d ", invocation_count()); - tty->print_cr (" backedge_counter: %8d ", backedge_count()); + // Counting based on signed int counters tends to overflow with + // longer-running workloads on fast machines. The counters under + // consideration here, however, are limited in range by counting + // logic. See InvocationCounter:count_limit for example. + // No "overflow precautions" need to be implemented here. 
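The INT32_FORMAT_W/INT64_FORMAT_W macros used just below roughly expand to width-qualified <inttypes.h> conversions, which is what lets the widened 64-bit compiled_invocation_count print in the same aligned column. An equivalent standalone sketch (the counter values are invented):

#include <cinttypes>
#include <cstdio>

int main() {
  int32_t invocations = 123456;
  int64_t compiled    = 9876543210LL;  // too large for the old signed 32-bit counter
  printf("   invocation_counter:        %11" PRId32 "\n", invocations);
  printf("   compiled_invocation_count: %11" PRId64 "\n", compiled);
  return 0;
}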
+ tty->print_cr (" interpreter_invocation_count: " INT32_FORMAT_W(11), interpreter_invocation_count()); + tty->print_cr (" invocation_counter: " INT32_FORMAT_W(11), invocation_count()); + tty->print_cr (" backedge_counter: " INT32_FORMAT_W(11), backedge_count()); + + if (method_data() != NULL) { + tty->print_cr (" decompile_count: " UINT32_FORMAT_W(11), method_data()->decompile_count()); + } + #ifndef PRODUCT if (CountCompiledCalls) { - tty->print_cr (" compiled_invocation_count: %8d ", compiled_invocation_count()); + tty->print_cr (" compiled_invocation_count: " INT64_FORMAT_W(11), compiled_invocation_count()); } #endif } @@ -891,7 +899,7 @@ bool Method::is_klass_loaded_by_klass_index(int klass_index) const { Symbol* klass_name = constants()->klass_name_at(klass_index); Handle loader(thread, method_holder()->class_loader()); Handle prot (thread, method_holder()->protection_domain()); - return SystemDictionary::find(klass_name, loader, prot, thread) != NULL; + return SystemDictionary::find_instance_klass(klass_name, loader, prot) != NULL; } else { return true; } @@ -1102,17 +1110,12 @@ void Method::unlink_code() { #if INCLUDE_CDS // Called by class data sharing to remove any entry points (which are not shared) void Method::unlink_method() { - _code = NULL; - Arguments::assert_is_dumping_archive(); - // Set the values to what they should be at run time. Note that - // this Method can no longer be executed during dump time. - _i2i_entry = Interpreter::entry_for_cds_method(methodHandle(Thread::current(), this)); - _from_interpreted_entry = _i2i_entry; - - assert(_from_compiled_entry != NULL, "sanity"); - assert(*((int*)_from_compiled_entry) == 0, - "must be NULL during dump time, to be initialized at run time"); + _code = NULL; + _adapter = NULL; + _i2i_entry = NULL; + _from_compiled_entry = NULL; + _from_interpreted_entry = NULL; if (is_native()) { *native_function_addr() = NULL; @@ -1125,90 +1128,12 @@ void Method::unlink_method() { } #endif -/**************************************************************************** -// The following illustrates how the entries work for CDS shared Methods: -// -// Our goal is to delay writing into a shared Method until it's compiled. -// Hence, we want to determine the initial values for _i2i_entry, -// _from_interpreted_entry and _from_compiled_entry during CDS dump time. -// -// In this example, both Methods A and B have the _i2i_entry of "zero_locals". -// They also have similar signatures so that they will share the same -// AdapterHandlerEntry. -// -// _adapter_trampoline points to a fixed location in the RW section of -// the CDS archive. This location initially contains a NULL pointer. When the -// first of method A or B is linked, an AdapterHandlerEntry is allocated -// dynamically, and its c2i/i2c entries are generated. -// -// _i2i_entry and _from_interpreted_entry initially points to the same -// (fixed) location in the CODE section of the CDS archive. This contains -// an unconditional branch to the actual entry for "zero_locals", which is -// generated at run time and may be on an arbitrary address. Thus, the -// unconditional branch is also generated at run time to jump to the correct -// address. -// -// Similarly, _from_compiled_entry points to a fixed address in the CODE -// section. This address has enough space for an unconditional branch -// instruction, and is initially zero-filled. 
After the AdapterHandlerEntry is -// initialized, and the address for the actual c2i_entry is known, we emit a -// branch instruction here to branch to the actual c2i_entry. -// -// The effect of the extra branch on the i2i and c2i entries is negligible. -// -// The reason for putting _adapter_trampoline in RO is many shared Methods -// share the same AdapterHandlerEntry, so we can save space in the RW section -// by having the extra indirection. - - -[Method A: RW] - _constMethod ----> [ConstMethod: RO] - _adapter_trampoline -----------+ - | - _i2i_entry (same value as method B) | - _from_interpreted_entry (same value as method B) | - _from_compiled_entry (same value as method B) | - | - | -[Method B: RW] +--------+ - _constMethod ----> [ConstMethod: RO] | - _adapter_trampoline --+--->(AdapterHandlerEntry* ptr: RW)-+ - | - +-------------------------------+ - | - +----> [AdapterHandlerEntry] (allocated at run time) - _fingerprint - _c2i_entry ---------------------------------+->[c2i entry..] - _i2i_entry -------------+ _i2c_entry ---------------+-> [i2c entry..] | - _from_interpreted_entry | _c2i_unverified_entry | | - | | _c2i_no_clinit_check_entry| | - | | (_cds_entry_table: CODE) | | - | +->[0]: jmp _entry_table[0] --> (i2i_entry_for "zero_locals") | | - | | (allocated at run time) | | - | | ... [asm code ...] | | - +-[not compiled]-+ [n]: jmp _entry_table[n] | | - | | | - | | | - +-[compiled]-------------------------------------------------------------------+ | - | - _from_compiled_entry------------> (_c2i_entry_trampoline: CODE) | - [jmp c2i_entry] ------------------------------------------------------+ - -***/ - // Called when the method_holder is getting linked. Setup entrypoints so the method // is ready to be called from interpreter, compiler, and vtables. void Method::link_method(const methodHandle& h_method, TRAPS) { // If the code cache is full, we may reenter this function for the // leftover methods that weren't linked. - if (is_shared()) { - // Can't assert that the adapters are sane, because methods get linked before - // the interpreter is generated, and hence before its adapters are generated. - // If you messed them up you will notice soon enough though, don't you worry. - if (adapter() != NULL) { - return; - } - } else if (_i2i_entry != NULL) { + if (_i2i_entry != NULL) { return; } assert( _code == NULL, "nothing compiled yet" ); @@ -1216,13 +1141,11 @@ void Method::link_method(const methodHandle& h_method, TRAPS) { // Setup interpreter entrypoint assert(this == h_method(), "wrong h_method()" ); - if (!is_shared()) { - assert(adapter() == NULL, "init'd to NULL"); - address entry = Interpreter::entry_for_method(h_method); - assert(entry != NULL, "interpreter entry must be non-null"); - // Sets both _i2i_entry and _from_interpreted_entry - set_interpreter_entry(entry); - } + assert(adapter() == NULL, "init'd to NULL"); + address entry = Interpreter::entry_for_method(h_method); + assert(entry != NULL, "interpreter entry must be non-null"); + // Sets both _i2i_entry and _from_interpreted_entry + set_interpreter_entry(entry); // Don't overwrite already registered native entries. 
if (is_native() && !has_native_function()) { @@ -1242,7 +1165,6 @@ void Method::link_method(const methodHandle& h_method, TRAPS) { (void) make_adapters(h_method, CHECK); // ONLY USE the h_method now as make_adapter may have blocked - } address Method::make_adapters(const methodHandle& mh, TRAPS) { @@ -1261,25 +1183,13 @@ address Method::make_adapters(const methodHandle& mh, TRAPS) { } } - if (mh->is_shared()) { - assert(mh->adapter() == adapter, "must be"); - assert(mh->_from_compiled_entry != NULL, "must be"); - } else { - mh->set_adapter_entry(adapter); - mh->_from_compiled_entry = adapter->get_c2i_entry(); - } + mh->set_adapter_entry(adapter); + mh->_from_compiled_entry = adapter->get_c2i_entry(); return adapter->get_c2i_entry(); } void Method::restore_unshareable_info(TRAPS) { assert(is_method() && is_valid_method(this), "ensure C++ vtable is restored"); - - // Since restore_unshareable_info can be called more than once for a method, don't - // redo any work. - if (adapter() == NULL) { - methodHandle mh(THREAD, this); - link_method(mh, CHECK); - } } address Method::from_compiled_entry_no_trampoline() const { @@ -1960,7 +1870,7 @@ void Method::clear_all_breakpoints() { #endif // INCLUDE_JVMTI -int Method::invocation_count() { +int Method::invocation_count() const { MethodCounters* mcs = method_counters(); MethodData* mdo = method_data(); if (((mcs != NULL) ? mcs->invocation_counter()->carry() : false) || @@ -1972,7 +1882,7 @@ int Method::invocation_count() { } } -int Method::backedge_count() { +int Method::backedge_count() const { MethodCounters* mcs = method_counters(); MethodData* mdo = method_data(); if (((mcs != NULL) ? mcs->backedge_counter()->carry() : false) || diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp index 2944946238fa3897646440f754ba60b36237e1c5..d94d428583188efa4c39a8abc6a1d5e728faeb0d 100644 --- a/src/hotspot/share/oops/method.hpp +++ b/src/hotspot/share/oops/method.hpp @@ -76,6 +76,7 @@ class Method : public Metadata { ConstMethod* _constMethod; // Method read-only data. MethodData* _method_data; MethodCounters* _method_counters; + AdapterHandlerEntry* _adapter; AccessFlags _access_flags; // Access flags int _vtable_index; // vtable index of this method (see VtableIndexFlag) // note: can have vtables with >2**16 elements (because of inheritance) @@ -98,7 +99,7 @@ class Method : public Metadata { JFR_ONLY(DEFINE_TRACE_FLAG;) #ifndef PRODUCT - int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging) + int64_t _compiled_invocation_count; #endif // Entry point for calling both from and to the interpreter. 
address _i2i_entry; // All-args-on-stack calling convention @@ -423,8 +424,8 @@ class Method : public Metadata { } } - int invocation_count(); - int backedge_count(); + int invocation_count() const; + int backedge_count() const; bool was_executed_more_than(int n); bool was_never_executed() { return !was_executed_more_than(0); } @@ -436,11 +437,11 @@ class Method : public Metadata { int interpreter_invocation_count() { return invocation_count(); } #ifndef PRODUCT - int compiled_invocation_count() const { return _compiled_invocation_count; } - void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; } + int64_t compiled_invocation_count() const { return _compiled_invocation_count;} + void set_compiled_invocation_count(int count) { _compiled_invocation_count = (int64_t)count; } #else // for PrintMethodData in a product build - int compiled_invocation_count() const { return 0; } + int64_t compiled_invocation_count() const { return 0; } #endif // not PRODUCT // Clear (non-shared space) pointers which could not be relevant @@ -464,13 +465,7 @@ private: public: static void set_code(const methodHandle& mh, CompiledMethod* code); void set_adapter_entry(AdapterHandlerEntry* adapter) { - constMethod()->set_adapter_entry(adapter); - } - void set_adapter_trampoline(AdapterHandlerEntry** trampoline) { - constMethod()->set_adapter_trampoline(trampoline); - } - void update_adapter_trampoline(AdapterHandlerEntry* adapter) { - constMethod()->update_adapter_trampoline(adapter); + _adapter = adapter; } void set_from_compiled_entry(address entry) { _from_compiled_entry = entry; @@ -481,7 +476,7 @@ public: address get_c2i_unverified_entry(); address get_c2i_no_clinit_check_entry(); AdapterHandlerEntry* adapter() const { - return constMethod()->adapter(); + return _adapter; } // setup entry points void link_method(const methodHandle& method, TRAPS); @@ -516,8 +511,6 @@ public: address interpreter_entry() const { return _i2i_entry; } // Only used when first initialize so we can set _i2i_entry and _from_interpreted_entry void set_interpreter_entry(address entry) { - assert(!is_shared(), - "shared method's interpreter entry should not be changed at run time"); if (_i2i_entry != entry) { _i2i_entry = entry; } diff --git a/src/hotspot/share/oops/methodData.cpp b/src/hotspot/share/oops/methodData.cpp index 54aa8ec0a4edb5d63d38d82c7cb25394a430049e..1aa89934562018aef65e8f1dda0c5c471c6a3440 100644 --- a/src/hotspot/share/oops/methodData.cpp +++ b/src/hotspot/share/oops/methodData.cpp @@ -32,6 +32,7 @@ #include "interpreter/linkResolver.hpp" #include "memory/metaspaceClosure.hpp" #include "memory/resourceArea.hpp" +#include "oops/klass.inline.hpp" #include "oops/methodData.inline.hpp" #include "prims/jvmtiRedefineClasses.hpp" #include "runtime/arguments.hpp" diff --git a/src/hotspot/share/oops/methodData.hpp b/src/hotspot/share/oops/methodData.hpp index a02362b01f861ee2d2baab5d271af536db9579d9..a33b118334e4af5353a0e4ee5485ad2462257026 100644 --- a/src/hotspot/share/oops/methodData.hpp +++ b/src/hotspot/share/oops/methodData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "oops/method.hpp" #include "oops/oop.hpp" #include "runtime/atomic.hpp" +#include "runtime/mutex.hpp" #include "utilities/align.hpp" #include "utilities/copy.hpp" diff --git a/src/hotspot/share/oops/objArrayKlass.hpp b/src/hotspot/share/oops/objArrayKlass.hpp index e2feb61f4e9d6afe3e8663704b5d9bd1f87a5390..6ed8ae6f760426a964bc6d546f69fe22516ab2f9 100644 --- a/src/hotspot/share/oops/objArrayKlass.hpp +++ b/src/hotspot/share/oops/objArrayKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,10 +25,11 @@ #ifndef SHARE_OOPS_OBJARRAYKLASS_HPP #define SHARE_OOPS_OBJARRAYKLASS_HPP -#include "classfile/classLoaderData.hpp" #include "oops/arrayKlass.hpp" #include "utilities/macros.hpp" +class ClassLoaderData; + // ObjArrayKlass is the klass for objArrays class ObjArrayKlass : public ArrayKlass { diff --git a/src/hotspot/share/oops/recordComponent.cpp b/src/hotspot/share/oops/recordComponent.cpp index fb0b167a415a93700c1babeb22f802e416258972..c01b3c03386fd00a8c42300a11f61409271daae2 100644 --- a/src/hotspot/share/oops/recordComponent.cpp +++ b/src/hotspot/share/oops/recordComponent.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,6 @@ #include "precompiled.hpp" #include "logging/log.hpp" #include "memory/metadataFactory.hpp" -#include "memory/metaspace.hpp" #include "memory/metaspaceClosure.hpp" #include "oops/annotations.hpp" #include "oops/instanceKlass.hpp" diff --git a/src/hotspot/share/oops/typeArrayKlass.hpp b/src/hotspot/share/oops/typeArrayKlass.hpp index 436d369f340cbd0b81eb1606dfeb57380031c7bb..0c57c8a375fd8758e572888db960617c566aecd6 100644 --- a/src/hotspot/share/oops/typeArrayKlass.hpp +++ b/src/hotspot/share/oops/typeArrayKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,9 +25,10 @@ #ifndef SHARE_OOPS_TYPEARRAYKLASS_HPP #define SHARE_OOPS_TYPEARRAYKLASS_HPP -#include "classfile/classLoaderData.hpp" #include "oops/arrayKlass.hpp" +class ClassLoaderData; + // A TypeArrayKlass is the klass of a typeArray // It contains the type and size of the elements diff --git a/src/hotspot/share/opto/addnode.cpp b/src/hotspot/share/opto/addnode.cpp index cb4f26ca3f7a6c4ac20b8df7d9c73ed70d87f470..cfcf068242a5e45e15d7a564e656cc095609b4c5 100644 --- a/src/hotspot/share/opto/addnode.cpp +++ b/src/hotspot/share/opto/addnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -332,6 +332,23 @@ Node *AddINode::Ideal(PhaseGVN *phase, bool can_reshape) { } } + // Convert (x >>> rshift) + (x << lshift) into RotateRight(x, rshift) + if (Matcher::match_rule_supported(Op_RotateRight) && + ((op1 == Op_URShiftI && op2 == Op_LShiftI) || (op1 == Op_LShiftI && op2 == Op_URShiftI)) && + in1->in(1) != NULL && in1->in(1) == in2->in(1)) { + Node* rshift = op1 == Op_URShiftI ? in1->in(2) : in2->in(2); + Node* lshift = op1 == Op_URShiftI ? in2->in(2) : in1->in(2); + if (rshift != NULL && lshift != NULL) { + const TypeInt* rshift_t = phase->type(rshift)->isa_int(); + const TypeInt* lshift_t = phase->type(lshift)->isa_int(); + if (lshift_t != NULL && lshift_t->is_con() && + rshift_t != NULL && rshift_t->is_con() && + ((lshift_t->get_con() & 0x1F) == (32 - (rshift_t->get_con() & 0x1F)))) { + return new RotateRightNode(in1->in(1), phase->intcon(rshift_t->get_con() & 0x1F), TypeInt::INT); + } + } + } + return AddNode::Ideal(phase, can_reshape); } @@ -448,6 +465,24 @@ Node *AddLNode::Ideal(PhaseGVN *phase, bool can_reshape) { return new AddLNode(shift,in2->in(2)); } + // Convert (x >>> rshift) + (x << lshift) into RotateRight(x, rshift) + if (Matcher::match_rule_supported(Op_RotateRight) && + ((op1 == Op_URShiftL && op2 == Op_LShiftL) || (op1 == Op_LShiftL && op2 == Op_URShiftL)) && + in1->in(1) != NULL && in1->in(1) == in2->in(1)) { + Node* rshift = op1 == Op_URShiftL ? in1->in(2) : in2->in(2); + Node* lshift = op1 == Op_URShiftL ? in2->in(2) : in1->in(2); + if (rshift != NULL && lshift != NULL) { + const TypeInt* rshift_t = phase->type(rshift)->isa_int(); + const TypeInt* lshift_t = phase->type(lshift)->isa_int(); + if (lshift_t != NULL && lshift_t->is_con() && + rshift_t != NULL && rshift_t->is_con() && + ((lshift_t->get_con() & 0x3F) == (64 - (rshift_t->get_con() & 0x3F)))) { + return new RotateRightNode(in1->in(1), phase->intcon(rshift_t->get_con() & 0x3F), TypeLong::LONG); + } + } + } + + return AddNode::Ideal(phase, can_reshape); } @@ -878,6 +913,22 @@ const Type *OrLNode::add_ring( const Type *t0, const Type *t1 ) const { } //============================================================================= + +const Type* XorINode::Value(PhaseGVN* phase) const { + Node* in1 = in(1); + Node* in2 = in(2); + const Type* t1 = phase->type(in1); + const Type* t2 = phase->type(in2); + if (t1 == Type::TOP || t2 == Type::TOP) { + return Type::TOP; + } + // x ^ x ==> 0 + if (in1->eqv_uncast(in2)) { + return add_id(); + } + return AddNode::Value(phase); +} + //------------------------------add_ring--------------------------------------- // Supplied function returns the sum of the inputs IN THE CURRENT RING. For // the logical operations the ring's ADD is really a logical OR function. 
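The rotate conversions added above rely on a simple bit-level identity; the following standalone C++ sketch (illustrative only, not HotSpot code) shows why the shift-add form equals a rotation when the two shift amounts sum to the bit width, which is exactly the precondition the constant checks enforce:

    #include <cassert>
    #include <cstdint>

    // (x >>> n) + (x << (32 - n)): the two operands occupy disjoint bit ranges,
    // so the addition cannot carry and behaves like bitwise OR, i.e. a rotation.
    uint32_t rotr_via_add(uint32_t x, unsigned n) { // n in [1, 31]
      return (x >> n) + (x << (32 - n));
    }

    uint32_t rotr_via_or(uint32_t x, unsigned n) {  // canonical rotate right
      return (x >> n) | (x << (32 - n));
    }

    int main() {
      for (unsigned n = 1; n < 32; n++) {
        assert(rotr_via_add(0xDEADBEEFu, n) == rotr_via_or(0xDEADBEEFu, n));
      }
      return 0;
    }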
@@ -913,6 +964,20 @@ const Type *XorLNode::add_ring( const Type *t0, const Type *t1 ) const { return TypeLong::make( r0->get_con() ^ r1->get_con() ); } +const Type* XorLNode::Value(PhaseGVN* phase) const { + Node* in1 = in(1); + Node* in2 = in(2); + const Type* t1 = phase->type(in1); + const Type* t2 = phase->type(in2); + if (t1 == Type::TOP || t2 == Type::TOP) { + return Type::TOP; + } + // x ^ x ==> 0 + if (in1->eqv_uncast(in2)) { + return add_id(); + } + return AddNode::Value(phase); +} Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, const Type* t, PhaseGVN& gvn) { bool is_int = gvn.type(a)->isa_int(); diff --git a/src/hotspot/share/opto/addnode.hpp b/src/hotspot/share/opto/addnode.hpp index 610bfffce39c87776f96f109647634bfbd46b03d..b23fc9b18779c4782942a0d16f4a16610cf9178f 100644 --- a/src/hotspot/share/opto/addnode.hpp +++ b/src/hotspot/share/opto/addnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -212,6 +212,7 @@ public: virtual const Type *add_ring( const Type *, const Type * ) const; virtual const Type *add_id() const { return TypeInt::ZERO; } virtual const Type *bottom_type() const { return TypeInt::INT; } + virtual const Type *Value(PhaseGVN *phase) const; virtual uint ideal_reg() const { return Op_RegI; } }; @@ -224,6 +225,7 @@ public: virtual const Type *add_ring( const Type *, const Type * ) const; virtual const Type *add_id() const { return TypeLong::ZERO; } virtual const Type *bottom_type() const { return TypeLong::LONG; } + virtual const Type *Value(PhaseGVN *phase) const; virtual uint ideal_reg() const { return Op_RegL; } }; diff --git a/src/hotspot/share/opto/arraycopynode.cpp b/src/hotspot/share/opto/arraycopynode.cpp index e5be140901d8373150d38c16bf05340fd3fd5712..f0e25b8f81e2e2130e9a829a67cb523ea945e9b3 100644 --- a/src/hotspot/share/opto/arraycopynode.cpp +++ b/src/hotspot/share/opto/arraycopynode.cpp @@ -183,15 +183,17 @@ Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int c Node* in_mem = in(TypeFunc::Memory); const Type* src_type = phase->type(base_src); - - MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem(); - const TypeInstPtr* inst_src = src_type->isa_instptr(); - if (inst_src == NULL) { return NULL; } + MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem(); + PhaseIterGVN* igvn = phase->is_IterGVN(); + if (igvn != NULL) { + igvn->_worklist.push(mem); + } + if (!inst_src->klass_is_exact()) { ciInstanceKlass* ik = inst_src->klass()->as_instance_klass(); assert(!ik->is_interface() && !ik->has_subklass(), "inconsistent klass hierarchy"); diff --git a/src/hotspot/share/opto/chaitin.hpp b/src/hotspot/share/opto/chaitin.hpp index e9db98395ddb915641b425ee7f598a81837ddf57..29c58d6efe70ca976fec1c0d84fa663c82fabe9d 100644 --- a/src/hotspot/share/opto/chaitin.hpp +++ b/src/hotspot/share/opto/chaitin.hpp @@ -443,7 +443,7 @@ class PhaseChaitin : public PhaseRegAlloc { // Helper functions for Split() uint split_DEF(Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ); - uint split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int
slidx ); + int split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ); //------------------------------clone_projs------------------------------------ // After cloning some rematerialized instruction, clone any MachProj's that diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp index 41dcfacfa7e66ad6c728ffa4628a4437db608dd5..ac2202bebb94f809de4458e47f0e1b58e9b66c16 100644 --- a/src/hotspot/share/opto/classes.hpp +++ b/src/hotspot/share/opto/classes.hpp @@ -200,6 +200,7 @@ macro(Lock) macro(Loop) macro(LoopLimit) macro(Mach) +macro(MachNullCheck) macro(MachProj) macro(MulAddS2I) macro(MaxI) diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index 7fa125351e29db26ce9f758326c25e242f608cb0..e6e9f04dcf6a735e481197eea71124102eee6b9a 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -4161,10 +4161,10 @@ Node* Compile::conv_I2X_index(PhaseGVN* phase, Node* idx, const TypeInt* sizetyp } // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check) -Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl) { +Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) { if (ctrl != NULL) { // Express control dependency by a CastII node with a narrow type. - value = new CastIINode(value, itype, false, true /* range check dependency */); + value = new CastIINode(value, itype, carry_dependency, true /* range check dependency */); // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L // node from floating above the range check during loop optimizations.
Otherwise, the // ConvI2L node may be eliminated independently of the range check, causing the data path @@ -4790,6 +4790,30 @@ void Compile::igv_print_method_to_network(const char* phase_name) { } #endif -void Compile::add_native_invoker(BufferBlob* stub) { +void Compile::add_native_invoker(RuntimeStub* stub) { _native_invokers.append(stub); } + +Node* Compile::narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res) { + if (type != NULL && phase->type(value)->higher_equal(type)) { + return value; + } + Node* result = NULL; + if (bt == T_BYTE) { + result = phase->transform(new LShiftINode(value, phase->intcon(24))); + result = new RShiftINode(result, phase->intcon(24)); + } else if (bt == T_BOOLEAN) { + result = new AndINode(value, phase->intcon(0xFF)); + } else if (bt == T_CHAR) { + result = new AndINode(value, phase->intcon(0xFFFF)); + } else { + assert(bt == T_SHORT, "unexpected narrow type"); + result = phase->transform(new LShiftINode(value, phase->intcon(16))); + result = new RShiftINode(result, phase->intcon(16)); + } + if (transform_res) { + result = phase->transform(result); + } + return result; +} + diff --git a/src/hotspot/share/opto/compile.hpp b/src/hotspot/share/opto/compile.hpp index ae9d6a3488fe2f01c3c5086c4343231af51a01f8..9b342dc9eea4ab50b097b7483186ee8f73d12059 100644 --- a/src/hotspot/share/opto/compile.hpp +++ b/src/hotspot/share/opto/compile.hpp @@ -388,7 +388,7 @@ class Compile : public Phase { int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining) uint _number_of_mh_late_inlines; // number of method handle late inlining still pending - GrowableArray<BufferBlob*> _native_invokers; + GrowableArray<RuntimeStub*> _native_invokers; // Inlining may not happen in parse order which would make // PrintInlining output confusing.
Keep track of PrintInlining @@ -951,9 +951,9 @@ class Compile : public Phase { _vector_reboxing_late_inlines.push(cg); } - void add_native_invoker(BufferBlob* stub); + void add_native_invoker(RuntimeStub* stub); - const GrowableArray<BufferBlob*>& native_invokers() const { return _native_invokers; } + const GrowableArray<RuntimeStub*>& native_invokers() const { return _native_invokers; } void remove_useless_nodes (GrowableArray<Node*>& node_list, Unique_Node_List &useful); @@ -1157,7 +1157,7 @@ class Compile : public Phase { Node* ctrl = NULL); // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check) - static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl); + static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false); // Auxiliary methods for randomized fuzzing/stressing int random(); @@ -1192,9 +1192,10 @@ class Compile : public Phase { bool has_exception_backedge() const { return _exception_backedge; } #endif - static bool - push_thru_add(PhaseGVN* phase, Node* z, const TypeInteger* tz, const TypeInteger*& rx, const TypeInteger*& ry, - BasicType bt); + static bool push_thru_add(PhaseGVN* phase, Node* z, const TypeInteger* tz, const TypeInteger*& rx, const TypeInteger*& ry, + BasicType bt); + + static Node* narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res); }; #endif // SHARE_OPTO_COMPILE_HPP diff --git a/src/hotspot/share/opto/gcm.cpp b/src/hotspot/share/opto/gcm.cpp index 816ec3b2b0e469637821f75b05613a1995fa389f..6195fb3e7b2ced37e5dd2709c6a25076837ae510 100644 --- a/src/hotspot/share/opto/gcm.cpp +++ b/src/hotspot/share/opto/gcm.cpp @@ -770,7 +770,24 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) { // Add an anti-dep edge, and squeeze 'load' into the highest block. assert(store != load->find_exact_control(load->in(0)), "dependence cycle found"); if (verify) { - assert(store->find_edge(load) != -1, "missing precedence edge"); +#ifdef ASSERT + // We expect an anti-dependence edge from 'load' to 'store', except when + // implicit_null_check() has hoisted 'store' above its early block to + // perform an implicit null check, and 'load' is placed in the null + // block. In this case it is safe to ignore the anti-dependence, as the + // null block is only reached if 'store' tries to write to null. + Block* store_null_block = NULL; + Node* store_null_check = store->find_out_with(Op_MachNullCheck); + if (store_null_check != NULL) { + Node* if_true = store_null_check->find_out_with(Op_IfTrue); + assert(if_true != NULL, "null check without null projection"); + Node* null_block_region = if_true->find_out_with(Op_Region); + assert(null_block_region != NULL, "null check without null region"); + store_null_block = get_block_for_node(null_block_region); + } +#endif + assert(LCA == store_null_block || store->find_edge(load) != -1, + "missing precedence edge"); } else { store->add_prec(load); } diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp index 0c9199054298fb4a3cb1ed5bbbb9cab431dd1547..69e3c4fa9dde33df0064d50888fa11025f6038c2 100644 --- a/src/hotspot/share/opto/graphKit.cpp +++ b/src/hotspot/share/opto/graphKit.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -2000,9 +2000,9 @@ void GraphKit::increment_counter(address counter_addr) { void GraphKit::increment_counter(Node* counter_addr) { int adr_type = Compile::AliasIdxRaw; Node* ctrl = control(); - Node* cnt = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type, MemNode::unordered); - Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1))); - store_to_memory(ctrl, counter_addr, incr, T_INT, adr_type, MemNode::unordered); + Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, adr_type, MemNode::unordered); + Node* incr = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1))); + store_to_memory(ctrl, counter_addr, incr, T_LONG, adr_type, MemNode::unordered); } @@ -2632,9 +2632,9 @@ Node* GraphKit::make_native_call(const TypeFunc* call_type, uint nargs, ciNative address call_addr = nep->entry_point(); if (nep->need_transition()) { - BufferBlob* invoker = SharedRuntime::make_native_invoker(call_addr, - nep->shadow_space(), - arg_regs, ret_regs); + RuntimeStub* invoker = SharedRuntime::make_native_invoker(call_addr, + nep->shadow_space(), + arg_regs, ret_regs); if (invoker == NULL) { C->record_failure("native invoker not implemented on this platform"); return NULL; diff --git a/src/hotspot/share/opto/idealGraphPrinter.cpp b/src/hotspot/share/opto/idealGraphPrinter.cpp index c7f1a17aa8c7b6aa9bf0044c2dc50d57805ebcc8..11dfc43a79f61b314a9d92101a1150a67fc89af9 100644 --- a/src/hotspot/share/opto/idealGraphPrinter.cpp +++ b/src/hotspot/share/opto/idealGraphPrinter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "opto/parse.hpp" #include "runtime/threadCritical.hpp" #include "runtime/threadSMR.hpp" +#include "utilities/stringUtils.hpp" #ifndef PRODUCT @@ -378,9 +379,39 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) { print_prop("block", C->cfg()->get_block(0)->_pre_order); } else { print_prop("block", block->_pre_order); + // Print estimated execution frequency, normalized within a [0,1] range. + buffer[0] = 0; + stringStream freq(buffer, sizeof(buffer) - 1); + // Higher precision has no practical effect in visualizations. + freq.print("%.8f", block->_freq / _max_freq); + assert(freq.size() < sizeof(buffer), "size in range"); + // Enforce dots as decimal separators, as required by IGV. + StringUtils::replace_no_expand(buffer, ",", "."); + print_prop("frequency", buffer); } } + switch (t->category()) { + case Type::Category::Data: + print_prop("category", "data"); + break; + case Type::Category::Memory: + print_prop("category", "memory"); + break; + case Type::Category::Mixed: + print_prop("category", "mixed"); + break; + case Type::Category::Control: + print_prop("category", "control"); + break; + case Type::Category::Other: + print_prop("category", "other"); + break; + case Type::Category::Undef: + print_prop("category", "undef"); + break; + } + const jushort flags = node->flags(); if (flags & Node::Flag_is_Copy) { print_prop("is_copy", "true"); @@ -649,6 +680,16 @@ void IdealGraphPrinter::print(const char *name, Node *node) { VectorSet temp_set; head(NODES_ELEMENT); + if (C->cfg() != NULL) { + // Compute the maximum estimated frequency in the current graph. 
+ _max_freq = 1.0e-6; + for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) { + Block* block = C->cfg()->get_block(i); + if (block->_freq > _max_freq) { + _max_freq = block->_freq; + } + } + } walk_nodes(node, false, &temp_set); tail(NODES_ELEMENT); diff --git a/src/hotspot/share/opto/idealGraphPrinter.hpp b/src/hotspot/share/opto/idealGraphPrinter.hpp index 48da2b2970f16b496f54e7384aa94061352e0f9c..6d8db169b9f36a5b736f686a587434d94d56490a 100644 --- a/src/hotspot/share/opto/idealGraphPrinter.hpp +++ b/src/hotspot/share/opto/idealGraphPrinter.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -92,6 +92,7 @@ class IdealGraphPrinter : public CHeapObj<mtCompiler> { PhaseChaitin* _chaitin; bool _traverse_outs; Compile *C; + double _max_freq; void print_method(ciMethod *method, int bci, InlineTree *tree); void print_inline_tree(InlineTree *tree); diff --git a/src/hotspot/share/opto/ifnode.cpp b/src/hotspot/share/opto/ifnode.cpp index 2962476532472f1c378abe5cd6ba10422d89d82b..54ab48c47dd742b2e5ea2b8325f3a9a7f876b701 100644 --- a/src/hotspot/share/opto/ifnode.cpp +++ b/src/hotspot/share/opto/ifnode.cpp @@ -884,8 +884,6 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f hi_type->_hi == max_jint && lo_type->_lo == min_jint && lo_test != BoolTest::ne) { assert((dom_bool->_test.is_less() && !proj->_con) || (dom_bool->_test.is_greater() && proj->_con), "incorrect test"); - // this test was canonicalized - assert(this_bool->_test.is_less() && fail->_con, "incorrect test"); // this_bool = < // dom_bool = >= (proj = True) or dom_bool = < (proj = False) @@ -908,19 +906,25 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f if (lo_test == BoolTest::gt || lo_test == BoolTest::le) { lo = igvn->transform(new AddINode(lo, igvn->intcon(1))); } - } else { - assert(hi_test == BoolTest::le, "bad test"); + } else if (hi_test == BoolTest::le) { if (lo_test == BoolTest::ge || lo_test == BoolTest::lt) { adjusted_lim = igvn->transform(new SubINode(hi, lo)); adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1))); cond = BoolTest::lt; - } else { - assert(lo_test == BoolTest::gt || lo_test == BoolTest::le, "bad test"); + } else if (lo_test == BoolTest::gt || lo_test == BoolTest::le) { adjusted_lim = igvn->transform(new SubINode(hi, lo)); lo = igvn->transform(new AddINode(lo, igvn->intcon(1))); cond = BoolTest::lt; + } else { + assert(false, "unhandled lo_test: %d", lo_test); + return false; } + } else { + assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled hi_test: %d", hi_test); + return false; } + // this test was canonicalized + assert(this_bool->_test.is_less() && fail->_con, "incorrect test"); } else if (lo_type != NULL && hi_type != NULL && lo_type->_lo > hi_type->_hi && lo_type->_hi == max_jint && hi_type->_lo == min_jint && lo_test != BoolTest::ne) { @@ -947,31 +951,38 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f assert((dom_bool->_test.is_less() && proj->_con) || (dom_bool->_test.is_greater() && !proj->_con), "incorrect test"); - // this test was canonicalized - assert(this_bool->_test.is_less() && !fail->_con, "incorrect test"); cond = (hi_test == BoolTest::le || hi_test == BoolTest::gt) ?
BoolTest::gt : BoolTest::ge; if (lo_test == BoolTest::lt) { if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) { cond = BoolTest::ge; - } else { - assert(hi_test == BoolTest::le || hi_test == BoolTest::gt, "bad test"); + } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) { adjusted_lim = igvn->transform(new SubINode(hi, lo)); adjusted_lim = igvn->transform(new AddINode(adjusted_lim, igvn->intcon(1))); cond = BoolTest::ge; + } else { + assert(false, "unhandled hi_test: %d", hi_test); + return false; } } else if (lo_test == BoolTest::le) { if (hi_test == BoolTest::lt || hi_test == BoolTest::ge) { lo = igvn->transform(new AddINode(lo, igvn->intcon(1))); cond = BoolTest::ge; - } else { - assert(hi_test == BoolTest::le || hi_test == BoolTest::gt, "bad test"); + } else if (hi_test == BoolTest::le || hi_test == BoolTest::gt) { adjusted_lim = igvn->transform(new SubINode(hi, lo)); lo = igvn->transform(new AddINode(lo, igvn->intcon(1))); cond = BoolTest::ge; + } else { + assert(false, "unhandled hi_test: %d", hi_test); + return false; } + } else { + assert(igvn->_worklist.member(in(1)) && in(1)->Value(igvn) != igvn->type(in(1)), "unhandled lo_test: %d", lo_test); + return false; } + // this test was canonicalized + assert(this_bool->_test.is_less() && !fail->_con, "incorrect test"); } else { const TypeInt* failtype = filtered_int_type(igvn, n, proj); if (failtype != NULL) { diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index c147e0ffbb785be1fd7f8197a23ba7f0f650159e..da95ce4f3b9ac0c6a1cc19cc83c6e0a4a46f75e0 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -108,7 +108,9 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) { #endif ciMethod* callee = kit.callee(); const int bci = kit.bci(); - +#ifdef ASSERT + Node* ctrl = kit.control(); +#endif // Try to inline the intrinsic. if ((CheckIntrinsics ? callee->intrinsic_candidate() : true) && kit.try_to_inline(_last_predicate)) { @@ -132,6 +134,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) { } // The intrinsic bailed out + assert(ctrl == kit.control(), "Control flow was added although the intrinsic bailed out"); if (jvms->has_method()) { // Not a root compile. const char* msg; @@ -2198,15 +2201,12 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c Node* receiver = argument(0); // type: oop // Build address expression. - Node* adr; Node* heap_base_oop = top(); - Node* offset = top(); - Node* val; // The base is either a Java object or a value produced by Unsafe.staticFieldBase Node* base = argument(1); // type: oop // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset - offset = argument(2); // type: long + Node* offset = argument(2); // type: long // We currently rely on the cookies produced by Unsafe.xxxFieldOffset // to be plain byte offsets, which are also the same as those accepted // by oopDesc::field_addr. @@ -2214,12 +2214,19 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c "fieldOffset must be byte-scaled"); // 32-bit machines ignore the high half! offset = ConvL2X(offset); - adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed); + + // Save state and restore on bailout + uint old_sp = sp(); + SafePointNode* old_map = clone_map(); + + Node* adr = make_unsafe_address(base, offset, is_store ? 
ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed); if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) { if (type != T_OBJECT) { decorators |= IN_NATIVE; // off-heap primitive access } else { + set_map(old_map); + set_sp(old_sp); return false; // off-heap oop accesses are not supported } } else { @@ -2233,10 +2240,12 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c decorators |= IN_HEAP; } - val = is_store ? argument(4) : NULL; + Node* val = is_store ? argument(4) : NULL; const TypePtr* adr_type = _gvn.type(adr)->isa_ptr(); if (adr_type == TypePtr::NULL_PTR) { + set_map(old_map); + set_sp(old_sp); return false; // off-heap access with zero address } @@ -2246,6 +2255,8 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c if (alias_type->adr_type() == TypeInstPtr::KLASS || alias_type->adr_type() == TypeAryPtr::RANGE) { + set_map(old_map); + set_sp(old_sp); return false; // not supported } @@ -2264,6 +2275,8 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c } if ((bt == T_OBJECT) != (type == T_OBJECT)) { // Don't intrinsify mismatched object accesses + set_map(old_map); + set_sp(old_sp); return false; } mismatched = (bt != type); @@ -2271,6 +2284,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched } + old_map->destruct(&_gvn); assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched"); if (mismatched) { @@ -2505,6 +2519,9 @@ bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadSt assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled"); // 32-bit machines ignore the high half of long offsets offset = ConvL2X(offset); + // Save state and restore on bailout + uint old_sp = sp(); + SafePointNode* old_map = clone_map(); Node* adr = make_unsafe_address(base, offset, ACCESS_WRITE | ACCESS_READ, type, false); const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); @@ -2513,9 +2530,13 @@ bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadSt if (bt != T_ILLEGAL && (is_reference_type(bt) != (type == T_OBJECT))) { // Don't intrinsify mismatched object accesses. + set_map(old_map); + set_sp(old_sp); return false; } + old_map->destruct(&_gvn); + // For CAS, unlike inline_unsafe_access, there seems no point in // trying to refine types. Just use the coarse types here. assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here"); diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp index a4486df9afd4077e601c056b525a230284a103e5..235b03b9a5ce5c9faf08f2ee935726bf2e0750e6 100644 --- a/src/hotspot/share/opto/loopTransform.cpp +++ b/src/hotspot/share/opto/loopTransform.cpp @@ -124,7 +124,10 @@ void IdealLoopTree::compute_trip_count(PhaseIdealLoop* phase) { jlong limit_con = (stride_con > 0) ? limit_type->_hi : limit_type->_lo; int stride_m = stride_con - (stride_con > 0 ? 1 : -1); jlong trip_count = (limit_con - init_con + stride_m)/stride_con; - if (trip_count > 0 && (julong)trip_count < (julong)max_juint) { + // The loop body is always executed at least once even if init >= limit (for stride_con > 0) or + // init <= limit (for stride_con < 0). + trip_count = MAX2(trip_count, (jlong)1); + if (trip_count < (jlong)max_juint) { if (init_n->is_Con() && limit_n->is_Con()) { // Set exact trip count. 
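To make the trip-count clamping above concrete, a small self-contained C++ sketch (illustrative only, not HotSpot code): with init >= limit and a positive stride the raw quotient is non-positive, yet the counted-loop body still runs once, so the result is clamped to at least 1.

    #include <algorithm>
    #include <cstdint>

    // Mirrors trip_count = (limit_con - init_con + stride_m) / stride_con,
    // followed by MAX2(trip_count, (jlong)1).
    int64_t trip_count(int64_t init, int64_t limit, int64_t stride) {
      int64_t stride_m = stride - (stride > 0 ? 1 : -1);
      int64_t raw = (limit - init + stride_m) / stride;
      return std::max<int64_t>(raw, 1);
    }

    // trip_count(0, 10, 3) == 4 (i = 0, 3, 6, 9)
    // trip_count(10, 0, 1) == 1: the body executes once although init >= limit.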
cl->set_exact_trip_count((uint)trip_count); diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp index 38d98fb0fcb393d84efe79581ca0ec5c9b3926b8..9041f77a69ab419e8428a3a4f6c33290633a13b2 100644 --- a/src/hotspot/share/opto/loopnode.cpp +++ b/src/hotspot/share/opto/loopnode.cpp @@ -5301,7 +5301,7 @@ void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) { case Op_HasNegatives: pinned = false; } - if (n->is_CMove()) { + if (n->is_CMove() || n->is_ConstraintCast()) { pinned = false; } if( pinned ) { diff --git a/src/hotspot/share/opto/machnode.hpp b/src/hotspot/share/opto/machnode.hpp index b3fbd39b250883d142e9203304fc6d7c4fcf5243..3a45d1ff402448b66dfdbb49d9f92c3d3efb2eec 100644 --- a/src/hotspot/share/opto/machnode.hpp +++ b/src/hotspot/share/opto/machnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -699,6 +699,7 @@ public: add_req(ctrl); add_req(memop); } + virtual int Opcode() const; virtual uint size_of() const { return sizeof(*this); } virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index 5eb07da817d1ca32cd30ad48c8a6f6a1b7825a5c..88e30ff368767e137005f2090f747cc83858912f 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -450,6 +450,9 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type * Node* n = val->in(MemNode::ValueIn); BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); n = bs->step_over_gc_barrier(n); + if (is_subword_type(ft)) { + n = Compile::narrow_value(ft, n, phi_type, &_igvn, true); + } values.at_put(j, n); } else if(val->is_Proj() && val->in(0) == alloc) { values.at_put(j, _igvn.zerocon(ft)); diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp index 0716c8c1d1860d407b551166c4f1b7f99f5d4a40..440e29f7488d66b3f11a612f6bbf110cfaa1da4e 100644 --- a/src/hotspot/share/opto/memnode.cpp +++ b/src/hotspot/share/opto/memnode.cpp @@ -2079,12 +2079,14 @@ uint LoadNode::match_edge(uint idx) const { // with the value stored truncated to a byte. If no truncation is // needed, the replacement is done in LoadNode::Identity(). // -Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) { +Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem,phase); - if( value && !phase->type(value)->higher_equal( _type ) ) { - Node *result = phase->transform( new LShiftINode(value, phase->intcon(24)) ); - return new RShiftINode(result, phase->intcon(24)); + if (value != NULL) { + Node* narrow = Compile::narrow_value(T_BYTE, value, _type, phase, false); + if (narrow != value) { + return narrow; + } } // Identity call will handle the case where truncation is not needed. 
return LoadNode::Ideal(phase, can_reshape); @@ -2114,8 +2116,12 @@ const Type* LoadBNode::Value(PhaseGVN* phase) const { Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem, phase); - if (value && !phase->type(value)->higher_equal(_type)) - return new AndINode(value, phase->intcon(0xFF)); + if (value != NULL) { + Node* narrow = Compile::narrow_value(T_BOOLEAN, value, _type, phase, false); + if (narrow != value) { + return narrow; + } + } // Identity call will handle the case where truncation is not needed. return LoadNode::Ideal(phase, can_reshape); } @@ -2141,11 +2147,15 @@ const Type* LoadUBNode::Value(PhaseGVN* phase) const { // with the value stored truncated to a char. If no truncation is // needed, the replacement is done in LoadNode::Identity(). // -Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) { +Node* LoadUSNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem,phase); - if( value && !phase->type(value)->higher_equal( _type ) ) - return new AndINode(value,phase->intcon(0xFFFF)); + if (value != NULL) { + Node* narrow = Compile::narrow_value(T_CHAR, value, _type, phase, false); + if (narrow != value) { + return narrow; + } + } // Identity call will handle the case where truncation is not needed. return LoadNode::Ideal(phase, can_reshape); } @@ -2171,12 +2181,14 @@ const Type* LoadUSNode::Value(PhaseGVN* phase) const { // with the value stored truncated to a short. If no truncation is // needed, the replacement is done in LoadNode::Identity(). // -Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) { +Node* LoadSNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem,phase); - if( value && !phase->type(value)->higher_equal( _type ) ) { - Node *result = phase->transform( new LShiftINode(value, phase->intcon(16)) ); - return new RShiftINode(result, phase->intcon(16)); + if (value != NULL) { + Node* narrow = Compile::narrow_value(T_SHORT, value, _type, phase, false); + if (narrow != value) { + return narrow; + } } // Identity call will handle the case where truncation is not needed. return LoadNode::Ideal(phase, can_reshape); diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp index 31f816cb685100b4e5a9d86a50ba59a048d73ac2..f59ab502de8ef167decf325f28c5ac63c79efae4 100644 --- a/src/hotspot/share/opto/parse2.cpp +++ b/src/hotspot/share/opto/parse2.cpp @@ -863,10 +863,16 @@ bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) // Clean the 32-bit int into a real 64-bit offset. // Otherwise, the jint value 0 might turn into an offset of 0x0800000000. - const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin); // Make I2L conversion control dependent to prevent it from // floating above the range check during loop optimizations. - key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control()); + // Do not use a narrow int type here to prevent the data path from dying + // while the control path is not removed. This can happen if the type of key_val + // is later known to be out of bounds of [0, num_cases] and therefore a narrow cast + // would be replaced by TOP while C2 is not able to fold the corresponding range checks. + // Set _carry_dependency for the cast to avoid being removed by IGVN. 
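The Compile::narrow_value refactoring in the surrounding hunks centralizes three classic subword identities. A hedged C++ sketch (illustrative only; shifting negative values is implementation-defined before C++20 but two's-complement in practice, matching the Java semantics these nodes implement):

    #include <cassert>
    #include <cstdint>

    int32_t to_byte(int32_t x)  { return static_cast<int8_t>(x);  } // i2b: sign-extend low 8 bits
    int32_t to_short(int32_t x) { return static_cast<int16_t>(x); } // i2s: sign-extend low 16 bits
    int32_t to_char(int32_t x)  { return x & 0xFFFF; }              // i2c: zero-extend low 16 bits

    // The shift pair emitted by the compiler computes the same sign extension:
    int32_t to_byte_shifts(int32_t x) {
      return static_cast<int32_t>(static_cast<uint32_t>(x) << 24) >> 24;
    }

    int main() {
      assert(to_byte(0x1FF) == -1 && to_byte_shifts(0x1FF) == -1);
      assert(to_short(0x1FFFF) == -1 && to_char(0x1FFFF) == 0xFFFF);
      return 0;
    }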
+#ifdef _LP64 + key_val = C->constrained_convI2L(&_gvn, key_val, TypeInt::INT, control(), true /* carry_dependency */); +#endif // Shift the value by wordsize so we have an index into the table, rather // than a switch value @@ -2586,19 +2592,18 @@ void Parse::do_one_bytecode() { case Bytecodes::_i2b: // Sign extend a = pop(); - a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) ); - a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) ); - push( a ); + a = Compile::narrow_value(T_BYTE, a, NULL, &_gvn, true); + push(a); break; case Bytecodes::_i2s: a = pop(); - a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) ); - a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) ); - push( a ); + a = Compile::narrow_value(T_SHORT, a, NULL, &_gvn, true); + push(a); break; case Bytecodes::_i2c: a = pop(); - push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) ); + a = Compile::narrow_value(T_CHAR, a, NULL, &_gvn, true); + push(a); break; case Bytecodes::_i2f: diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp index 2913936dee82f5106b37c6e682444f25cff761e3..087460e80995d993c7e449910de2078b95bbdd9f 100644 --- a/src/hotspot/share/opto/phaseX.cpp +++ b/src/hotspot/share/opto/phaseX.cpp @@ -330,10 +330,15 @@ void NodeHash::remove_useless_nodes(VectorSet &useful) { void NodeHash::check_no_speculative_types() { #ifdef ASSERT uint max = size(); + Unique_Node_List live_nodes; + Compile::current()->identify_useful_nodes(live_nodes); Node *sentinel_node = sentinel(); for (uint i = 0; i < max; ++i) { Node *n = at(i); - if(n != NULL && n != sentinel_node && n->is_Type() && n->outcnt() > 0) { + if (n != NULL && + n != sentinel_node && + n->is_Type() && + live_nodes.member(n)) { TypeNode* tn = n->as_Type(); const Type* t = tn->type(); const Type* t_no_spec = t->remove_speculative(); diff --git a/src/hotspot/share/opto/reg_split.cpp b/src/hotspot/share/opto/reg_split.cpp index a3c39cbfcae1a447474f7c7f154ff5e50ac8adeb..d112e5f55753496187e40108a96e21f17e608b37 100644 --- a/src/hotspot/share/opto/reg_split.cpp +++ b/src/hotspot/share/opto/reg_split.cpp @@ -183,7 +183,9 @@ uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node ** //------------------------------split_USE-------------------------------------- // Splits at uses can involve redeffing the LRG, so no CISC Spilling there. // Debug uses want to know if def is already stack enabled. -uint PhaseChaitin::split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, Block *b, Node *use, uint useidx, uint maxlrg, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ) { +// Return value: +// -1: bailout, 0: no spillcopy created, 1: created a new spillcopy +int PhaseChaitin::split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, Block *b, Node *use, uint useidx, uint maxlrg, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ) { #ifdef ASSERT // Increment the counter for this lrg splits.at_put(slidx, splits.at(slidx)+1); @@ -211,6 +213,7 @@ uint PhaseChaitin::split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, if( def_down ) { // DEF is DOWN, so connect USE directly to the DEF use->set_req(useidx, def); + return 0; } else { // Block and index where the use occurs. Block *b = _cfg.get_block_for_node(use); @@ -223,15 +226,15 @@ uint PhaseChaitin::split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, // did we fail to split?
if (!spill) { // Bail - return 0; + return -1; } // insert into basic block - insert_proj( b, bindex, spill, maxlrg++ ); + insert_proj( b, bindex, spill, maxlrg ); // Use the new split use->set_req(useidx,spill); + return 1; } // No further split handling needed for this use - return maxlrg; } // End special splitting for debug info live range } // If debug info @@ -253,7 +256,7 @@ uint PhaseChaitin::split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, use->dump(); } #endif - return maxlrg; + return 0; } } @@ -272,15 +275,14 @@ uint PhaseChaitin::split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, } Node *spill = get_spillcopy_wide(spill_type, def, use, useidx ); - if( !spill ) return 0; // Bailed out + if( !spill ) return -1; // Bailed out // Insert SpillCopy before the USE, which uses the reaching DEF as // its input, and defs a new live range, which is used by this node. - insert_proj( b, bindex, spill, maxlrg++ ); + insert_proj( b, bindex, spill, maxlrg ); // Use the spill/clone use->set_req(useidx,spill); - // return updated live range count - return maxlrg; + return 1; } //------------------------------clone_node---------------------------- @@ -956,12 +958,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // This def has been rematerialized a couple of times without // progress. It doesn't care if it lives UP or DOWN, so // spill it down now. - maxlrg = split_USE(MachSpillCopyNode::BasePointerToMem, def,b,n,inpidx,maxlrg,false,false,splits,slidx); + int delta = split_USE(MachSpillCopyNode::BasePointerToMem, def,b,n,inpidx,maxlrg,false,false,splits,slidx); // If it wasn't split bail - if (!maxlrg) { + if (delta < 0) { return 0; } - insidx++; // Reset iterator to skip USE side split + maxlrg += delta; + insidx += delta; // Reset iterator to skip USE side split } else { // Just hook the def edge n->set_req(inpidx, def); @@ -1036,24 +1039,26 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { (!is_vect && umask.is_misaligned_pair()))) { // These need a Split regardless of overlap or pressure // SPLIT - NO DEF - NO CISC SPILL - maxlrg = split_USE(MachSpillCopyNode::Bound, def,b,n,inpidx,maxlrg,dup,false, splits,slidx); + int delta = split_USE(MachSpillCopyNode::Bound, def,b,n,inpidx,maxlrg,dup,false, splits,slidx); // If it wasn't split bail - if (!maxlrg) { + if (delta < 0) { return 0; } - insidx++; // Reset iterator to skip USE side split + maxlrg += delta; + insidx += delta; // Reset iterator to skip USE side split continue; } if (UseFPUForSpilling && n->is_MachCall() && !uup && !dup ) { // The use at the call can force the def down so insert // a split before the use to allow the def more freedom. 
- maxlrg = split_USE(MachSpillCopyNode::CallUse, def,b,n,inpidx,maxlrg,dup,false, splits,slidx); + int delta = split_USE(MachSpillCopyNode::CallUse, def,b,n,inpidx,maxlrg,dup,false, splits,slidx); // If it wasn't split bail - if (!maxlrg) { + if (delta < 0) { return 0; } - insidx++; // Reset iterator to skip USE side split + maxlrg += delta; + insidx += delta; // Reset iterator to skip USE side split continue; } @@ -1084,12 +1089,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { else { // Both are either up or down, and there is no overlap if( dup ) { // If UP, reg->reg copy // COPY ACROSS HERE - NO DEF - NO CISC SPILL - maxlrg = split_USE(MachSpillCopyNode::RegToReg, def,b,n,inpidx,maxlrg,false,false, splits,slidx); + int delta = split_USE(MachSpillCopyNode::RegToReg, def,b,n,inpidx,maxlrg,false,false, splits,slidx); // If it wasn't split bail - if (!maxlrg) { + if (delta < 0) { return 0; } - insidx++; // Reset iterator to skip USE side split + maxlrg += delta; + insidx += delta; // Reset iterator to skip USE side split } else { // DOWN, mem->mem copy // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL @@ -1098,13 +1104,15 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal]; Node *spill = new MachSpillCopyNode(MachSpillCopyNode::MemToReg, def, dmask, *tmp_rm); insert_proj( b, insidx, spill, maxlrg ); + maxlrg++; insidx++; // Then Split-DOWN as if previous Split was DEF - maxlrg = split_USE(MachSpillCopyNode::RegToMem, spill,b,n,inpidx,maxlrg,false,false, splits,slidx); + int delta = split_USE(MachSpillCopyNode::RegToMem, spill,b,n,inpidx,maxlrg,false,false, splits,slidx); // If it wasn't split bail - if (!maxlrg) { + if (delta < 0) { return 0; } - insidx += 2; // Reset iterator to skip USE side splits + maxlrg += delta; + insidx += delta; // Reset iterator to skip USE side splits } } // End else no overlap } // End if dup == uup @@ -1124,12 +1132,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { } } // COPY DOWN HERE - NO DEF - NO CISC SPILL - maxlrg = split_USE(MachSpillCopyNode::RegToMem, def,b,n,inpidx,maxlrg,false,false, splits,slidx); + int delta = split_USE(MachSpillCopyNode::RegToMem, def,b,n,inpidx,maxlrg,false,false, splits,slidx); // If it wasn't split bail - if (!maxlrg) { + if (delta < 0) { return 0; } - insidx++; // Reset iterator to skip USE side split + maxlrg += delta; + insidx += delta; // Reset iterator to skip USE side split // Check for debug-info split. 
Capture it for later // debug splits of the same value if (jvms && jvms->debug_start() <= inpidx && inpidx < oopoff) @@ -1139,17 +1148,18 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { else { // DOWN, Split-UP and check register pressure if( is_high_pressure( b, &lrgs(useidx), insidx ) ) { // COPY UP HERE - NO DEF - CISC SPILL - maxlrg = split_USE(MachSpillCopyNode::MemToReg, def,b,n,inpidx,maxlrg,true,true, splits,slidx); + int delta = split_USE(MachSpillCopyNode::MemToReg, def,b,n,inpidx,maxlrg,true,true, splits,slidx); // If it wasn't split bail - if (!maxlrg) { + if (delta < 0) { return 0; } - insidx++; // Reset iterator to skip USE side split + maxlrg += delta; + insidx += delta; // Reset iterator to skip USE side split } else { // LRP // COPY UP HERE - WITH DEF - NO CISC SPILL - maxlrg = split_USE(MachSpillCopyNode::MemToReg, def,b,n,inpidx,maxlrg,true,false, splits,slidx); + int delta = split_USE(MachSpillCopyNode::MemToReg, def,b,n,inpidx,maxlrg,true,false, splits,slidx); // If it wasn't split bail - if (!maxlrg) { + if (delta < 0) { return 0; } // Flag this lift-up in a low-pressure block as @@ -1160,7 +1170,8 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // Since this is a new DEF, update Reachblock & UP Reachblock[slidx] = n->in(inpidx); UPblock[slidx] = true; - insidx++; // Reset iterator to skip USE side split + maxlrg += delta; + insidx += delta; // Reset iterator to skip USE side split } } // End else DOWN } // End dup != uup @@ -1358,11 +1369,12 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // Grab the UP/DOWN sense for the input u1 = UP[pidx][slidx]; if( u1 != (phi_up != 0)) { - maxlrg = split_USE(MachSpillCopyNode::PhiLocationDifferToInputLocation, def, b, phi, i, maxlrg, !u1, false, splits,slidx); + int delta = split_USE(MachSpillCopyNode::PhiLocationDifferToInputLocation, def, b, phi, i, maxlrg, !u1, false, splits,slidx); // If it wasn't split bail - if (!maxlrg) { + if (delta < 0) { return 0; } + maxlrg += delta; } } // End for all inputs to the Phi } // End for all Phi Nodes diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp index f5cec6b7d05ab9d14fa82d6ee722b7001804a7d0..5017efdb17f87e202a564cedc72a205e2ce5ef5f 100644 --- a/src/hotspot/share/opto/runtime.cpp +++ b/src/hotspot/share/opto/runtime.cpp @@ -47,6 +47,7 @@ #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "oops/objArrayKlass.hpp" +#include "oops/klass.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" #include "opto/ad.hpp" diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp index 00f4a50f291ad90f8ed364190c2616cedf9f3511..90ce0f99c51a5d26721111fba0f337536dcb7244 100644 --- a/src/hotspot/share/opto/superword.cpp +++ b/src/hotspot/share/opto/superword.cpp @@ -2316,18 +2316,13 @@ Node* SuperWord::pick_mem_state(Node_List* pk) { assert(current->is_Mem() && in_bb(current), "unexpected memory"); assert(current != first_mem, "corrupted memory graph"); if (!independent(current, ld)) { -#ifdef ASSERT - // Added assertion code since no case has been observed that should pick the first memory state. - // Remove the assertion code whenever we find a (valid) case that really needs the first memory state. 
- pk->dump(); - first_mem->dump(); - last_mem->dump(); - current->dump(); - ld->dump(); - ld->in(MemNode::Memory)->dump(); - assert(false, "never observed that first memory should be picked"); -#endif - return first_mem; // A later store depends on this load, pick memory state of first load + // A later store depends on this load, pick the memory state of the first load. This can happen, for example, + // if a load pack has interleaving stores that are part of a store pack which, however, is removed at the pack + // filtering stage. This leaves us with only a load pack for which we cannot take the memory state of the + // last load as the remaining unvectorized stores could interfere since they have a dependency to the loads. + // Some stores could be executed before the load vector resulting in a wrong result. We need to take the + // memory state of the first load to prevent this. + return first_mem; } } } diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp index 5c97e1c91e039174dfa4b2c8c08e027b5669f455..5da741526b5325b0f6b9f11e89d642e0693ba992 100644 --- a/src/hotspot/share/opto/type.cpp +++ b/src/hotspot/share/opto/type.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,6 +40,7 @@ #include "opto/opcodes.hpp" #include "opto/type.hpp" #include "utilities/powerOfTwo.hpp" +#include "utilities/stringUtils.hpp" // Portions of code courtesy of Clifford Click @@ -1113,6 +1114,73 @@ void Type::dump_stats() { } #endif +//------------------------------category--------------------------------------- +#ifndef PRODUCT +Type::Category Type::category() const { + const TypeTuple* tuple; + switch (base()) { + case Type::Int: + case Type::Long: + case Type::Half: + case Type::NarrowOop: + case Type::NarrowKlass: + case Type::Array: + case Type::VectorA: + case Type::VectorS: + case Type::VectorD: + case Type::VectorX: + case Type::VectorY: + case Type::VectorZ: + case Type::AnyPtr: + case Type::RawPtr: + case Type::OopPtr: + case Type::InstPtr: + case Type::AryPtr: + case Type::MetadataPtr: + case Type::KlassPtr: + case Type::Function: + case Type::Return_Address: + case Type::FloatTop: + case Type::FloatCon: + case Type::FloatBot: + case Type::DoubleTop: + case Type::DoubleCon: + case Type::DoubleBot: + return Category::Data; + case Type::Memory: + return Category::Memory; + case Type::Control: + return Category::Control; + case Type::Top: + case Type::Abio: + case Type::Bottom: + return Category::Other; + case Type::Bad: + case Type::lastype: + return Category::Undef; + case Type::Tuple: + // Recursive case. Return CatMixed if the tuple contains types of + // different categories (e.g. CallStaticJavaNode's type), or the specific + // category if all types are of the same category (e.g. IfNode's type). 
+ tuple = is_tuple(); + if (tuple->cnt() == 0) { + return Category::Undef; + } else { + Category first = tuple->field_at(0)->category(); + for (uint i = 1; i < tuple->cnt(); i++) { + if (tuple->field_at(i)->category() != first) { + return Category::Mixed; + } + } + return first; + } + default: + assert(false, "unmatched base type: all base types must be categorized"); + } + return Category::Undef; +} +#endif + //------------------------------typerr----------------------------------------- void Type::typerr( const Type *t ) const { #ifndef PRODUCT @@ -4036,15 +4104,23 @@ int TypeInstPtr::hash(void) const { //------------------------------dump2------------------------------------------ // Dump oop Type #ifndef PRODUCT -void TypeInstPtr::dump2( Dict &d, uint depth, outputStream *st ) const { +void TypeInstPtr::dump2(Dict &d, uint depth, outputStream* st) const { // Print the name of the klass. klass()->print_name_on(st); switch( _ptr ) { case Constant: - // TO DO: Make CI print the hex address of the underlying oop. if (WizardMode || Verbose) { - const_oop()->print_oop(st); + ResourceMark rm; + stringStream ss; + + st->print(" "); + const_oop()->print_oop(&ss); + // 'const_oop->print_oop()' may emit newlines('\n') into ss. + // Suppress them so that -XX:+Verbose -XX:+PrintIdeal dumps a one-liner for each node. + char* buf = ss.as_string(/* c_heap= */false); + StringUtils::replace_no_expand(buf, "\n", ""); + st->print_raw(buf); } case BotPTR: if (!WizardMode && !Verbose) { diff --git a/src/hotspot/share/opto/type.hpp b/src/hotspot/share/opto/type.hpp index 3b635adcce5cd2266f90fcb6b8cc75f913a23a2e..e4ebb8e24f8e6978c207db2661ef741cd5034133 100644 --- a/src/hotspot/share/opto/type.hpp +++ b/src/hotspot/share/opto/type.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -364,6 +364,17 @@ public: } virtual void dump2( Dict &d, uint depth, outputStream *st ) const; static void dump_stats(); + // Groups of types, for debugging and visualization only. + enum class Category { + Data, + Memory, + Mixed, // Tuples with types of different categories. + Control, + Other, // {Type::Top, Type::Abio, Type::Bottom}. + Undef // {Type::Bad, Type::lastype}, for completeness. + }; + // Return the category of this type. + Category category() const; static const char* str(const Type* t); #endif diff --git a/src/hotspot/share/opto/vectorIntrinsics.cpp b/src/hotspot/share/opto/vectorIntrinsics.cpp index 218ae76eada653f1160c8e4e1e763b420661bbb6..4f8bb7f08ad96e3b923a678169e8a63263bbddd9 100644 --- a/src/hotspot/share/opto/vectorIntrinsics.cpp +++ b/src/hotspot/share/opto/vectorIntrinsics.cpp @@ -609,8 +609,12 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { Node* base = argument(3); Node* offset = ConvL2X(argument(4)); DecoratorSet decorators = C2_UNSAFE_ACCESS; - Node* addr = make_unsafe_address(base, offset, decorators, (is_mask ? T_BOOLEAN : elem_bt), true); + // Save state and restore on bailout + uint old_sp = sp(); + SafePointNode* old_map = clone_map(); + + Node* addr = make_unsafe_address(base, offset, decorators, (is_mask ? T_BOOLEAN : elem_bt), true); // Can base be NULL? Otherwise, always on-heap access.
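The recurring clone_map()/set_map()/set_sp() sequence introduced in these intrinsics follows a single save-and-restore shape; a hedged C++ sketch with hypothetical stand-in types (illustrative only, not the real GraphKit/SafePointNode API):

    #include <memory>

    // Hypothetical stand-ins for the saved JVM state (assumptions, not HotSpot).
    struct JvmStateSnapshot { unsigned sp; };

    struct IntrinsicKitSketch {
      unsigned sp = 0;
      std::unique_ptr<JvmStateSnapshot> saved;

      void save()    { saved.reset(new JvmStateSnapshot{sp}); } // ~ clone_map()
      void restore() { sp = saved->sp; }                        // ~ set_map()/set_sp()
      void commit()  { saved.reset(); }                         // ~ old_map->destruct()
    };

    // Snapshot the state, attempt the transform, and either commit on success
    // or restore before returning false, so a bailout leaves no partial state.
    bool try_inline(IntrinsicKitSketch& kit, bool transform_succeeds) {
      kit.save();
      if (!transform_succeeds) {
        kit.restore();  // leave the caller's JVM state exactly as it was
        return false;   // clean bailout: fall back to the regular call path
      }
      kit.commit();     // success: the snapshot is no longer needed
      return true;
    }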
bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(gvn().type(base)); @@ -622,6 +626,8 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { // Handle loading masks. // If there is no consistency between array and vector element types, it must be special byte array case or loading masks if (arr_type != NULL && !using_byte_array && elem_bt != arr_type->elem()->array_element_basic_type() && !is_mask) { + set_map(old_map); + set_sp(old_sp); return false; } // Since we are using byte array, we need to double check that the byte operations are supported by backend. @@ -634,6 +640,8 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { is_store, is_store ? "store" : "load", byte_num_elem, type2name(elem_bt)); } + set_map(old_map); + set_sp(old_sp); return false; // not supported } } @@ -644,14 +652,20 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { is_store, is_store ? "store" : "load", num_elem); } + set_map(old_map); + set_sp(old_sp); return false; // not supported } if (!is_store) { if (!arch_supports_vector(Op_LoadVector, num_elem, elem_bt, VecMaskUseLoad)) { + set_map(old_map); + set_sp(old_sp); return false; // not supported } } else { if (!arch_supports_vector(Op_StoreVector, num_elem, elem_bt, VecMaskUseStore)) { + set_map(old_map); + set_sp(old_sp); return false; // not supported } } @@ -666,6 +680,8 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { if (is_store) { Node* val = unbox_vector(argument(6), vbox_type, elem_bt, num_elem); if (val == NULL) { + set_map(old_map); + set_sp(old_sp); return false; // operand unboxing failed } set_all_memory(reset_memory()); @@ -702,6 +718,8 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { set_result(box); } + old_map->destruct(&_gvn); + if (can_access_non_heap) { insert_mem_bar(Op_MemBarCPUOrder); } @@ -779,6 +797,11 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { Node* base = argument(4); Node* offset = ConvL2X(argument(5)); + + // Save state and restore on bailout + uint old_sp = sp(); + SafePointNode* old_map = clone_map(); + Node* addr = make_unsafe_address(base, offset, C2_UNSAFE_ACCESS, elem_bt, true); const TypePtr *addr_type = gvn().type(addr)->isa_ptr(); @@ -786,6 +809,8 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { // The array must be consistent with vector type if (arr_type == NULL || (arr_type != NULL && elem_bt != arr_type->elem()->array_element_basic_type())) { + set_map(old_map); + set_sp(old_sp); return false; } ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass(); @@ -794,6 +819,8 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { ciKlass* vbox_idx_klass = vector_idx_klass->const_oop()->as_instance()->java_lang_Class_klass(); if (vbox_idx_klass == NULL) { + set_map(old_map); + set_sp(old_sp); return false; } @@ -801,12 +828,16 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { Node* index_vect = unbox_vector(argument(7), vbox_idx_type, T_INT, num_elem); if (index_vect == NULL) { + set_map(old_map); + set_sp(old_sp); return false; } const TypeVect* vector_type = TypeVect::make(elem_bt, num_elem); if (is_scatter) { Node* val = unbox_vector(argument(8), vbox_type, elem_bt, num_elem); if (val == NULL) { + set_map(old_map); + set_sp(old_sp); return false; // operand unboxing failed } set_all_memory(reset_memory()); @@ -820,6 +851,8 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { 
set_result(box); } + old_map->destruct(&_gvn); + C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt)))); return true; } diff --git a/src/hotspot/share/opto/vectornode.cpp b/src/hotspot/share/opto/vectornode.cpp index 0979adce7a0e42e5eca098d3f7a05b90c6d8f462..3870d1089f49547ddc82bbee3080ce1f8f5e28f4 100644 --- a/src/hotspot/share/opto/vectornode.cpp +++ b/src/hotspot/share/opto/vectornode.cpp @@ -1225,25 +1225,30 @@ Node* VectorUnboxNode::Ideal(PhaseGVN* phase, bool can_reshape) { ciKlass* vbox_klass = vbox->box_type()->klass(); const TypeVect* in_vt = vbox->vec_type(); const TypeVect* out_vt = type()->is_vect(); - assert(in_vt->length() == out_vt->length(), "mismatch on number of elements"); - Node* value = vbox->in(VectorBoxNode::Value); - - bool is_vector_mask = vbox_klass->is_subclass_of(ciEnv::current()->vector_VectorMask_klass()); - bool is_vector_shuffle = vbox_klass->is_subclass_of(ciEnv::current()->vector_VectorShuffle_klass()); - if (is_vector_mask) { - // VectorUnbox (VectorBox vmask) ==> VectorLoadMask (VectorStoreMask vmask) - value = phase->transform(VectorStoreMaskNode::make(*phase, value, in_vt->element_basic_type(), in_vt->length())); - return new VectorLoadMaskNode(value, out_vt); - } else if (is_vector_shuffle) { - if (is_shuffle_to_vector()) { - // VectorUnbox (VectorBox vshuffle) ==> VectorCastB2X vshuffle - return new VectorCastB2XNode(value, out_vt); + + if (in_vt->length() == out_vt->length()) { + Node* value = vbox->in(VectorBoxNode::Value); + + bool is_vector_mask = vbox_klass->is_subclass_of(ciEnv::current()->vector_VectorMask_klass()); + bool is_vector_shuffle = vbox_klass->is_subclass_of(ciEnv::current()->vector_VectorShuffle_klass()); + if (is_vector_mask) { + // VectorUnbox (VectorBox vmask) ==> VectorLoadMask (VectorStoreMask vmask) + value = phase->transform(VectorStoreMaskNode::make(*phase, value, in_vt->element_basic_type(), in_vt->length())); + return new VectorLoadMaskNode(value, out_vt); + } else if (is_vector_shuffle) { + if (is_shuffle_to_vector()) { + // VectorUnbox (VectorBox vshuffle) ==> VectorCastB2X vshuffle + return new VectorCastB2XNode(value, out_vt); + } else { + // VectorUnbox (VectorBox vshuffle) ==> VectorLoadShuffle vshuffle + return new VectorLoadShuffleNode(value, out_vt); + } } else { - // VectorUnbox (VectorBox vshuffle) ==> VectorLoadShuffle vshuffle - return new VectorLoadShuffleNode(value, out_vt); + // Vector type mismatch is only supported for masks and shuffles, but sometimes it happens in pathological cases. } } else { - assert(false, "type mismatch on vector: %s", vbox_klass->name()->as_utf8()); + // Vector length mismatch. + // This sometimes happens in pathological cases (e.g., when unboxing happens in effectively dead code).
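The vectorIntrinsics.cpp hunks above all follow one pattern: clone the JVM state (map and stack pointer) before graph-mutating work, restore the clone on every "not supported" bailout, and destruct it once the intrinsic is committed. Below is a standalone sketch of that save/restore-on-bailout shape under simplified assumptions; the State struct and try_expand() are illustrative, not HotSpot types.

    #include <cassert>

    struct State { int sp; int nodes; };  // stand-in for sp() and the map

    // Attempt an expansion; if a precondition fails after the state has
    // already been mutated, hand the caller back exactly what it started with.
    static bool try_expand(State& s, bool supported) {
      const State saved = s;      // clone_map() / old_sp in the patch
      s.nodes += 3;               // speculative graph mutation
      s.sp    += 1;
      if (!supported) {           // e.g. arch_supports_vector() said no
        s = saved;                // set_map(old_map); set_sp(old_sp);
        return false;            // not supported
      }
      return true;                // success: saved copy can be discarded
    }

    int main() {
      State s{4, 10};
      assert(!try_expand(s, false) && s.sp == 4 && s.nodes == 10);  // restored
      assert(try_expand(s, true) && s.nodes == 13);                 // committed
      return 0;
    }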
} } } diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp index 917e0bff389e81aec1972e1ea919d4e230e6a33a..bb7d8546162b93cf9a895a9d3bcf2fa2eb7725c2 100644 --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -29,7 +29,6 @@ #include "ci/ciReplay.hpp" #include "classfile/altHashing.hpp" #include "classfile/classFileStream.hpp" -#include "classfile/classLoader.hpp" #include "classfile/javaClasses.hpp" #include "classfile/javaClasses.inline.hpp" #include "classfile/javaThreadStatus.hpp" @@ -289,7 +288,7 @@ JNI_ENTRY(jclass, jni_DefineClass(JNIEnv *env, const char *name, jobject loaderR &st, CHECK_NULL); - if (log_is_enabled(Debug, class, resolve) && k != NULL) { + if (log_is_enabled(Debug, class, resolve)) { trace_class_resolution(k); } diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp index 53ffb2cef3ed2b18251ab5b95fb0ed69e4bcb9af..656a2c0228cb60cbd5d4415bef1a3f5894b566f5 100644 --- a/src/hotspot/share/prims/jvm.cpp +++ b/src/hotspot/share/prims/jvm.cpp @@ -866,7 +866,7 @@ static jclass jvm_define_class_common(const char *name, &st, CHECK_NULL); - if (log_is_enabled(Debug, class, resolve) && k != NULL) { + if (log_is_enabled(Debug, class, resolve)) { trace_class_resolution(k); } @@ -945,19 +945,17 @@ static jclass jvm_lookup_define_class(jclass lookup, const char *name, const char* source = is_nestmate ? host_class->external_name() : "__JVM_LookupDefineClass__"; ClassFileStream st((u1*)buf, len, source, ClassFileStream::verify); - Klass* defined_k; InstanceKlass* ik = NULL; if (!is_hidden) { - defined_k = SystemDictionary::resolve_from_stream(class_name, - class_loader, - protection_domain, - &st, - CHECK_NULL); - - if (log_is_enabled(Debug, class, resolve) && defined_k != NULL) { - trace_class_resolution(defined_k); + ik = SystemDictionary::resolve_from_stream(class_name, + class_loader, + protection_domain, + &st, + CHECK_NULL); + + if (log_is_enabled(Debug, class, resolve)) { + trace_class_resolution(ik); } - ik = InstanceKlass::cast(defined_k); } else { // hidden Handle classData_h(THREAD, JNIHandles::resolve(classData)); ClassLoadInfo cl_info(protection_domain, @@ -968,16 +966,11 @@ static jclass jvm_lookup_define_class(jclass lookup, const char *name, is_hidden, is_strong, vm_annotations); - defined_k = SystemDictionary::parse_stream(class_name, - class_loader, - &st, - cl_info, - CHECK_NULL); - if (defined_k == NULL) { - THROW_MSG_0(vmSymbols::java_lang_Error(), "Failure to define a hidden class"); - } - - ik = InstanceKlass::cast(defined_k); + ik = SystemDictionary::parse_stream(class_name, + class_loader, + &st, + cl_info, + CHECK_NULL); // The hidden class loader data has been artificially been kept alive to // this point. The mirror and any instances of this class have to keep @@ -994,7 +987,7 @@ static jclass jvm_lookup_define_class(jclass lookup, const char *name, ik->is_hidden() ? 
"is hidden" : "is not hidden"); } } - assert(Reflection::is_same_class_package(lookup_k, defined_k), + assert(Reflection::is_same_class_package(lookup_k, ik), "lookup class and defined class are in different packages"); if (init) { @@ -1003,7 +996,7 @@ static jclass jvm_lookup_define_class(jclass lookup, const char *name, ik->link_class(CHECK_NULL); } - return (jclass) JNIHandles::make_local(THREAD, defined_k->java_mirror()); + return (jclass) JNIHandles::make_local(THREAD, ik->java_mirror()); } JVM_ENTRY(jclass, JVM_DefineClass(JNIEnv *env, const char *name, jobject loader, const jbyte *buf, jsize len, jobject pd)) @@ -1070,8 +1063,7 @@ JVM_ENTRY(jclass, JVM_FindLoadedClass(JNIEnv *env, jobject loader, jstring name) Handle h_loader(THREAD, JNIHandles::resolve(loader)); Klass* k = SystemDictionary::find_instance_or_array_klass(klass_name, h_loader, - Handle(), - CHECK_NULL); + Handle()); #if INCLUDE_CDS if (k == NULL) { // If the class is not already loaded, try to see if it's in the shared @@ -1087,31 +1079,41 @@ JVM_END JVM_ENTRY(void, JVM_DefineModule(JNIEnv *env, jobject module, jboolean is_open, jstring version, jstring location, jobjectArray packages)) - Modules::define_module(module, is_open, version, location, packages, CHECK); + Handle h_module (THREAD, JNIHandles::resolve(module)); + Modules::define_module(h_module, is_open, version, location, packages, CHECK); JVM_END JVM_ENTRY(void, JVM_SetBootLoaderUnnamedModule(JNIEnv *env, jobject module)) - Modules::set_bootloader_unnamed_module(module, CHECK); + Handle h_module (THREAD, JNIHandles::resolve(module)); + Modules::set_bootloader_unnamed_module(h_module, CHECK); JVM_END JVM_ENTRY(void, JVM_AddModuleExports(JNIEnv *env, jobject from_module, jstring package, jobject to_module)) - Modules::add_module_exports_qualified(from_module, package, to_module, CHECK); + Handle h_from_module (THREAD, JNIHandles::resolve(from_module)); + Handle h_to_module (THREAD, JNIHandles::resolve(to_module)); + Modules::add_module_exports_qualified(h_from_module, package, h_to_module, CHECK); JVM_END JVM_ENTRY(void, JVM_AddModuleExportsToAllUnnamed(JNIEnv *env, jobject from_module, jstring package)) - Modules::add_module_exports_to_all_unnamed(from_module, package, CHECK); + Handle h_from_module (THREAD, JNIHandles::resolve(from_module)); + Modules::add_module_exports_to_all_unnamed(h_from_module, package, CHECK); JVM_END JVM_ENTRY(void, JVM_AddModuleExportsToAll(JNIEnv *env, jobject from_module, jstring package)) - Modules::add_module_exports(from_module, package, NULL, CHECK); + Handle h_from_module (THREAD, JNIHandles::resolve(from_module)); + Modules::add_module_exports(h_from_module, package, Handle(), CHECK); JVM_END JVM_ENTRY (void, JVM_AddReadsModule(JNIEnv *env, jobject from_module, jobject source_module)) - Modules::add_reads_module(from_module, source_module, CHECK); + Handle h_from_module (THREAD, JNIHandles::resolve(from_module)); + Handle h_source_module (THREAD, JNIHandles::resolve(source_module)); + Modules::add_reads_module(h_from_module, h_source_module, CHECK); JVM_END JVM_ENTRY(void, JVM_DefineArchivedModules(JNIEnv *env, jobject platform_loader, jobject system_loader)) - Modules::define_archived_modules(platform_loader, system_loader, CHECK); + Handle h_platform_loader (THREAD, JNIHandles::resolve(platform_loader)); + Handle h_system_loader (THREAD, JNIHandles::resolve(system_loader)); + Modules::define_archived_modules(h_platform_loader, h_system_loader, CHECK); JVM_END // Reflection support 
////////////////////////////////////////////////////////////////////////////// @@ -2934,8 +2936,6 @@ JVM_END // but is thought to be reliable and simple. In the case, where the receiver is the // same thread as the sender, no VM_Operation is needed. JVM_ENTRY(void, JVM_StopThread(JNIEnv* env, jobject jthread, jobject throwable)) - // A nested ThreadsListHandle will grab the Threads_lock so create - // tlh before we resolve throwable. ThreadsListHandle tlh(thread); oop java_throwable = JNIHandles::resolve(throwable); if (java_throwable == NULL) { @@ -3287,10 +3287,14 @@ JVM_END JVM_ENTRY(jobject, JVM_LatestUserDefinedLoader(JNIEnv *env)) for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) { - vfst.skip_reflection_related_frames(); // Only needed for 1.4 reflection - oop loader = vfst.method()->method_holder()->class_loader(); + InstanceKlass* ik = vfst.method()->method_holder(); + oop loader = ik->class_loader(); if (loader != NULL && !SystemDictionary::is_platform_class_loader(loader)) { - return JNIHandles::make_local(THREAD, loader); + // Skip reflection related frames + if (!ik->is_subclass_of(vmClasses::reflect_MethodAccessorImpl_klass()) && + !ik->is_subclass_of(vmClasses::reflect_ConstructorAccessorImpl_klass())) { + return JNIHandles::make_local(THREAD, loader); + } } } return NULL; diff --git a/src/hotspot/share/prims/jvmtiEnvBase.hpp b/src/hotspot/share/prims/jvmtiEnvBase.hpp index fa516fe568ada6e7ff7fc9ef6b9e93ac17a93c02..ad29077f9ba0282ec5d30ff93fe8033db71c05eb 100644 --- a/src/hotspot/share/prims/jvmtiEnvBase.hpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,6 @@ #ifndef SHARE_PRIMS_JVMTIENVBASE_HPP #define SHARE_PRIMS_JVMTIENVBASE_HPP -#include "classfile/classLoader.hpp" #include "prims/jvmtiEnvThreadState.hpp" #include "prims/jvmtiEventController.hpp" #include "prims/jvmtiThreadState.hpp" @@ -34,7 +33,7 @@ #include "runtime/fieldDescriptor.hpp" #include "runtime/frame.hpp" #include "runtime/thread.hpp" -#include "runtime/vmOperations.hpp" +#include "runtime/vmOperation.hpp" #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" diff --git a/src/hotspot/share/prims/jvmtiImpl.cpp b/src/hotspot/share/prims/jvmtiImpl.cpp index a034eac3a1708190902a6de91dfa08fcf50cc4cb..6fcf47cee6ebc7178fc0b7601ae9ce64c7d7f454 100644 --- a/src/hotspot/share/prims/jvmtiImpl.cpp +++ b/src/hotspot/share/prims/jvmtiImpl.cpp @@ -34,6 +34,7 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" +#include "oops/klass.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/oopHandle.inline.hpp" #include "prims/jvmtiAgentThread.hpp" diff --git a/src/hotspot/share/prims/jvmtiRedefineClasses.hpp b/src/hotspot/share/prims/jvmtiRedefineClasses.hpp index 91d505ebcd96ba4310c1a29908231c045449c62c..35933664a06b83f5f72d23eff28c0710db3854e8 100644 --- a/src/hotspot/share/prims/jvmtiRedefineClasses.hpp +++ b/src/hotspot/share/prims/jvmtiRedefineClasses.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "memory/resourceArea.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.hpp" -#include "runtime/vmOperations.hpp" +#include "runtime/vmOperation.hpp" // Introduction: // diff --git a/src/hotspot/share/prims/jvmtiTagMapTable.cpp b/src/hotspot/share/prims/jvmtiTagMapTable.cpp index 6bfca1371025a1fb6f0ad010f37c341c1f00b6df..d8e297bd9006607f6c2df8b2e51c03f52ee0652c 100644 --- a/src/hotspot/share/prims/jvmtiTagMapTable.cpp +++ b/src/hotspot/share/prims/jvmtiTagMapTable.cpp @@ -215,7 +215,7 @@ void JvmtiTagMapTable::remove_dead_entries(JvmtiEnv* env, bool post_object_free) } // get next entry - entry = (JvmtiTagMapEntry*)HashtableEntry::make_ptr(*p); + entry = *p; } } @@ -252,7 +252,7 @@ void JvmtiTagMapTable::rehash() { p = entry->next_addr(); } // get next entry - entry = (JvmtiTagMapEntry*)HashtableEntry::make_ptr(*p); + entry = *p; } } diff --git a/src/hotspot/share/prims/jvmtiTrace.hpp b/src/hotspot/share/prims/jvmtiTrace.hpp index 61e657f6ef1515de500ca6dd8c09a6724efcd79d..51c8f2d1e3ce40b7b712826a36fa0ea570b1bf52 100644 --- a/src/hotspot/share/prims/jvmtiTrace.hpp +++ b/src/hotspot/share/prims/jvmtiTrace.hpp @@ -31,7 +31,6 @@ #include "prims/jvmtiEventController.hpp" #include "prims/jvmtiUtil.hpp" #include "runtime/stackValueCollection.hpp" -#include "runtime/vmOperations.hpp" /////////////////////////////////////////////////////////////// // diff --git a/src/hotspot/share/prims/stackwalk.cpp b/src/hotspot/share/prims/stackwalk.cpp index 039058f064ec087a1f5f8047fdc7afcad7090e2d..321da9192f26c60dd45137223ad395bf3d5dc540 100644 --- a/src/hotspot/share/prims/stackwalk.cpp +++ b/src/hotspot/share/prims/stackwalk.cpp @@ -38,6 +38,7 @@ #include "runtime/globals.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/keepStackGCProcessed.hpp" #include "runtime/stackWatermarkSet.hpp" #include "runtime/thread.inline.hpp" #include "runtime/vframe.inline.hpp" @@ -406,6 +407,7 @@ oop StackWalk::fetchFirstBatch(BaseFrameStream& stream, Handle stackStream, int end_index = start_index; int numFrames = 0; if (!stream.at_end()) { + KeepStackGCProcessedMark keep_stack(THREAD->as_Java_thread()); numFrames = fill_in_frames(mode, stream, frame_count, start_index, frames_array, end_index, CHECK_NULL); if (numFrames < 1) { @@ -488,7 +490,7 @@ jint StackWalk::fetchNextBatch(Handle stackStream, jlong mode, jlong magic, // an accurate hint suggesting the depth of the stack walk, and 2) we are not just // peeking at a few frames. Take the cost of flushing out any pending deferred GC // processing of the stack. 
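The replacement on the next line swaps a one-shot StackWatermarkSet::finish_processing() call for a scoped KeepStackGCProcessedMark, so the walked stack stays GC-processed for the whole batch rather than only at a single point. Below is a standalone RAII sketch of that shape; Stack and its flag are illustrative, while the real mark talks to the stack watermark machinery.

    // RAII guard: establish a property on construction and release it on scope
    // exit, so every early return inside the batch loop is covered automatically.
    struct Stack { bool gc_processed = false; };

    class KeepProcessedMark {
      Stack& _s;
     public:
      explicit KeepProcessedMark(Stack& s) : _s(s) { _s.gc_processed = true; }
      ~KeepProcessedMark() { _s.gc_processed = false; }
    };

    static int fetch_batch(Stack& s, int frames) {
      KeepProcessedMark keep(s);        // holds for the whole decode loop
      if (frames == 0) return 0;        // early exit still releases the mark
      return frames;                    // ... decode frames while processed ...
    }

    int main() {
      Stack s;
      return (fetch_batch(s, 2) == 2 && !s.gc_processed) ? 0 : 1;
    }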
- StackWatermarkSet::finish_processing(jt, NULL /* context */, StackWatermarkKind::gc); + KeepStackGCProcessedMark keep_stack(jt); stream.next(); // advance past the last frame decoded in previous batch if (!stream.at_end()) { int n = fill_in_frames(mode, stream, frame_count, start_index, diff --git a/src/hotspot/share/prims/unsafe.cpp b/src/hotspot/share/prims/unsafe.cpp index 1448a0b5b19a016e266730cde5d66e67f4abd57a..1ddbb068bdbb1b1bcd6270c4c29bbd849768355a 100644 --- a/src/hotspot/share/prims/unsafe.cpp +++ b/src/hotspot/share/prims/unsafe.cpp @@ -52,6 +52,7 @@ #include "runtime/stubRoutines.hpp" #include "runtime/thread.hpp" #include "runtime/threadSMR.hpp" +#include "runtime/vmOperations.hpp" #include "runtime/vm_version.hpp" #include "services/threadService.hpp" #include "utilities/align.hpp" @@ -861,16 +862,13 @@ Unsafe_DefineAnonymousClass_impl(JNIEnv *env, false, // is_strong_hidden true); // can_access_vm_annotations - Klass* anonk = SystemDictionary::parse_stream(no_class_name, - host_loader, - &st, - cl_info, - CHECK_NULL); - if (anonk == NULL) { - return NULL; - } - - return InstanceKlass::cast(anonk); + InstanceKlass* anonk = SystemDictionary::parse_stream(no_class_name, + host_loader, + &st, + cl_info, + CHECK_NULL); + assert(anonk != NULL, "no klass created"); + return anonk; } UNSAFE_ENTRY(jclass, Unsafe_DefineAnonymousClass0(JNIEnv *env, jobject unsafe, jclass host_class, jbyteArray data, jobjectArray cp_patches_jh)) { diff --git a/src/hotspot/share/prims/upcallStubs.cpp b/src/hotspot/share/prims/upcallStubs.cpp index e6f3af44494ebe4be7e0cbdf1d83b314364bb85f..8ed046f473d04abb6318a8ba435e72b11f6400bb 100644 --- a/src/hotspot/share/prims/upcallStubs.cpp +++ b/src/hotspot/share/prims/upcallStubs.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "runtime/jniHandles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "code/codeCache.hpp" +#include "runtime/vmOperations.hpp" JVM_ENTRY(static jboolean, UH_FreeUpcallStub0(JNIEnv *env, jobject _unused, jlong addr)) //acquire code cache lock diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index ab4bff412d7ba998aa2154f48cce3afe7d0c0c76..9aafefdb2c30ac92e9062334930392cd1a8a8525 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -23,9 +23,7 @@ */ #include "precompiled.hpp" - #include <new> - #include "classfile/classLoaderDataGraph.hpp" #include "classfile/javaClasses.inline.hpp" #include "classfile/modules.hpp" @@ -47,13 +45,14 @@ #include "logging/log.hpp" #include "memory/filemap.hpp" #include "memory/heapShared.inline.hpp" -#include "memory/metaspaceShared.hpp" +#include "memory/iterator.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspace/testHelpers.hpp" -#include "memory/iterator.hpp" +#include "memory/metaspaceShared.hpp" +#include "memory/metaspaceUtils.hpp" +#include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" -#include "memory/oopFactory.hpp" #include "oops/array.hpp" #include "oops/compressedOops.hpp" #include "oops/constantPool.inline.hpp" @@ -254,21 +253,6 @@ WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) { } WB_END -#ifndef PRODUCT -// Forward declaration -void TestReservedSpace_test(); -void TestReserveMemorySpecial_test(); -void TestVirtualSpace_test(); -#endif - -WB_ENTRY(void, WB_RunMemoryUnitTests(JNIEnv* env, jobject o)) -#ifndef PRODUCT - TestReservedSpace_test(); - TestReserveMemorySpecial_test(); - TestVirtualSpace_test(); -#endif -WB_END - WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o)) size_t granularity = os::vm_allocation_granularity(); ReservedHeapSpace rhs(100 * granularity, granularity, false); @@ -1703,23 +1687,30 @@ WB_END WB_ENTRY(void, WB_DefineModule(JNIEnv* env, jobject o, jobject module, jboolean is_open, jstring version, jstring location, jobjectArray packages)) - Modules::define_module(module, is_open, version, location, packages, CHECK); + Handle h_module (THREAD, JNIHandles::resolve(module)); + Modules::define_module(h_module, is_open, version, location, packages, CHECK); WB_END WB_ENTRY(void, WB_AddModuleExports(JNIEnv* env, jobject o, jobject from_module, jstring package, jobject to_module)) - Modules::add_module_exports_qualified(from_module, package, to_module, CHECK); + Handle h_from_module (THREAD, JNIHandles::resolve(from_module)); + Handle h_to_module (THREAD, JNIHandles::resolve(to_module)); + Modules::add_module_exports_qualified(h_from_module, package, h_to_module, CHECK); WB_END WB_ENTRY(void, WB_AddModuleExportsToAllUnnamed(JNIEnv* env, jobject o, jclass module, jstring package)) - Modules::add_module_exports_to_all_unnamed(module, package, CHECK); + Handle h_module (THREAD, JNIHandles::resolve(module)); + Modules::add_module_exports_to_all_unnamed(h_module, package, CHECK); WB_END WB_ENTRY(void, WB_AddModuleExportsToAll(JNIEnv* env, jobject o, jclass module, jstring package)) - Modules::add_module_exports(module, package, NULL, CHECK); + Handle h_module (THREAD, JNIHandles::resolve(module)); + Modules::add_module_exports(h_module, package, Handle(), CHECK); WB_END WB_ENTRY(void, WB_AddReadsModule(JNIEnv* env, jobject o, jobject from_module, jobject
source_module)) - Modules::add_reads_module(from_module, source_module, CHECK); + Handle h_from_module (THREAD, JNIHandles::resolve(from_module)); + Handle h_source_module (THREAD, JNIHandles::resolve(source_module)); + Modules::add_reads_module(h_from_module, h_source_module, CHECK); WB_END WB_ENTRY(jlong, WB_IncMetaspaceCapacityUntilGC(JNIEnv* env, jobject wb, jlong inc)) @@ -2349,7 +2340,6 @@ static JNINativeMethod methods[] = { {CC"getCompressedOopsMaxHeapSize", CC"()J", (void*)&WB_GetCompressedOopsMaxHeapSize}, {CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes }, - {CC"runMemoryUnitTests", CC"()V", (void*)&WB_RunMemoryUnitTests}, {CC"readFromNoaccessArea",CC"()V", (void*)&WB_ReadFromNoaccessArea}, {CC"stressVirtualSpaceResize",CC"(JJJ)I", (void*)&WB_StressVirtualSpaceResize}, #if INCLUDE_CDS diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index d6fc67f2cc82aa13993a00c0043e9f6576a50bdf..4db073d069bd708d3e5f2e6d3041153b000de11e 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -44,6 +44,7 @@ #include "runtime/arguments.hpp" #include "runtime/flags/jvmFlag.hpp" #include "runtime/flags/jvmFlagAccess.hpp" +#include "runtime/flags/jvmFlagLimit.hpp" #include "runtime/globals_extension.hpp" #include "runtime/java.hpp" #include "runtime/os.inline.hpp" @@ -84,8 +85,6 @@ bool Arguments::_AlwaysCompileLoopMethods = AlwaysCompileLoopMethods; bool Arguments::_UseOnStackReplacement = UseOnStackReplacement; bool Arguments::_BackgroundCompilation = BackgroundCompilation; bool Arguments::_ClipInlining = ClipInlining; -intx Arguments::_Tier3InvokeNotifyFreqLog = Tier3InvokeNotifyFreqLog; -intx Arguments::_Tier4InvocationThreshold = Tier4InvocationThreshold; size_t Arguments::_default_SharedBaseAddress = SharedBaseAddress; bool Arguments::_enable_preview = false; @@ -1457,12 +1456,6 @@ void Arguments::set_mode_flags(Mode mode) { AlwaysCompileLoopMethods = Arguments::_AlwaysCompileLoopMethods; UseOnStackReplacement = Arguments::_UseOnStackReplacement; BackgroundCompilation = Arguments::_BackgroundCompilation; - if (FLAG_IS_DEFAULT(Tier3InvokeNotifyFreqLog)) { - Tier3InvokeNotifyFreqLog = Arguments::_Tier3InvokeNotifyFreqLog; - } - if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) { - Tier4InvocationThreshold = Arguments::_Tier4InvocationThreshold; - } // Change from defaults based on mode switch (mode) { @@ -1482,13 +1475,6 @@ void Arguments::set_mode_flags(Mode mode) { UseInterpreter = false; BackgroundCompilation = false; ClipInlining = false; - // Be much more aggressive in tiered mode with -Xcomp and exercise C2 more. - // We will first compile a level 3 version (C1 with full profiling), then do one invocation of it and - // compile a level 4 (C2) and then continue executing it. - if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) { - Tier3InvokeNotifyFreqLog = 0; - Tier4InvocationThreshold = 0; - } break; } } @@ -2135,8 +2121,6 @@ jint Arguments::parse_vm_init_args(const JavaVMInitArgs *vm_options_args, Arguments::_UseOnStackReplacement = UseOnStackReplacement; Arguments::_ClipInlining = ClipInlining; Arguments::_BackgroundCompilation = BackgroundCompilation; - Arguments::_Tier3InvokeNotifyFreqLog = Tier3InvokeNotifyFreqLog; - Arguments::_Tier4InvocationThreshold = Tier4InvocationThreshold; // Remember the default value of SharedBaseAddress. 
Arguments::_default_SharedBaseAddress = SharedBaseAddress; @@ -2270,6 +2254,11 @@ jint Arguments::parse_xss(const JavaVMOption* option, const char* tail, intx* ou const julong min_ThreadStackSize = 0; const julong max_ThreadStackSize = 1 * M; + // Make sure the above values match the range set in globals.hpp + const JVMTypedFlagLimit<intx>* limit = JVMFlagLimit::get_range_at(FLAG_MEMBER_ENUM(ThreadStackSize))->cast<intx>(); + assert(min_ThreadStackSize == static_cast<julong>(limit->min()), "must be"); + assert(max_ThreadStackSize == static_cast<julong>(limit->max()), "must be"); + const julong min_size = min_ThreadStackSize * K; const julong max_size = max_ThreadStackSize * K; @@ -4078,7 +4067,7 @@ jint Arguments::apply_ergo() { UseOptoBiasInlining = false; } - if (!EnableVectorSupport) { + if (!FLAG_IS_DEFAULT(EnableVectorSupport) && !EnableVectorSupport) { if (!FLAG_IS_DEFAULT(EnableVectorReboxing) && EnableVectorReboxing) { warning("Disabling EnableVectorReboxing since EnableVectorSupport is turned off."); } diff --git a/src/hotspot/share/runtime/arguments.hpp b/src/hotspot/share/runtime/arguments.hpp index 4fa2a10551888604d285fb21f76cf0dfddcecda2..3876be4a9dad74424fca84963f49d442a064ac08 100644 --- a/src/hotspot/share/runtime/arguments.hpp +++ b/src/hotspot/share/runtime/arguments.hpp @@ -354,8 +354,6 @@ class Arguments : AllStatic { static bool _UseOnStackReplacement; static bool _BackgroundCompilation; static bool _ClipInlining; - static intx _Tier3InvokeNotifyFreqLog; - static intx _Tier4InvocationThreshold; // GC ergonomics static void set_conservative_max_heap_alignment(); @@ -617,8 +615,10 @@ class Arguments : AllStatic { static bool init_shared_archive_paths() NOT_CDS_RETURN_(false); // Operation modi - static Mode mode() { return _mode; } - static bool is_interpreter_only() { return mode() == _int; } + static Mode mode() { return _mode; } + static bool is_interpreter_only() { return mode() == _int; } + static bool is_compiler_only() { return mode() == _comp; } + // preview features static void set_enable_preview() { _enable_preview = true; } diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index 470d41ff6946091cedd387287eb42ede8b079d32..5e04174a13b0271ed12ca36896dfa790a16f968a 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -78,6 +78,7 @@ #include "runtime/vframe.hpp" #include "runtime/vframeArray.hpp" #include "runtime/vframe_hp.hpp" +#include "runtime/vmOperations.hpp" #include "utilities/events.hpp" #include "utilities/macros.hpp" #include "utilities/preserveException.hpp" @@ -902,12 +903,11 @@ Deoptimization::DeoptAction Deoptimization::_unloaded_action template<typename CacheType, typename BoxType> class BoxCacheBase : public CHeapObj<mtCompiler> { protected: - static InstanceKlass* find_cache_klass(Symbol* klass_name, TRAPS) { ResourceMark rm; char* klass_name_str = klass_name->as_C_string(); - Klass* k = SystemDictionary::find(klass_name, Handle(), Handle(), THREAD); - guarantee(k != NULL, "%s must be loaded", klass_name_str); - InstanceKlass* ik = InstanceKlass::cast(k); + InstanceKlass* ik = SystemDictionary::find_instance_klass(klass_name, Handle(), Handle()); + guarantee(ik != NULL, "%s must be loaded", klass_name_str); guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str); CacheType::compute_offsets(ik); return ik; @@ -921,7 +921,7 @@ template<typename PrimitiveType, typename CacheType, typename BoxType> class Box protected: static BoxCache<PrimitiveType, CacheType, BoxType> *_singleton; BoxCache(Thread* thread) { - InstanceKlass* ik =
BoxCacheBase<CacheType, BoxType>::find_cache_klass(CacheType::symbol(), thread); + InstanceKlass* ik = BoxCacheBase<CacheType, BoxType>::find_cache_klass(CacheType::symbol()); objArrayOop cache = CacheType::cache(ik); assert(cache->length() > 0, "Empty cache"); _low = BoxType::value(cache->obj_at(0)); @@ -977,7 +977,7 @@ class BooleanBoxCache : public BoxCacheBase<java_lang_Boolean, jboolean> { protected: static BooleanBoxCache *_singleton; BooleanBoxCache(Thread *thread) { - InstanceKlass* ik = find_cache_klass(java_lang_Boolean::symbol(), thread); + InstanceKlass* ik = find_cache_klass(java_lang_Boolean::symbol()); _true_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_TRUE(ik))); _false_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_FALSE(ik))); } @@ -2465,6 +2465,7 @@ Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, j // This enters VM and may safepoint uncommon_trap_inner(thread, trap_request); } + HandleMark hm(thread); return fetch_unroll_info_helper(thread, exec_mode); } diff --git a/src/hotspot/share/runtime/flags/jvmFlag.hpp b/src/hotspot/share/runtime/flags/jvmFlag.hpp index d4beb7d85e3fecdfcf5212f9c84520e63c55eb39..36f2c53902bbaa4154b7398ff603a7f901c2f163 100644 --- a/src/hotspot/share/runtime/flags/jvmFlag.hpp +++ b/src/hotspot/share/runtime/flags/jvmFlag.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "utilities/enumIterator.hpp" #include "utilities/macros.hpp" #include "utilities/vmEnums.hpp" +#include <type_traits> class outputStream; @@ -290,6 +291,26 @@ public: static const char* flag_error_str(JVMFlag::Error error); + // type checking +#define CHECK_COMPATIBLE(type) \ + case TYPE_##type: \ + assert(sizeof(T) == sizeof(type) && \ + std::is_integral<T>::value == std::is_integral<type>::value && \ + std::is_signed<T>::value == std::is_signed<type>::value, "must be"); \ + break; + + template <typename T> + static void assert_compatible_type(int type_enum) { +#ifndef PRODUCT + switch (type_enum) { + JVM_FLAG_NON_STRING_TYPES_DO(CHECK_COMPATIBLE); + default: ShouldNotReachHere(); + } +#endif + } + +#undef CHECK_COMPATIBLE + public: static void printSetFlags(outputStream* out); diff --git a/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp b/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp index 4695aab8fa42748d2272a1a00d98e0f9e425ff3f..34c9b949049df335d0c1b226a79434e5bb265ccb 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp +++ b/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "runtime/flags/jvmFlagAccess.hpp" #include "runtime/flags/jvmFlagLimit.hpp" #include "runtime/flags/jvmFlagConstraintsRuntime.hpp" +#include "runtime/os.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" diff --git a/src/hotspot/share/runtime/flags/jvmFlagLimit.cpp b/src/hotspot/share/runtime/flags/jvmFlagLimit.cpp index c4ab9360c459b1c8de74df99b94fa9ade0273e66..5b81444eb84abe561910d976a435f899d6432df0 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagLimit.cpp +++ b/src/hotspot/share/runtime/flags/jvmFlagLimit.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -92,7 +92,7 @@ public: // macro body starts here -------------------+ // | // v -#define FLAG_LIMIT_DEFINE( type, name, ...) ); constexpr JVMTypedFlagLimit<type> limit_##name(0 +#define FLAG_LIMIT_DEFINE( type, name, ...) ); constexpr JVMTypedFlagLimit<type> limit_##name(JVMFlag::TYPE_##type #define FLAG_LIMIT_DEFINE_DUMMY(type, name, ...) ); constexpr DummyLimit nolimit_##name(0 #define FLAG_LIMIT_PTR( type, name, ...) ), LimitGetter<type>::get_limit(&limit_##name, 0 #define FLAG_LIMIT_PTR_NONE( type, name, ...) ), LimitGetter<type>::no_limit(0 diff --git a/src/hotspot/share/runtime/flags/jvmFlagLimit.hpp b/src/hotspot/share/runtime/flags/jvmFlagLimit.hpp index 28d2d3cfcd03cf1a78910a840a1ae46bc719affb..482fd292af54b457ff6ceabebbe592ff0a03340e 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagLimit.hpp +++ b/src/hotspot/share/runtime/flags/jvmFlagLimit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,6 +50,8 @@ typedef JVMFlag::Error (*JVMFlagConstraintFunc_size_t)(size_t value, bool verbos typedef JVMFlag::Error (*JVMFlagConstraintFunc_double)(double value, bool verbose); typedef JVMFlag::Error (*JVMFlagConstraintFunc_ccstr)(ccstr value, bool verbose); +template <typename T> class JVMTypedFlagLimit; + // A JVMFlagLimit is created for each JVMFlag that has a range() and/or constraint() in its declaration in // the globals_xxx.hpp file.
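The limits described above are reached through their untyped JVMFlagLimit base, so the patch records the flag's type enum (debug builds only) and lets cast<T>() assert that the requested typed view matches. Here is a self-contained sketch of such a debug-checked downcast; TypeTag, Limit, and TypedLimit are illustrative names, not the patch's classes.

    #include <cassert>

    enum TypeTag { TAG_INT, TAG_DOUBLE };

    template <typename T> struct tag_of;
    template <> struct tag_of<int>    { static constexpr TypeTag value = TAG_INT; };
    template <> struct tag_of<double> { static constexpr TypeTag value = TAG_DOUBLE; };

    template <typename T> struct TypedLimit;

    struct Limit {                       // untyped base, like JVMFlagLimit
      TypeTag tag;                       // recorded so debug builds can verify casts
      explicit Limit(TypeTag t) : tag(t) {}
      template <typename T>
      const TypedLimit<T>* cast() const {
        assert(tag == tag_of<T>::value && "downcast to wrong typed limit");
        return static_cast<const TypedLimit<T>*>(this);
      }
    };

    template <typename T> struct TypedLimit : Limit {
      T min, max;
      TypedLimit(T lo, T hi) : Limit(tag_of<T>::value), min(lo), max(hi) {}
    };

    int main() {
      TypedLimit<int> l(0, 1024);
      const Limit* base = &l;
      return base->cast<int>()->max == 1024 ? 0 : 1;  // cast<double>() would assert
    }

In the patched code the assert sits under DEBUG_ONLY, so cast<T>() compiles down to a plain static_cast in product builds and the check costs nothing there.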
// @@ -68,6 +70,10 @@ class JVMFlagLimit { char _phase; char _kind; +#ifdef ASSERT + int _type_enum; +#endif + static const JVMFlagLimit* const* flagLimits; static JVMFlagsEnum _last_checked; static JVMFlagConstraintPhase _validating_phase; @@ -97,7 +103,8 @@ public: char phase() const { return _phase; } char kind() const { return _kind; } - constexpr JVMFlagLimit(short func, short phase, short kind) : _constraint_func(func), _phase(phase), _kind(kind) {} + constexpr JVMFlagLimit(int type_enum, short func, short phase, short kind) + : _constraint_func(func), _phase(phase), _kind(kind) DEBUG_ONLY(COMMA _type_enum(type_enum)) {} static const JVMFlagLimit* get_range(const JVMFlag* flag) { return get_range_at(flag->flag_enum()); @@ -130,6 +137,9 @@ public: } static JVMFlagConstraintPhase validating_phase() { return _validating_phase; } + + template <typename T> + const JVMTypedFlagLimit<T>* cast() const; }; enum ConstraintMarker { @@ -144,27 +154,33 @@ class JVMTypedFlagLimit : public JVMFlagLimit { public: // dummy - no range or constraint. This object will not be emitted into the .o file // because we declare it as "const" but has no reference to it. - constexpr JVMTypedFlagLimit(int dummy) : - JVMFlagLimit(0, 0, 0), _min(0), _max(0) {} + constexpr JVMTypedFlagLimit(int type_enum) : + JVMFlagLimit(0, 0, 0, 0), _min(0), _max(0) {} // range only - constexpr JVMTypedFlagLimit(int dummy, T min, T max) : - JVMFlagLimit(0, 0, HAS_RANGE), _min(min), _max(max) {} + constexpr JVMTypedFlagLimit(int type_enum, T min, T max) : + JVMFlagLimit(type_enum, 0, 0, HAS_RANGE), _min(min), _max(max) {} // constraint only - constexpr JVMTypedFlagLimit(int dummy, ConstraintMarker dummy2, short func, int phase) : - JVMFlagLimit(func, phase, HAS_CONSTRAINT), _min(0), _max(0) {} + constexpr JVMTypedFlagLimit(int type_enum, ConstraintMarker dummy2, short func, int phase) : + JVMFlagLimit(type_enum, func, phase, HAS_CONSTRAINT), _min(0), _max(0) {} // range and constraint - constexpr JVMTypedFlagLimit(int dummy, T min, T max, ConstraintMarker dummy2, short func, int phase) : - JVMFlagLimit(func, phase, HAS_RANGE | HAS_CONSTRAINT), _min(min), _max(max) {} + constexpr JVMTypedFlagLimit(int type_enum, T min, T max, ConstraintMarker dummy2, short func, int phase) : + JVMFlagLimit(type_enum, func, phase, HAS_RANGE | HAS_CONSTRAINT), _min(min), _max(max) {} // constraint and range - constexpr JVMTypedFlagLimit(int dummy, ConstraintMarker dummy2, short func, int phase, T min, T max) : - JVMFlagLimit(func, phase, HAS_RANGE | HAS_CONSTRAINT), _min(min), _max(max) {} + constexpr JVMTypedFlagLimit(int type_enum, ConstraintMarker dummy2, short func, int phase, T min, T max) : + JVMFlagLimit(type_enum, func, phase, HAS_RANGE | HAS_CONSTRAINT), _min(min), _max(max) {} T min() const { return _min; } T max() const { return _max; } }; +template <typename T> +const JVMTypedFlagLimit<T>* JVMFlagLimit::cast() const { + DEBUG_ONLY(JVMFlag::assert_compatible_type<T>(_type_enum)); + return static_cast<const JVMTypedFlagLimit<T>*>(this); +} + #endif // SHARE_RUNTIME_FLAGS_JVMFLAGLIMIT_HPP diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index cfe5fd8116ce813d2239410121a660fd0679e0cc..049652904cadb5224ab3cf1766082a55a5170422 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -1470,7 +1470,7 @@ const intx ObjectAlignmentInBytes = 8; "Force inlining of throwing methods smaller than this") \ range(0, max_jint) \ \ - product_pd(size_t, MetaspaceSize, \ + product(size_t, MetaspaceSize, NOT_LP64(16 * M) LP64_ONLY(21 * M),
\ "Initial threshold (in bytes) at which a garbage collection " \ "is done to reduce Metaspace usage") \ constraint(MetaspaceSizeConstraintFunc,AfterErgo) \ diff --git a/src/hotspot/share/runtime/handshake.cpp b/src/hotspot/share/runtime/handshake.cpp index a9e22049eadab173d048940e10e994de3bdcf30e..de174c7ceb9123de1dcfd966418dacf45ede4a07 100644 --- a/src/hotspot/share/runtime/handshake.cpp +++ b/src/hotspot/share/runtime/handshake.cpp @@ -34,9 +34,11 @@ #include "runtime/stackWatermarkSet.hpp" #include "runtime/task.hpp" #include "runtime/thread.hpp" +#include "runtime/threadSMR.hpp" #include "runtime/vmThread.hpp" #include "utilities/formatBuffer.hpp" #include "utilities/filterQueue.inline.hpp" +#include "utilities/globalDefinitions.hpp" #include "utilities/preserveException.hpp" class HandshakeOperation : public CHeapObj { @@ -47,19 +49,23 @@ class HandshakeOperation : public CHeapObj { // Once it reaches zero all handshake operations have been performed. int32_t _pending_threads; JavaThread* _target; + Thread* _requester; // Must use AsyncHandshakeOperation when using AsyncHandshakeClosure. - HandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target) : + HandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, Thread* requester) : _handshake_cl(cl), _pending_threads(1), - _target(target) {} + _target(target), + _requester(requester) {} public: - HandshakeOperation(HandshakeClosure* cl, JavaThread* target) : + HandshakeOperation(HandshakeClosure* cl, JavaThread* target, Thread* requester) : _handshake_cl(cl), _pending_threads(1), - _target(target) {} + _target(target), + _requester(requester) {} virtual ~HandshakeOperation() {} + void prepare(JavaThread* current_target, Thread* executing_thread); void do_handshake(JavaThread* thread); bool is_completed() { int32_t val = Atomic::load(&_pending_threads); @@ -76,7 +82,7 @@ class AsyncHandshakeOperation : public HandshakeOperation { jlong _start_time_ns; public: AsyncHandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, jlong start_ns) - : HandshakeOperation(cl, target), _start_time_ns(start_ns) {} + : HandshakeOperation(cl, target, NULL), _start_time_ns(start_ns) {} virtual ~AsyncHandshakeOperation() { delete _handshake_cl; } jlong start_time() const { return _start_time_ns; } }; @@ -177,7 +183,7 @@ class VM_Handshake: public VM_Operation { HandshakeOperation* const _op; VM_Handshake(HandshakeOperation* op) : - _handshake_timeout(TimeHelper::millis_to_counter(HandshakeTimeout)), _op(op) {} + _handshake_timeout(millis_to_nanos(HandshakeTimeout)), _op(op) {} bool handshake_has_timed_out(jlong start_time); static void handle_timeout(); @@ -275,6 +281,22 @@ class VM_HandshakeAllThreads: public VM_Handshake { VMOp_Type type() const { return VMOp_HandshakeAllThreads; } }; +void HandshakeOperation::prepare(JavaThread* current_target, Thread* executing_thread) { + if (current_target->is_terminated()) { + // Will never execute any handshakes on this thread. + return; + } + if (current_target != executing_thread) { + // Only when the target is not executing the handshake itself. + StackWatermarkSet::start_processing(current_target, StackWatermarkKind::gc); + } + if (_requester != NULL && _requester != executing_thread && _requester->is_Java_thread()) { + // The handshake closure may contain oop Handles from the _requester. + // We must make sure we can use them. 
+ StackWatermarkSet::start_processing(_requester->as_Java_thread(), StackWatermarkKind::gc); + } +} void HandshakeOperation::do_handshake(JavaThread* thread) { jlong start_time_ns = 0; if (log_is_enabled(Debug, handshake, task)) { @@ -304,14 +326,14 @@ void HandshakeOperation::do_handshake(JavaThread* thread) { } void Handshake::execute(HandshakeClosure* hs_cl) { - HandshakeOperation cto(hs_cl, NULL); + HandshakeOperation cto(hs_cl, NULL, Thread::current()); VM_HandshakeAllThreads handshake(&cto); VMThread::execute(&handshake); } void Handshake::execute(HandshakeClosure* hs_cl, JavaThread* target) { JavaThread* self = JavaThread::current(); - HandshakeOperation op(hs_cl, target); + HandshakeOperation op(hs_cl, target, Thread::current()); jlong start_time_ns = os::javaTimeNanos(); @@ -429,6 +451,7 @@ void HandshakeState::process_self_inner() { bool async = op->is_async(); log_trace(handshake)("Proc handshake %s " INTPTR_FORMAT " on " INTPTR_FORMAT " by self", async ? "asynchronous" : "synchronous", p2i(op), p2i(_handshakee)); + op->prepare(_handshakee, _handshakee); op->do_handshake(_handshakee); if (async) { log_handshake_info(((AsyncHandshakeOperation*)op)->start_time(), op->name(), 1, 0, "asynchronous"); @@ -521,9 +544,7 @@ HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* ma pr_ret = HandshakeState::_succeeded; } - if (!_handshakee->is_terminated()) { - StackWatermarkSet::start_processing(_handshakee, StackWatermarkKind::gc); - } + op->prepare(_handshakee, current_thread); _active_handshaker = current_thread; op->do_handshake(_handshakee); diff --git a/src/hotspot/share/runtime/interfaceSupport.cpp b/src/hotspot/share/runtime/interfaceSupport.cpp index 0dc681aa5209e456499bb868dfc2efe724c7e646..99b7592f68b11c0c21c757166ba932fad45f1320 100644 --- a/src/hotspot/share/runtime/interfaceSupport.cpp +++ b/src/hotspot/share/runtime/interfaceSupport.cpp @@ -37,6 +37,7 @@ #include "runtime/thread.inline.hpp" #include "runtime/safepointVerifiers.hpp" #include "runtime/vframe.hpp" +#include "runtime/vmOperations.hpp" #include "runtime/vmThread.hpp" #include "utilities/preserveException.hpp" diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp index cd3228870176573c2e5ced0882d9a88ff9c43212..9e8af9a6cacbbbf36d3489f663ea6fa087c5cf5d 100644 --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -25,7 +25,6 @@ #include "precompiled.hpp" #include "jvm.h" #include "aot/aotLoader.hpp" -#include "classfile/classLoader.hpp" #include "classfile/classLoaderDataGraph.hpp" #include "classfile/javaClasses.hpp" #include "classfile/stringTable.hpp" @@ -43,6 +42,7 @@ #endif #include "logging/log.hpp" #include "logging/logStream.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/dynamicArchive.hpp" @@ -97,9 +97,11 @@ GrowableArray<Method*>* collected_profiled_methods; int compare_methods(Method** a, Method** b) { - // %%% there can be 32-bit overflow here - return ((*b)->invocation_count() + (*b)->compiled_invocation_count()) - - ((*a)->invocation_count() + (*a)->compiled_invocation_count()); + // compiled_invocation_count() returns int64_t, forcing the entire expression + // to be evaluated as int64_t. Overflow is not an issue. + int64_t diff = (((*b)->invocation_count() + (*b)->compiled_invocation_count()) + - ((*a)->invocation_count() + (*a)->compiled_invocation_count())); + return (diff < 0) ? -1 : (diff > 0) ?
1 : 0; } void collect_profiled_methods(Method* m) { @@ -151,14 +153,15 @@ void print_method_profiling_data() { GrowableArray<Method*>* collected_invoked_methods; void collect_invoked_methods(Method* m) { - if (m->invocation_count() + m->compiled_invocation_count() >= 1 ) { + if (m->invocation_count() + m->compiled_invocation_count() >= 1) { collected_invoked_methods->push(m); } } - - +// Invocation count accumulators should be 64-bit unsigned to shift the +// overflow border. Longer-running workloads tend to create invocation +// counts which already overflow 32-bit counters for individual methods. void print_method_invocation_histogram() { ResourceMark rm; collected_invoked_methods = new GrowableArray<Method*>(1024); @@ -169,31 +172,45 @@ void print_method_invocation_histogram() { tty->print_cr("Histogram Over Method Invocation Counters (cutoff = " INTX_FORMAT "):", MethodHistogramCutoff); tty->cr(); tty->print_cr("____Count_(I+C)____Method________________________Module_________________"); - unsigned total = 0, int_total = 0, comp_total = 0, static_total = 0, final_total = 0, - synch_total = 0, nativ_total = 0, acces_total = 0; + uint64_t total = 0, + int_total = 0, + comp_total = 0, + special_total = 0, + static_total = 0, + final_total = 0, + synch_total = 0, + native_total = 0, + access_total = 0; for (int index = 0; index < collected_invoked_methods->length(); index++) { + // Counter values returned from getter methods are signed int. + // To shift the overflow border by a factor of two, we interpret + // them here as uint64_t. A counter can't be negative anyway. Method* m = collected_invoked_methods->at(index); - int c = m->invocation_count() + m->compiled_invocation_count(); - if (c >= MethodHistogramCutoff) m->print_invocation_count(); - int_total += m->invocation_count(); - comp_total += m->compiled_invocation_count(); - if (m->is_final()) final_total += c; - if (m->is_static()) static_total += c; - if (m->is_synchronized()) synch_total += c; - if (m->is_native()) nativ_total += c; - if (m->is_accessor()) acces_total += c; + uint64_t iic = (uint64_t)m->invocation_count(); + uint64_t cic = (uint64_t)m->compiled_invocation_count(); + if ((iic + cic) >= (uint64_t)MethodHistogramCutoff) m->print_invocation_count(); + int_total += iic; + comp_total += cic; + if (m->is_final()) final_total += iic + cic; + if (m->is_static()) static_total += iic + cic; + if (m->is_synchronized()) synch_total += iic + cic; + if (m->is_native()) native_total += iic + cic; + if (m->is_accessor()) access_total += iic + cic; } tty->cr(); total = int_total + comp_total; - tty->print_cr("Invocations summary:"); - tty->print_cr("\t%9d (%4.1f%%) interpreted", int_total, 100.0 * int_total / total); - tty->print_cr("\t%9d (%4.1f%%) compiled", comp_total, 100.0 * comp_total / total); - tty->print_cr("\t%9d (100%%) total", total); - tty->print_cr("\t%9d (%4.1f%%) synchronized", synch_total, 100.0 * synch_total / total); - tty->print_cr("\t%9d (%4.1f%%) final", final_total, 100.0 * final_total / total); - tty->print_cr("\t%9d (%4.1f%%) static", static_total, 100.0 * static_total / total); - tty->print_cr("\t%9d (%4.1f%%) native", nativ_total, 100.0 * nativ_total / total); - tty->print_cr("\t%9d (%4.1f%%) accessor", acces_total, 100.0 * acces_total / total); + special_total = final_total + static_total + synch_total + native_total + access_total; + tty->print_cr("Invocations summary for %d methods:", collected_invoked_methods->length()); + tty->print_cr("\t" UINT64_FORMAT_W(12) " (100%%) total", total); + tty->print_cr("\t"
UINT64_FORMAT_W(12) " (%4.1f%%) |- interpreted", int_total, 100.0 * int_total / total); + tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- compiled", comp_total, 100.0 * comp_total / total); + tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- special methods (interpreted and compiled)", + special_total, 100.0 * special_total / total); + tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- synchronized", synch_total, 100.0 * synch_total / total); + tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- final", final_total, 100.0 * final_total / total); + tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- static", static_total, 100.0 * static_total / total); + tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- native", native_total, 100.0 * native_total / total); + tty->print_cr("\t" UINT64_FORMAT_W(12) " (%4.1f%%) |- accessor", access_total, 100.0 * access_total / total); tty->cr(); SharedRuntime::print_call_statistics(comp_total); } diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp index f7afa9fdba92cb91eae2c0398b945292a7f49624..76976c774f1ca4a160a5f748c7b721981a7d110c 100644 --- a/src/hotspot/share/runtime/objectMonitor.cpp +++ b/src/hotspot/share/runtime/objectMonitor.cpp @@ -47,9 +47,9 @@ #include "runtime/orderAccess.hpp" #include "runtime/osThread.hpp" #include "runtime/perfData.hpp" +#include "runtime/safefetch.inline.hpp" #include "runtime/safepointMechanism.inline.hpp" #include "runtime/sharedRuntime.hpp" -#include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "services/threadService.hpp" #include "utilities/dtrace.hpp" diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp index 798d36f0981e9ca6454afc585be38a683bc6972b..e65975dc1ee83a1e4f2771aef1be63f4c70e6eac 100644 --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "jvm.h" -#include "classfile/classLoader.hpp" #include "classfile/javaClasses.hpp" #include "classfile/moduleEntry.hpp" #include "classfile/systemDictionary.hpp" @@ -56,10 +55,11 @@ #include "runtime/mutexLocker.hpp" #include "runtime/os.inline.hpp" #include "runtime/osThread.hpp" +#include "runtime/safefetch.inline.hpp" #include "runtime/sharedRuntime.hpp" -#include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadSMR.hpp" +#include "runtime/vmOperations.hpp" #include "runtime/vm_version.hpp" #include "services/attachListener.hpp" #include "services/mallocTracker.hpp" diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index c9a7a6e2b3a9e6754ae68e1106d196e0e0c2f9ee..e43db9b9fe0a508a1f7ab3584fdd23e626b1d92c 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -182,6 +182,8 @@ class os: AllStatic { // unset environment variable static bool unsetenv(const char* name); + // Get environ pointer, platform independently + static char** get_environ(); static bool have_special_privileges(); @@ -501,8 +503,12 @@ class os: AllStatic { static bool message_box(const char* title, const char* message); - // run cmd in a separate process and return its exit code; or -1 on failures - static int fork_and_exec(char *cmd, bool use_vfork_if_available = false); + // run cmd in a separate process and return its exit code; or -1 on failures. + // Note: only safe to use in fatal error situations.
+ // The "prefer_vfork" argument is only used on POSIX platforms to + // indicate whether vfork should be used instead of fork to spawn the + // child process (ignored on AIX, which always uses vfork). + static int fork_and_exec(const char *cmd, bool prefer_vfork = false); // Call ::exit() on all platforms but Windows static void exit(int num); diff --git a/src/hotspot/share/runtime/safefetch.inline.hpp b/src/hotspot/share/runtime/safefetch.inline.hpp new file mode 100644 index 0000000000000000000000000000000000000000..09fc5b6a67ef90c85be80b7cb9bca37e6f405ee4 --- /dev/null +++ b/src/hotspot/share/runtime/safefetch.inline.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_RUNTIME_SAFEFETCH_INLINE_HPP +#define SHARE_RUNTIME_SAFEFETCH_INLINE_HPP + +#include "runtime/stubRoutines.hpp" + +// Safefetch allows to load a value from a location that's not known +// to be valid. If the load causes a fault, the error value is returned. +inline int SafeFetch32(int* adr, int errValue) { + assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated"); + return StubRoutines::SafeFetch32_stub()(adr, errValue); +} + +inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) { + assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated"); + return StubRoutines::SafeFetchN_stub()(adr, errValue); +} + +// returns true if SafeFetch32 and SafeFetchN can be used safely (stubroutines are already generated) +inline bool CanUseSafeFetch32() { + return StubRoutines::SafeFetch32_stub() ? true : false; +} + +inline bool CanUseSafeFetchN() { + return StubRoutines::SafeFetchN_stub() ? true : false; +} + +#endif // SHARE_RUNTIME_SAFEFETCH_INLINE_HPP diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp index 92cb498412c8d8f362d59a236566f9946b60e1d5..42870b7fef4f39c4be7c11b48878ce44fb25b700 100644 --- a/src/hotspot/share/runtime/safepoint.cpp +++ b/src/hotspot/share/runtime/safepoint.cpp @@ -590,7 +590,7 @@ public: OopStorage::trigger_cleanup_if_needed(); } - _subtasks.all_tasks_completed(_num_workers); + _subtasks.all_tasks_claimed(); } }; diff --git a/src/hotspot/share/runtime/safepoint.hpp b/src/hotspot/share/runtime/safepoint.hpp index df9086be145a053840ca5e5d5f014947b70c1c40..c35f55f0c276d1dd8cb71fde6310ba95e7fcdd6c 100644 --- a/src/hotspot/share/runtime/safepoint.hpp +++ b/src/hotspot/share/runtime/safepoint.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "memory/allocation.hpp" #include "runtime/os.hpp" #include "runtime/thread.hpp" -#include "runtime/vmOperations.hpp" +#include "runtime/vmOperation.hpp" #include "utilities/ostream.hpp" #include "utilities/waitBarrier.hpp" diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index 5b6950f015282e947408a9a83c90cec3e9b06c32..48fd9db1890f72255bf876aac428cf3fac792182 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -45,9 +45,9 @@ #include "interpreter/interpreterRuntime.hpp" #include "jfr/jfrEvents.hpp" #include "logging/log.hpp" -#include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include "oops/compiledICHolder.inline.hpp" #include "oops/klass.hpp" #include "oops/method.inline.hpp" #include "oops/objArrayKlass.hpp" @@ -143,16 +143,16 @@ int SharedRuntime::_implicit_null_throws = 0; int SharedRuntime::_implicit_div0_throws = 0; int SharedRuntime::_throw_null_ctr = 0; -int SharedRuntime::_nof_normal_calls = 0; -int SharedRuntime::_nof_optimized_calls = 0; -int SharedRuntime::_nof_inlined_calls = 0; -int SharedRuntime::_nof_megamorphic_calls = 0; -int SharedRuntime::_nof_static_calls = 0; -int SharedRuntime::_nof_inlined_static_calls = 0; -int SharedRuntime::_nof_interface_calls = 0; -int SharedRuntime::_nof_optimized_interface_calls = 0; -int SharedRuntime::_nof_inlined_interface_calls = 0; -int SharedRuntime::_nof_megamorphic_interface_calls = 0; +int64_t SharedRuntime::_nof_normal_calls = 0; +int64_t SharedRuntime::_nof_optimized_calls = 0; +int64_t SharedRuntime::_nof_inlined_calls = 0; +int64_t SharedRuntime::_nof_megamorphic_calls = 0; +int64_t SharedRuntime::_nof_static_calls = 0; +int64_t SharedRuntime::_nof_inlined_static_calls = 0; +int64_t SharedRuntime::_nof_interface_calls = 0; +int64_t SharedRuntime::_nof_optimized_interface_calls = 0; +int64_t SharedRuntime::_nof_inlined_interface_calls = 0; +int64_t SharedRuntime::_nof_megamorphic_interface_calls = 0; int SharedRuntime::_new_instance_ctr=0; int SharedRuntime::_new_array_ctr=0; @@ -2203,14 +2203,20 @@ inline double percent(int x, int y) { return 100.0 * x / MAX2(y, 1); } +inline double percent(int64_t x, int64_t y) { + return 100.0 * x / MAX2(y, (int64_t)1); +} + class MethodArityHistogram { public: enum { MAX_ARITY = 256 }; private: - static int _arity_histogram[MAX_ARITY]; // histogram of #args - static int _size_histogram[MAX_ARITY]; // histogram of arg size in words - static int _max_arity; // max. arity seen - static int _max_size; // max. arg size seen + static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args + static uint64_t _size_histogram[MAX_ARITY]; // histogram of arg size in words + static uint64_t _total_compiled_calls; + static uint64_t _max_compiled_calls_per_method; + static int _max_arity; // max. arity seen + static int _max_size; // max. arg size seen static void add_method_to_histogram(nmethod* nm) { Method* method = (nm == NULL) ? 
NULL : nm->method(); @@ -2220,7 +2226,9 @@ class MethodArityHistogram { int argsize = method->size_of_parameters(); arity = MIN2(arity, MAX_ARITY-1); argsize = MIN2(argsize, MAX_ARITY-1); - int count = method->compiled_invocation_count(); + uint64_t count = (uint64_t)method->compiled_invocation_count(); + _max_compiled_calls_per_method = count > _max_compiled_calls_per_method ? count : _max_compiled_calls_per_method; + _total_compiled_calls += count; _arity_histogram[arity] += count; _size_histogram[argsize] += count; _max_arity = MAX2(_max_arity, arity); @@ -2228,27 +2236,31 @@ class MethodArityHistogram { } } - void print_histogram_helper(int n, int* histo, const char* name) { - const int N = MIN2(5, n); - tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):"); + void print_histogram_helper(int n, uint64_t* histo, const char* name) { + const int N = MIN2(9, n); double sum = 0; double weighted_sum = 0; - int i; - for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; } - double rest = sum; - double percent = sum / 100; - for (i = 0; i <= N; i++) { - rest -= histo[i]; - tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent); + for (int i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; } + if (sum >= 1.0) { // prevent divide by zero or divide overflow + double rest = sum; + double percent = sum / 100; + for (int i = 0; i <= N; i++) { + rest -= histo[i]; + tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], histo[i] / percent); + } + tty->print_cr("rest: " INT64_FORMAT_W(12) " (%5.1f%%)", (int64_t)rest, rest / percent); + tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n); + tty->print_cr("(total # of compiled calls = " INT64_FORMAT_W(14) ")", _total_compiled_calls); + tty->print_cr("(max # of compiled calls = " INT64_FORMAT_W(14) ")", _max_compiled_calls_per_method); + } else { + tty->print_cr("Histogram generation failed for %s. n = %d, sum = %7.5f", name, n, sum); } - tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent); - tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n); } void print_histogram() { tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):"); print_histogram_helper(_max_arity, _arity_histogram, "arity"); - tty->print_cr("\nSame for parameter size (in words):"); + tty->print_cr("\nHistogram of parameter block size (in words, incl. 
rcvr):"); print_histogram_helper(_max_size, _size_histogram, "size"); tty->cr(); } @@ -2260,35 +2272,39 @@ class MethodArityHistogram { // Take the CodeCache_lock to protect against changes in the CodeHeap structure MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag); _max_arity = _max_size = 0; + _total_compiled_calls = 0; + _max_compiled_calls_per_method = 0; for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0; CodeCache::nmethods_do(add_method_to_histogram); print_histogram(); } }; -int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY]; -int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY]; +uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY]; +uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY]; +uint64_t MethodArityHistogram::_total_compiled_calls; +uint64_t MethodArityHistogram::_max_compiled_calls_per_method; int MethodArityHistogram::_max_arity; int MethodArityHistogram::_max_size; -void SharedRuntime::print_call_statistics(int comp_total) { +void SharedRuntime::print_call_statistics(uint64_t comp_total) { tty->print_cr("Calls from compiled code:"); - int total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls; - int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls; - int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls; - tty->print_cr("\t%9d (%4.1f%%) total non-inlined ", total, percent(total, total)); - tty->print_cr("\t%9d (%4.1f%%) virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total)); - tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls)); - tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls)); - tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_c, percent(mono_c, _nof_normal_calls)); - tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls)); - tty->print_cr("\t%9d (%4.1f%%) interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total)); - tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls)); - tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls)); - tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_i, percent(mono_i, _nof_interface_calls)); - tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls)); - tty->print_cr("\t%9d (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total)); - tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls)); + int64_t total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls; + int64_t mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls; + int64_t mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls; + tty->print_cr("\t" INT64_FORMAT_W(12) " (100%%) total non-inlined ", total); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", 
_nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- optimized ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_c, percent(mono_c, _nof_normal_calls)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- optimized ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- monomorphic ", mono_i, percent(mono_i, _nof_interface_calls)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- megamorphic ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total)); + tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) | |- inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls)); tty->cr(); tty->print_cr("Note 1: counter updates are not MT-safe."); tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;"); @@ -2627,31 +2643,6 @@ AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* finger } AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) { - AdapterHandlerEntry* entry = get_adapter0(method); - if (entry != NULL && method->is_shared()) { - // See comments around Method::link_method() - MutexLocker mu(AdapterHandlerLibrary_lock); - if (method->adapter() == NULL) { - method->update_adapter_trampoline(entry); - } - address trampoline = method->from_compiled_entry(); - if (*(int*)trampoline == 0) { - CodeBuffer buffer(trampoline, (int)SharedRuntime::trampoline_size()); - MacroAssembler _masm(&buffer); - SharedRuntime::generate_trampoline(&_masm, entry->get_c2i_entry()); - assert(*(int*)trampoline != 0, "Instruction(s) for trampoline must not be encoded as zeros."); - _masm.flush(); - - if (PrintInterpreter) { - Disassembler::decode(buffer.insts_begin(), buffer.insts_end()); - } - } - } - - return entry; -} - -AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter0(const methodHandle& method) { // Use customized signature handler. 
Need to lock around updates to // the AdapterHandlerTable (it is not safe for concurrent readers // and a single writer: this could be fixed if it becomes a diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp index 16e274cb1a671fea54b90c15c4d3f7d4b5505c25..2b87c4015846cd7d70e2ffbda1773f68a380d816 100644 --- a/src/hotspot/share/runtime/sharedRuntime.hpp +++ b/src/hotspot/share/runtime/sharedRuntime.hpp @@ -78,7 +78,7 @@ class SharedRuntime: AllStatic { #ifndef PRODUCT // Counters - static int _nof_megamorphic_calls; // total # of megamorphic calls (through vtable) + static int64_t _nof_megamorphic_calls; // total # of megamorphic calls (through vtable) #endif // !PRODUCT private: @@ -390,8 +390,6 @@ class SharedRuntime: AllStatic { static size_t trampoline_size(); - static void generate_trampoline(MacroAssembler *masm, address destination); - // Generate I2C and C2I adapters. These adapters are simple argument marshalling // blobs. Unlike adapters in the tiger and earlier releases the code in these // blobs does not create a new frame and are therefore virtually invisible @@ -517,10 +515,12 @@ class SharedRuntime: AllStatic { static address handle_unsafe_access(JavaThread* thread, address next_pc); - static BufferBlob* make_native_invoker(address call_target, - int shadow_space_bytes, - const GrowableArray& input_registers, - const GrowableArray& output_registers); +#ifdef COMPILER2 + static RuntimeStub* make_native_invoker(address call_target, + int shadow_space_bytes, + const GrowableArray& input_registers, + const GrowableArray& output_registers); +#endif #ifndef PRODUCT @@ -565,16 +565,16 @@ class SharedRuntime: AllStatic { // Statistics code // stats for "normal" compiled calls (non-interface) - static int _nof_normal_calls; // total # of calls - static int _nof_optimized_calls; // total # of statically-bound calls - static int _nof_inlined_calls; // total # of inlined normal calls - static int _nof_static_calls; // total # of calls to static methods or super methods (invokespecial) - static int _nof_inlined_static_calls; // total # of inlined static calls + static int64_t _nof_normal_calls; // total # of calls + static int64_t _nof_optimized_calls; // total # of statically-bound calls + static int64_t _nof_inlined_calls; // total # of inlined normal calls + static int64_t _nof_static_calls; // total # of calls to static methods or super methods (invokespecial) + static int64_t _nof_inlined_static_calls; // total # of inlined static calls // stats for compiled interface calls - static int _nof_interface_calls; // total # of compiled calls - static int _nof_optimized_interface_calls; // total # of statically-bound interface calls - static int _nof_inlined_interface_calls; // total # of inlined interface calls - static int _nof_megamorphic_interface_calls;// total # of megamorphic interface calls + static int64_t _nof_interface_calls; // total # of compiled calls + static int64_t _nof_optimized_interface_calls; // total # of statically-bound interface calls + static int64_t _nof_inlined_interface_calls; // total # of inlined interface calls + static int64_t _nof_megamorphic_interface_calls;// total # of megamorphic interface calls public: // for compiler static address nof_normal_calls_addr() { return (address)&_nof_normal_calls; } @@ -586,7 +586,7 @@ class SharedRuntime: AllStatic { static address nof_optimized_interface_calls_addr() { return (address)&_nof_optimized_interface_calls; } static address nof_inlined_interface_calls_addr() { 
return (address)&_nof_inlined_interface_calls; } static address nof_megamorphic_interface_calls_addr() { return (address)&_nof_megamorphic_interface_calls; } - static void print_call_statistics(int comp_total); + static void print_call_statistics(uint64_t comp_total); static void print_statistics(); static void print_ic_miss_histogram(); @@ -693,7 +693,6 @@ class AdapterHandlerLibrary: public AllStatic { static AdapterHandlerEntry* _abstract_method_handler; static BufferBlob* buffer_blob(); static void initialize(); - static AdapterHandlerEntry* get_adapter0(const methodHandle& method); public: diff --git a/src/hotspot/share/runtime/signature.cpp b/src/hotspot/share/runtime/signature.cpp index 3fd5811e551a0c5de143c264eeae1e208335ec67..9e1e9dd584e9779cb2f94ac0e3d6f36d5617a1a7 100644 --- a/src/hotspot/share/runtime/signature.cpp +++ b/src/hotspot/share/runtime/signature.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -393,7 +393,7 @@ Klass* SignatureStream::as_klass(Handle class_loader, Handle protection_domain, } else if (failure_mode == CachedOrNull) { NoSafepointVerifier nsv; // no loading, now, we mean it! assert(!HAS_PENDING_EXCEPTION, ""); - k = SystemDictionary::find(name, class_loader, protection_domain, CHECK_NULL); + k = SystemDictionary::find_instance_klass(name, class_loader, protection_domain); // SD::find does not trigger loading, so there should be no throws // Still, bad things can happen, so we CHECK_NULL and ask callers // to do likewise. diff --git a/src/hotspot/share/runtime/stubRoutines.cpp b/src/hotspot/share/runtime/stubRoutines.cpp index 8a4afc0fc6f2fe337f6004035be8dcb67ad321f7..59d6cb8ea3bc3ef5e1e4a5199bd0238bb9ff99c3 100644 --- a/src/hotspot/share/runtime/stubRoutines.cpp +++ b/src/hotspot/share/runtime/stubRoutines.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,8 +30,8 @@ #include "oops/oop.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/timerTrace.hpp" +#include "runtime/safefetch.inline.hpp" #include "runtime/sharedRuntime.hpp" -#include "runtime/stubRoutines.hpp" #include "utilities/align.hpp" #include "utilities/copy.hpp" #ifdef COMPILER2 diff --git a/src/hotspot/share/runtime/stubRoutines.hpp b/src/hotspot/share/runtime/stubRoutines.hpp index 9b6ae56963a4c122610f315b38f613a0a40d076d..d3e001ebdfe029b64c82668e8ef8455d9180d401 100644 --- a/src/hotspot/share/runtime/stubRoutines.hpp +++ b/src/hotspot/share/runtime/stubRoutines.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -488,24 +488,4 @@ class StubRoutines: AllStatic { static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count); }; -// Safefetch allows to load a value from a location that's not known -// to be valid. 
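For context on the counter widening above: a signed 32-bit counter wraps after about 2.1 billion events, which long-running VMs comfortably exceed, hence `int64_t` in `SharedRuntime`, `uint64_t` in `MethodArityHistogram`, and a matching 64-bit `percent()` overload. A standalone sketch of that overload's shape (the sample values are illustrative):

```c++
#include <cstdint>
#include <cstdio>

// Same shape as the patch's 64-bit overload: clamp the divisor to 1 so an
// empty counter prints 0% instead of dividing by zero.
static double percent(int64_t x, int64_t y) {
  return 100.0 * (double)x / (double)(y < 1 ? 1 : y);
}

int main() {
  // These totals would not fit in a 32-bit int at all.
  const int64_t total   = 5000000000LL;
  const int64_t inlined = 3250000000LL;
  std::printf("inlined: %lld (%4.1f%%)\n",
              (long long)inlined, percent(inlined, total));
  return 0;
}
```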
If the load causes a fault, the error value is returned. -inline int SafeFetch32(int* adr, int errValue) { - assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated"); - return StubRoutines::SafeFetch32_stub()(adr, errValue); -} -inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) { - assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated"); - return StubRoutines::SafeFetchN_stub()(adr, errValue); -} - - -// returns true if SafeFetch32 and SafeFetchN can be used safely (stubroutines are already generated) -inline bool CanUseSafeFetch32() { - return StubRoutines::SafeFetch32_stub() ? true : false; -} - -inline bool CanUseSafeFetchN() { - return StubRoutines::SafeFetchN_stub() ? true : false; -} #endif // SHARE_RUNTIME_STUBROUTINES_HPP diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp index 815b5bfdfbe2d7d15102574178bded645aebe135..3f3e4a6dd816f32492b60ee152b8518cea91c18b 100644 --- a/src/hotspot/share/runtime/synchronizer.cpp +++ b/src/hotspot/share/runtime/synchronizer.cpp @@ -28,7 +28,6 @@ #include "logging/log.hpp" #include "logging/logStream.hpp" #include "memory/allocation.inline.hpp" -#include "memory/metaspaceShared.hpp" #include "memory/padded.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp index 45c4896c5e3022e68858c843d6bb0e6fdd911b8d..49c8ef58e5982c8b06d43a2973970d79ea6f667d 100644 --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -482,6 +482,50 @@ void Thread::check_for_dangling_thread_pointer(Thread *thread) { } #endif +// Is the target JavaThread protected by the calling Thread +// or by some other mechanism: +bool Thread::is_JavaThread_protected(const JavaThread* p) { + // Do the simplest check first: + if (SafepointSynchronize::is_at_safepoint()) { + // The target is protected since JavaThreads cannot exit + // while we're at a safepoint. + return true; + } + + // Now make the simple checks based on who the caller is: + Thread* current_thread = Thread::current(); + if (current_thread == p || Threads_lock->owner() == current_thread) { + // Target JavaThread is self or calling thread owns the Threads_lock. + // Second check is the same as Threads_lock->owner_is_self(), + // but we already have the current thread so check directly. + return true; + } + + // Check the ThreadsLists associated with the calling thread (if any) + // to see if one of them protects the target JavaThread: + for (SafeThreadsListPtr* stlp = current_thread->_threads_list_ptr; + stlp != NULL; stlp = stlp->previous()) { + if (stlp->list()->includes(p)) { + // The target JavaThread is protected by this ThreadsList: + return true; + } + } + + // Use this debug code with -XX:+UseNewCode to diagnose locations that + // are missing a ThreadsListHandle or other protection mechanism: + // guarantee(!UseNewCode, "current_thread=" INTPTR_FORMAT " is not protecting p=" + // INTPTR_FORMAT, p2i(current_thread), p2i(p)); + + // Note: Since 'p' isn't protected by a TLH, the call to + // p->is_handshake_safe_for() may crash, but we have debug bits so + // we'll be able to figure out what protection mechanism is missing. 
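The new `Thread::is_JavaThread_protected()` being built up here centralizes the Thread-SMR safety question: a target JavaThread may be queried only if we are at a safepoint, we are the target, we hold `Threads_lock`, or one of our ThreadsList snapshots includes it. A hedged sketch of the caller-side idiom, assuming the usual `ThreadsListHandle` API; the function itself is illustrative, not from the patch:

```c++
#include "runtime/threadSMR.hpp"
#include "utilities/ostream.hpp"

// Illustrative caller: pin a ThreadsList snapshot so the target cannot
// exit, making it "protected" in the sense checked above.
void print_name_safely(JavaThread* target) {
  ThreadsListHandle tlh;      // snapshot of all JavaThreads, SMR-protected
  if (tlh.includes(target)) {
    // While the handle is live, Thread::is_JavaThread_protected(target)
    // holds for this thread, so querying the name is safe.
    tty->print_cr("thread name: %s", target->get_thread_name());
  }
}   // snapshot released here; 'target' may exit afterwards
```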
+ assert(p->is_handshake_safe_for(current_thread), "JavaThread=" INTPTR_FORMAT + " is not protected and not handshake safe.", p2i(p)); + + // The target JavaThread is not protected so it is not safe to query: + return false; +} + ThreadPriority Thread::get_priority(const Thread* const thread) { ThreadPriority priority; // Can return an error! @@ -896,116 +940,29 @@ static void create_initial_thread(Handle thread_group, JavaThread* thread, JavaThreadStatus::RUNNABLE); } -char java_version[64] = ""; -char java_runtime_name[128] = ""; -char java_runtime_version[128] = ""; -char java_runtime_vendor_version[128] = ""; -char java_runtime_vendor_vm_bug_url[128] = ""; - -// extract the JRE version string from java.lang.VersionProps.java_version -static const char* get_java_version(TRAPS) { - Klass* k = SystemDictionary::find(vmSymbols::java_lang_VersionProps(), - Handle(), Handle(), CHECK_AND_CLEAR_NULL); - fieldDescriptor fd; - bool found = k != NULL && - InstanceKlass::cast(k)->find_local_field(vmSymbols::java_version_name(), - vmSymbols::string_signature(), &fd); - if (found) { - oop name_oop = k->java_mirror()->obj_field(fd.offset()); - if (name_oop == NULL) { - return NULL; - } - const char* name = java_lang_String::as_utf8_string(name_oop, - java_version, - sizeof(java_version)); - return name; - } else { - return NULL; - } -} +static char java_version[64] = ""; +static char java_runtime_name[128] = ""; +static char java_runtime_version[128] = ""; +static char java_runtime_vendor_version[128] = ""; +static char java_runtime_vendor_vm_bug_url[128] = ""; -// extract the JRE name from java.lang.VersionProps.java_runtime_name -static const char* get_java_runtime_name(TRAPS) { - Klass* k = SystemDictionary::find(vmSymbols::java_lang_VersionProps(), - Handle(), Handle(), CHECK_AND_CLEAR_NULL); +// Extract version and vendor specific information. 
+static const char* get_java_version_info(InstanceKlass* ik, + Symbol* field_name, + char* buffer, + int buffer_size) { fieldDescriptor fd; - bool found = k != NULL && - InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_name_name(), - vmSymbols::string_signature(), &fd); + bool found = ik != NULL && + ik->find_local_field(field_name, + vmSymbols::string_signature(), &fd); if (found) { - oop name_oop = k->java_mirror()->obj_field(fd.offset()); + oop name_oop = ik->java_mirror()->obj_field(fd.offset()); if (name_oop == NULL) { return NULL; } const char* name = java_lang_String::as_utf8_string(name_oop, - java_runtime_name, - sizeof(java_runtime_name)); - return name; - } else { - return NULL; - } -} - -// extract the JRE version from java.lang.VersionProps.java_runtime_version -static const char* get_java_runtime_version(TRAPS) { - Klass* k = SystemDictionary::find(vmSymbols::java_lang_VersionProps(), - Handle(), Handle(), CHECK_AND_CLEAR_NULL); - fieldDescriptor fd; - bool found = k != NULL && - InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_version_name(), - vmSymbols::string_signature(), &fd); - if (found) { - oop name_oop = k->java_mirror()->obj_field(fd.offset()); - if (name_oop == NULL) { - return NULL; - } - const char* name = java_lang_String::as_utf8_string(name_oop, - java_runtime_version, - sizeof(java_runtime_version)); - return name; - } else { - return NULL; - } -} - -// extract the JRE vendor version from java.lang.VersionProps.VENDOR_VERSION -static const char* get_java_runtime_vendor_version(TRAPS) { - Klass* k = SystemDictionary::find(vmSymbols::java_lang_VersionProps(), - Handle(), Handle(), CHECK_AND_CLEAR_NULL); - fieldDescriptor fd; - bool found = k != NULL && - InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_vendor_version_name(), - vmSymbols::string_signature(), &fd); - if (found) { - oop name_oop = k->java_mirror()->obj_field(fd.offset()); - if (name_oop == NULL) { - return NULL; - } - const char* name = java_lang_String::as_utf8_string(name_oop, - java_runtime_vendor_version, - sizeof(java_runtime_vendor_version)); - return name; - } else { - return NULL; - } -} - -// extract the JRE vendor VM bug URL from java.lang.VersionProps.VENDOR_URL_VM_BUG -static const char* get_java_runtime_vendor_vm_bug_url(TRAPS) { - Klass* k = SystemDictionary::find(vmSymbols::java_lang_VersionProps(), - Handle(), Handle(), CHECK_AND_CLEAR_NULL); - fieldDescriptor fd; - bool found = k != NULL && - InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_vendor_vm_bug_url_name(), - vmSymbols::string_signature(), &fd); - if (found) { - oop name_oop = k->java_mirror()->obj_field(fd.offset()); - if (name_oop == NULL) { - return NULL; - } - const char* name = java_lang_String::as_utf8_string(name_oop, - java_runtime_vendor_vm_bug_url, - sizeof(java_runtime_vendor_vm_bug_url)); + buffer, + buffer_size); return name; } else { return NULL; @@ -2519,21 +2476,17 @@ void JavaThread::verify() { // Most callers of this method assume that it can't return NULL but a // thread may not have a name whilst it is in the process of attaching to // the VM - see CR 6412693, and there are places where a JavaThread can be -// seen prior to having it's threadObj set (eg JNI attaching threads and +// seen prior to having its threadObj set (e.g., JNI attaching threads and // if vm exit occurs during initialization). These cases can all be accounted // for such that this method never returns NULL. 
const char* JavaThread::get_thread_name() const { -#ifdef ASSERT - // early safepoints can hit while current thread does not yet have TLS - if (!SafepointSynchronize::is_at_safepoint()) { - // Current JavaThreads are allowed to get their own name without - // the Threads_lock. - if (Thread::current() != this) { - assert_locked_or_safepoint_or_handshake(Threads_lock, this); - } + if (Thread::is_JavaThread_protected(this)) { + // The target JavaThread is protected so get_thread_name_string() is safe: + return get_thread_name_string(); } -#endif // ASSERT - return get_thread_name_string(); + + // The target JavaThread is not protected so we return the default: + return Thread::name(); } // Returns a non-NULL representation of this thread's name, or a suitable @@ -3022,11 +2975,25 @@ void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) { call_initPhase1(CHECK); // get the Java runtime name, version, and vendor info after java.lang.System is initialized - JDK_Version::set_java_version(get_java_version(THREAD)); - JDK_Version::set_runtime_name(get_java_runtime_name(THREAD)); - JDK_Version::set_runtime_version(get_java_runtime_version(THREAD)); - JDK_Version::set_runtime_vendor_version(get_java_runtime_vendor_version(THREAD)); - JDK_Version::set_runtime_vendor_vm_bug_url(get_java_runtime_vendor_vm_bug_url(THREAD)); + InstanceKlass* ik = SystemDictionary::find_instance_klass(vmSymbols::java_lang_VersionProps(), + Handle(), Handle()); + + JDK_Version::set_java_version(get_java_version_info(ik, vmSymbols::java_version_name(), + java_version, sizeof(java_version))); + + JDK_Version::set_runtime_name(get_java_version_info(ik, vmSymbols::java_runtime_name_name(), + java_runtime_name, sizeof(java_runtime_name))); + + JDK_Version::set_runtime_version(get_java_version_info(ik, vmSymbols::java_runtime_version_name(), + java_runtime_version, sizeof(java_runtime_version))); + + JDK_Version::set_runtime_vendor_version(get_java_version_info(ik, vmSymbols::java_runtime_vendor_version_name(), + java_runtime_vendor_version, + sizeof(java_runtime_vendor_version))); + + JDK_Version::set_runtime_vendor_vm_bug_url(get_java_version_info(ik, vmSymbols::java_runtime_vendor_vm_bug_url_name(), + java_runtime_vendor_vm_bug_url, + sizeof(java_runtime_vendor_vm_bug_url))); // an instance of OutOfMemory exception has been allocated earlier initialize_class(vmSymbols::java_lang_OutOfMemoryError(), CHECK); @@ -3361,6 +3328,10 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { #if INCLUDE_CDS // capture the module path info from the ModuleEntryTable ClassLoader::initialize_module_path(THREAD); + if (HAS_PENDING_EXCEPTION) { + java_lang_Throwable::print(PENDING_EXCEPTION, tty); + vm_exit_during_initialization("ClassLoader::initialize_module_path() failed unexpectedly"); + } #endif #if INCLUDE_JVMCI diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp index d0125dcbc2928cc7cebf28065ea34e3ab7eae95e..1ac4a564196fa5ffc2494d647dec1456dffe9572 100644 --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -201,6 +201,10 @@ class Thread: public ThreadShadow { } public: + // Is the target JavaThread protected by the calling Thread + // or by some other mechanism: + static bool is_JavaThread_protected(const JavaThread* p); + void* operator new(size_t size) throw() { return allocate(size, true); } void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { return allocate(size, false); } @@ -274,12 +278,6 @@ 
class Thread: public ThreadShadow { // suspend/resume lock: used for self-suspend Monitor* _SR_lock; - // Stack watermark barriers. - StackWatermarks _stack_watermarks; - - public: - inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; } - protected: enum SuspendFlags { // NOTE: avoid using the sign-bit as cc generates different test code @@ -1061,6 +1059,11 @@ class JavaThread: public Thread { friend class ThreadWaitTransition; friend class VM_Exit; + // Stack watermark barriers. + StackWatermarks _stack_watermarks; + + public: + inline StackWatermarks* stack_watermarks() { return &_stack_watermarks; } public: // Constructor diff --git a/src/hotspot/share/runtime/vframe.cpp b/src/hotspot/share/runtime/vframe.cpp index 5fa4e13e11ab831bfdbb7ee38e1d9f012810e621..b50c18c944fff5b07b985212bdaca1ee0a704e30 100644 --- a/src/hotspot/share/runtime/vframe.cpp +++ b/src/hotspot/share/runtime/vframe.cpp @@ -63,6 +63,14 @@ vframe::vframe(const frame* fr, JavaThread* thread) _fr = *fr; } +vframe* vframe::new_vframe(StackFrameStream& fst, JavaThread* thread) { + if (fst.current()->is_runtime_frame()) { + fst.next(); + } + guarantee(!fst.is_done(), "missing caller"); + return new_vframe(fst.current(), fst.register_map(), thread); +} + vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThread* thread) { // Interpreter frame if (f->is_interpreted_frame()) { @@ -562,15 +570,6 @@ void vframeStreamCommon::skip_prefixed_method_and_wrappers() { } } - -void vframeStreamCommon::skip_reflection_related_frames() { - while (!at_end() && - (method()->method_holder()->is_subclass_of(vmClasses::reflect_MethodAccessorImpl_klass()) || - method()->method_holder()->is_subclass_of(vmClasses::reflect_ConstructorAccessorImpl_klass()))) { - next(); - } -} - javaVFrame* vframeStreamCommon::asJavaVFrame() { javaVFrame* result = NULL; if (_mode == compiled_mode) { diff --git a/src/hotspot/share/runtime/vframe.hpp b/src/hotspot/share/runtime/vframe.hpp index 52e58403094a62ff3c21ff8a20612fe6f63dafac..bdbdd85ff2e6b096c6156d8f982b2b029c1151c7 100644 --- a/src/hotspot/share/runtime/vframe.hpp +++ b/src/hotspot/share/runtime/vframe.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,8 +60,9 @@ class vframe: public ResourceObj { vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread); vframe(const frame* fr, JavaThread* thread); public: - // Factory method for creating vframes + // Factory methods for creating vframes static vframe* new_vframe(const frame* f, const RegisterMap *reg_map, JavaThread* thread); + static vframe* new_vframe(StackFrameStream& fst, JavaThread* thread); // Accessors frame fr() const { return _fr; } @@ -328,10 +329,6 @@ class vframeStreamCommon : StackObj { // Implements security traversal. Skips depth no. 
of frame including // special security frames and prefixed native methods void security_get_caller_frame(int depth); - - // Helper routine for JVM_LatestUserDefinedLoader -- needed for 1.4 - // reflection implementation - void skip_reflection_related_frames(); }; class vframeStream : public vframeStreamCommon { diff --git a/src/hotspot/share/runtime/vmOperation.hpp b/src/hotspot/share/runtime/vmOperation.hpp new file mode 100644 index 0000000000000000000000000000000000000000..4e41eb5dcbb10c8853a0235141f38babed54cc3a --- /dev/null +++ b/src/hotspot/share/runtime/vmOperation.hpp @@ -0,0 +1,174 @@ +/* + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_RUNTIME_VMOPERATION_HPP +#define SHARE_RUNTIME_VMOPERATION_HPP + +#include "memory/allocation.hpp" + +// The following classes are used for operations +// initiated by a Java thread but that must +// take place in the VMThread. + +#define VM_OP_ENUM(type) VMOp_##type, + +// Note: When new VM_XXX comes up, add 'XXX' to the template table. 
+#define VM_OPS_DO(template) \ + template(None) \ + template(Cleanup) \ + template(ThreadDump) \ + template(PrintThreads) \ + template(FindDeadlocks) \ + template(ClearICs) \ + template(ForceSafepoint) \ + template(ForceAsyncSafepoint) \ + template(DeoptimizeFrame) \ + template(DeoptimizeAll) \ + template(ZombieAll) \ + template(Verify) \ + template(PrintJNI) \ + template(HeapDumper) \ + template(DeoptimizeTheWorld) \ + template(CollectForMetadataAllocation) \ + template(GC_HeapInspection) \ + template(GenCollectFull) \ + template(GenCollectFullConcurrent) \ + template(GenCollectForAllocation) \ + template(ParallelGCFailedAllocation) \ + template(ParallelGCSystemGC) \ + template(G1CollectForAllocation) \ + template(G1CollectFull) \ + template(G1Concurrent) \ + template(G1TryInitiateConcMark) \ + template(ZMarkStart) \ + template(ZMarkEnd) \ + template(ZRelocateStart) \ + template(ZVerify) \ + template(HandshakeOneThread) \ + template(HandshakeAllThreads) \ + template(HandshakeFallback) \ + template(EnableBiasedLocking) \ + template(BulkRevokeBias) \ + template(PopulateDumpSharedSpace) \ + template(JNIFunctionTableCopier) \ + template(RedefineClasses) \ + template(GetObjectMonitorUsage) \ + template(GetAllStackTraces) \ + template(GetThreadListStackTraces) \ + template(ChangeBreakpoints) \ + template(GetOrSetLocal) \ + template(ChangeSingleStep) \ + template(HeapWalkOperation) \ + template(HeapIterateOperation) \ + template(ReportJavaOutOfMemory) \ + template(JFRCheckpoint) \ + template(ShenandoahFullGC) \ + template(ShenandoahInitMark) \ + template(ShenandoahFinalMarkStartEvac) \ + template(ShenandoahInitUpdateRefs) \ + template(ShenandoahFinalUpdateRefs) \ + template(ShenandoahDegeneratedGC) \ + template(Exit) \ + template(LinuxDllLoad) \ + template(RotateGCLog) \ + template(WhiteBoxOperation) \ + template(JVMCIResizeCounters) \ + template(ClassLoaderStatsOperation) \ + template(ClassLoaderHierarchyOperation) \ + template(DumpHashtable) \ + template(DumpTouchedMethods) \ + template(CleanClassLoaderDataMetaspaces) \ + template(PrintCompileQueue) \ + template(PrintClassHierarchy) \ + template(ThreadSuspend) \ + template(ThreadsSuspendJVMTI) \ + template(ICBufferFull) \ + template(ScavengeMonitors) \ + template(PrintMetadata) \ + template(GTestExecuteAtSafepoint) \ + template(JFROldObject) \ + template(JvmtiPostObjectFree) + +class Thread; +class outputStream; + +class VM_Operation : public StackObj { + public: + enum VMOp_Type { + VM_OPS_DO(VM_OP_ENUM) + VMOp_Terminating + }; + + private: + Thread* _calling_thread; + + // The VM operation name array + static const char* _names[]; + + public: + VM_Operation() : _calling_thread(NULL) {} + + // VM operation support (used by VM thread) + Thread* calling_thread() const { return _calling_thread; } + void set_calling_thread(Thread* thread); + + // Called by VM thread - does in turn invoke doit(). Do not override this + void evaluate(); + + // evaluate() is called by the VMThread and in turn calls doit(). + // If the thread invoking VMThread::execute((VM_Operation*) is a JavaThread, + // doit_prologue() is called in that thread before transferring control to + // the VMThread. + // If doit_prologue() returns true the VM operation will proceed, and + // doit_epilogue() will be called by the JavaThread once the VM operation + // completes. If doit_prologue() returns false the VM operation is cancelled. + virtual void doit() = 0; + virtual bool doit_prologue() { return true; }; + virtual void doit_epilogue() {}; + + // Configuration. 
Override these appropriately in subclasses. + virtual VMOp_Type type() const = 0; + virtual bool allow_nested_vm_operations() const { return false; } + + // You may override skip_thread_oop_barriers to return true if the operation + // does not access thread-private oops (including frames). + virtual bool skip_thread_oop_barriers() const { return false; } + + // An operation can either be done inside a safepoint + // or concurrently with Java threads running. + virtual bool evaluate_at_safepoint() const { return true; } + + // Debugging + virtual void print_on_error(outputStream* st) const; + virtual const char* name() const { return _names[type()]; } + static const char* name(int type) { + assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type"); + return _names[type]; + } +#ifndef PRODUCT + void print_on(outputStream* st) const { print_on_error(st); } +#endif +}; + +#endif // SHARE_RUNTIME_VMOPERATION_HPP diff --git a/src/hotspot/share/runtime/vmOperations.hpp b/src/hotspot/share/runtime/vmOperations.hpp index 3dc1608678cd265dfaab25bc9ba3476104039838..3a34f5d79e9c5652067a446f6c01829ce4f961c8 100644 --- a/src/hotspot/share/runtime/vmOperations.hpp +++ b/src/hotspot/share/runtime/vmOperations.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,151 +25,12 @@ #ifndef SHARE_RUNTIME_VMOPERATIONS_HPP #define SHARE_RUNTIME_VMOPERATIONS_HPP -#include "memory/allocation.hpp" #include "oops/oop.hpp" +#include "runtime/vmOperation.hpp" #include "runtime/thread.hpp" #include "runtime/threadSMR.hpp" -// The following classes are used for operations -// initiated by a Java thread but that must -// take place in the VMThread. - -#define VM_OP_ENUM(type) VMOp_##type, - -// Note: When new VM_XXX comes up, add 'XXX' to the template table. 
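The `VM_OPS_DO`/`VM_OP_ENUM` pair moved into the new `vmOperation.hpp` is a classic X-macro: one master list expands into both the enum and the name table, so the two can never drift apart. A toy standalone version of the technique (the op names here are made up):

```c++
#include <cstdio>

// One master list, expanded twice with different per-entry macros.
#define TOY_OPS_DO(f) \
  f(None)             \
  f(Cleanup)          \
  f(Exit)

#define TOY_ENUM(name) Op_##name,
#define TOY_NAME(name) #name,

enum ToyOp { TOY_OPS_DO(TOY_ENUM) Op_Terminating };
static const char* toy_names[] = { TOY_OPS_DO(TOY_NAME) };

int main() {
  for (int i = 0; i < Op_Terminating; i++) {
    std::printf("%d -> %s\n", i, toy_names[i]);
  }
  return 0;
}
```

Adding a new operation means adding one `template(XXX)` line, exactly as the "add 'XXX' to the template table" note instructs.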
-#define VM_OPS_DO(template) \ - template(None) \ - template(Cleanup) \ - template(ThreadDump) \ - template(PrintThreads) \ - template(FindDeadlocks) \ - template(ClearICs) \ - template(ForceSafepoint) \ - template(ForceAsyncSafepoint) \ - template(DeoptimizeFrame) \ - template(DeoptimizeAll) \ - template(ZombieAll) \ - template(Verify) \ - template(PrintJNI) \ - template(HeapDumper) \ - template(DeoptimizeTheWorld) \ - template(CollectForMetadataAllocation) \ - template(GC_HeapInspection) \ - template(GenCollectFull) \ - template(GenCollectFullConcurrent) \ - template(GenCollectForAllocation) \ - template(ParallelGCFailedAllocation) \ - template(ParallelGCSystemGC) \ - template(G1CollectForAllocation) \ - template(G1CollectFull) \ - template(G1Concurrent) \ - template(G1TryInitiateConcMark) \ - template(ZMarkStart) \ - template(ZMarkEnd) \ - template(ZRelocateStart) \ - template(ZVerify) \ - template(HandshakeOneThread) \ - template(HandshakeAllThreads) \ - template(HandshakeFallback) \ - template(EnableBiasedLocking) \ - template(BulkRevokeBias) \ - template(PopulateDumpSharedSpace) \ - template(JNIFunctionTableCopier) \ - template(RedefineClasses) \ - template(GetObjectMonitorUsage) \ - template(GetAllStackTraces) \ - template(GetThreadListStackTraces) \ - template(ChangeBreakpoints) \ - template(GetOrSetLocal) \ - template(ChangeSingleStep) \ - template(HeapWalkOperation) \ - template(HeapIterateOperation) \ - template(ReportJavaOutOfMemory) \ - template(JFRCheckpoint) \ - template(ShenandoahFullGC) \ - template(ShenandoahInitMark) \ - template(ShenandoahFinalMarkStartEvac) \ - template(ShenandoahInitUpdateRefs) \ - template(ShenandoahFinalUpdateRefs) \ - template(ShenandoahDegeneratedGC) \ - template(Exit) \ - template(LinuxDllLoad) \ - template(RotateGCLog) \ - template(WhiteBoxOperation) \ - template(JVMCIResizeCounters) \ - template(ClassLoaderStatsOperation) \ - template(ClassLoaderHierarchyOperation) \ - template(DumpHashtable) \ - template(DumpTouchedMethods) \ - template(CleanClassLoaderDataMetaspaces) \ - template(PrintCompileQueue) \ - template(PrintClassHierarchy) \ - template(ThreadSuspend) \ - template(ThreadsSuspendJVMTI) \ - template(ICBufferFull) \ - template(ScavengeMonitors) \ - template(PrintMetadata) \ - template(GTestExecuteAtSafepoint) \ - template(JFROldObject) \ - template(JvmtiPostObjectFree) - -class VM_Operation : public StackObj { - public: - enum VMOp_Type { - VM_OPS_DO(VM_OP_ENUM) - VMOp_Terminating - }; - - private: - Thread* _calling_thread; - - // The VM operation name array - static const char* _names[]; - - public: - VM_Operation() : _calling_thread(NULL) {} - - // VM operation support (used by VM thread) - Thread* calling_thread() const { return _calling_thread; } - void set_calling_thread(Thread* thread); - - // Called by VM thread - does in turn invoke doit(). Do not override this - void evaluate(); - - // evaluate() is called by the VMThread and in turn calls doit(). - // If the thread invoking VMThread::execute((VM_Operation*) is a JavaThread, - // doit_prologue() is called in that thread before transferring control to - // the VMThread. - // If doit_prologue() returns true the VM operation will proceed, and - // doit_epilogue() will be called by the JavaThread once the VM operation - // completes. If doit_prologue() returns false the VM operation is cancelled. - virtual void doit() = 0; - virtual bool doit_prologue() { return true; }; - virtual void doit_epilogue() {}; - - // Configuration. Override these appropriately in subclasses. 
- virtual VMOp_Type type() const = 0; - virtual bool allow_nested_vm_operations() const { return false; } - - // You may override skip_thread_oop_barriers to return true if the operation - // does not access thread-private oops (including frames). - virtual bool skip_thread_oop_barriers() const { return false; } - - // An operation can either be done inside a safepoint - // or concurrently with Java threads running. - virtual bool evaluate_at_safepoint() const { return true; } - - // Debugging - virtual void print_on_error(outputStream* st) const; - virtual const char* name() const { return _names[type()]; } - static const char* name(int type) { - assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type"); - return _names[type]; - } -#ifndef PRODUCT - void print_on(outputStream* st) const { print_on_error(st); } -#endif -}; +// A hodge podge of commonly used VM Operations class VM_None: public VM_Operation { const char* _reason; diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index f4741b1fae1349a9a8b76bf3fe4243d1d28e41ac..60600d48d4a1c0cfff6bba00a2e356417a26f7d6 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -307,7 +307,6 @@ typedef HashtableEntry KlassHashtableEntry; nonstatic_field(Method, _vtable_index, int) \ nonstatic_field(Method, _intrinsic_id, u2) \ nonstatic_field(Method, _flags, u2) \ - nonproduct_nonstatic_field(Method, _compiled_invocation_count, int) \ volatile_nonstatic_field(Method, _code, CompiledMethod*) \ nonstatic_field(Method, _i2i_entry, address) \ volatile_nonstatic_field(Method, _from_compiled_entry, address) \ diff --git a/src/hotspot/share/runtime/vmThread.hpp b/src/hotspot/share/runtime/vmThread.hpp index 915f8926e2909adddf703bd8d502c5f6128a012f..e15a26c01e9453d8c19494b3738797f296da4360 100644 --- a/src/hotspot/share/runtime/vmThread.hpp +++ b/src/hotspot/share/runtime/vmThread.hpp @@ -29,7 +29,7 @@ #include "runtime/nonJavaThread.hpp" #include "runtime/thread.hpp" #include "runtime/task.hpp" -#include "runtime/vmOperations.hpp" +#include "runtime/vmOperation.hpp" // VM operation timeout handling: warn or abort the VM when VM operation takes // too long. 
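The class comment relocated above spells out the VM-operation protocol: `doit_prologue()` runs in the requesting JavaThread and can cancel, `doit()` runs in the VMThread (by default inside a safepoint), and `doit_epilogue()` runs back in the requester. A sketch of a conforming operation; the subclass and its work are invented for illustration, only the base-class API is as declared in `vmOperation.hpp`:

```c++
#include "runtime/vmOperation.hpp"
#include "runtime/vmThread.hpp"

// Illustrative operation; reuses an existing VMOp_Type for the sketch.
class VM_CountSomething : public VM_Operation {
  int _result;
public:
  VM_CountSomething() : _result(0) {}
  VMOp_Type type() const { return VMOp_ForceSafepoint; }
  bool doit_prologue()   { return true; }   // requesting JavaThread; false cancels
  void doit()            { _result = 42; }  // VMThread, inside a safepoint
  void doit_epilogue()   { }                // requesting JavaThread, afterwards
  int result() const     { return _result; }
};

// Usage from a JavaThread:
//   VM_CountSomething op;
//   VMThread::execute(&op);   // blocks until doit() has completed
//   int r = op.result();
```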
Periodic tasks do not participate in safepoint protocol, and therefore diff --git a/src/hotspot/share/services/allocationSite.hpp b/src/hotspot/share/services/allocationSite.hpp index 95ee196dd45bb4b6de21d4046e23e04a1efae829..41ff2629509368ae80bbf2687ca3960737c3b9a6 100644 --- a/src/hotspot/share/services/allocationSite.hpp +++ b/src/hotspot/share/services/allocationSite.hpp @@ -30,19 +30,18 @@ // Allocation site represents a code path that makes a memory // allocation -template class AllocationSite { +class AllocationSite { private: - NativeCallStack _call_stack; - E e; - MEMFLAGS _flag; + const NativeCallStack _call_stack; + const MEMFLAGS _flag; public: AllocationSite(const NativeCallStack& stack, MEMFLAGS flag) : _call_stack(stack), _flag(flag) { } - int hash() const { return _call_stack.hash(); } + bool equals(const NativeCallStack& stack) const { return _call_stack.equals(stack); } - bool equals(const AllocationSite& other) const { + bool equals(const AllocationSite& other) const { return other.equals(_call_stack); } @@ -50,10 +49,6 @@ template class AllocationSite { return &_call_stack; } - // Information regarding this allocation - E* data() { return &e; } - const E* peek() const { return &e; } - MEMFLAGS flag() const { return _flag; } }; diff --git a/src/hotspot/share/services/attachListener.cpp b/src/hotspot/share/services/attachListener.cpp index 6abbc9161745d01d10ebcf67941718c2823b9881..738f7bdedc9b9aada0a2e2460e0477500c9b484c 100644 --- a/src/hotspot/share/services/attachListener.cpp +++ b/src/hotspot/share/services/attachListener.cpp @@ -39,6 +39,7 @@ #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/os.hpp" +#include "runtime/vmOperations.hpp" #include "services/attachListener.hpp" #include "services/diagnosticCommand.hpp" #include "services/heapDumper.hpp" diff --git a/src/hotspot/share/services/attachListener.hpp b/src/hotspot/share/services/attachListener.hpp index 251b9766735595a5d9ad6b993c03ee6ed67fabb9..d80f2be951178983f7a3236aa74b9de002fa971a 100644 --- a/src/hotspot/share/services/attachListener.hpp +++ b/src/hotspot/share/services/attachListener.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
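The `allocationSite.hpp` hunk above drops the payload template parameter `E`; the only real instantiation, `MallocSite`, now composes its `MemoryCounter` as a plain member (see the `mallocSiteTable.hpp` hunk later in this patch). A compilable toy sketch of that before/after shape, with all types reduced to simplified stand-ins:

```c++
#include <cstddef>
#include <cstdio>

// After the refactor: the site type is payload-free and concrete...
class AllocationSite {
  const int _flag;                 // stands in for MEMFLAGS
public:
  explicit AllocationSite(int flag) : _flag(flag) {}
  int flag() const { return _flag; }
};

class MemoryCounter {
  size_t _size = 0;
public:
  void allocate(size_t sz) { _size += sz; }
  size_t size() const { return _size; }
};

// ...and the one user composes its counter as an ordinary member instead
// of threading it through a template parameter.
class MallocSite : public AllocationSite {
  MemoryCounter _c;
public:
  explicit MallocSite(int flag) : AllocationSite(flag) {}
  void allocate(size_t sz) { _c.allocate(sz); }
  size_t size() const { return _c.size(); }
};

int main() {
  MallocSite site(1);
  site.allocate(64);
  std::printf("flag=%d size=%zu\n", site.flag(), site.size());
  return 0;
}
```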
* * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "runtime/atomic.hpp" +#include "runtime/globals.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" diff --git a/src/hotspot/share/services/diagnosticCommand.cpp b/src/hotspot/share/services/diagnosticCommand.cpp index 999ee48ddcf97bbc6e8a0250a6747babeca77079..ab3d133cc852f301996c5799de5725efb3092f72 100644 --- a/src/hotspot/share/services/diagnosticCommand.cpp +++ b/src/hotspot/share/services/diagnosticCommand.cpp @@ -46,6 +46,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/jniHandles.hpp" #include "runtime/os.hpp" +#include "runtime/vmOperations.hpp" #include "runtime/vm_version.hpp" #include "services/diagnosticArgument.hpp" #include "services/diagnosticCommand.hpp" diff --git a/src/hotspot/share/services/mallocSiteTable.cpp b/src/hotspot/share/services/mallocSiteTable.cpp index 7cb30d7f107bf3a2a33aa028c7e73e7688d2332f..869a67385318f78c6d299682ddea3432eac5e5e4 100644 --- a/src/hotspot/share/services/mallocSiteTable.cpp +++ b/src/hotspot/share/services/mallocSiteTable.cpp @@ -81,7 +81,7 @@ bool MallocSiteTable::initialize() { _hash_entry_allocation_site = &entry; // Add the allocation site to hashtable. - int index = hash_to_index(stack.hash()); + int index = hash_to_index(entry.hash()); _table[index] = const_cast(&entry); return true; @@ -117,7 +117,8 @@ bool MallocSiteTable::walk(MallocSiteWalker* walker) { MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) { assert(flags != mtNone, "Should have a real memory type"); - unsigned int index = hash_to_index(key.hash()); + const unsigned int hash = key.calculate_hash(); + const unsigned int index = hash_to_index(hash); *bucket_idx = (size_t)index; *pos_idx = 0; @@ -137,9 +138,11 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* b MallocSiteHashtableEntry* head = _table[index]; while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) { - MallocSite* site = head->data(); - if (site->flag() == flags && site->equals(key)) { - return head->data(); + if (head->hash() == hash) { + MallocSite* site = head->data(); + if (site->flag() == flags && site->equals(key)) { + return head->data(); + } } if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) { @@ -242,6 +245,82 @@ void MallocSiteTable::AccessLock::exclusiveLock() { _lock_state = ExclusiveLock; } +void MallocSiteTable::print_tuning_statistics(outputStream* st) { + + AccessLock locker(&_access_count); + if (locker.sharedLock()) { + // Total number of allocation sites, include empty sites + int total_entries = 0; + // Number of allocation sites that have all memory freed + int empty_entries = 0; + // Number of captured call stack distribution + int stack_depth_distribution[NMT_TrackingStackDepth + 1] = { 0 }; + // Chain lengths + int lengths[table_size] = { 0 }; + + for (int i = 0; i < table_size; i ++) { + int this_chain_length = 0; + const MallocSiteHashtableEntry* head = _table[i]; + while (head != NULL) { + total_entries ++; + this_chain_length ++; + if (head->size() == 0) { + empty_entries ++; + } + const int callstack_depth = head->peek()->call_stack()->frames(); + assert(callstack_depth >= 0 && callstack_depth <= NMT_TrackingStackDepth, + "Sanity (%d)", callstack_depth); + stack_depth_distribution[callstack_depth] ++; + head = head->next(); + } + lengths[i] = this_chain_length; + } + + 
st->print_cr("Malloc allocation site table:"); + st->print_cr("\tTotal entries: %d", total_entries); + st->print_cr("\tEmpty entries: %d (%2.2f%%)", empty_entries, ((float)empty_entries * 100) / total_entries); + st->cr(); + + // We report the hash distribution (chain length distribution) of the n shortest chains + // - under the assumption that this usually contains all lengths. Reporting threshold + // is 20, and the expected avg chain length is 5..6 (see table size). + static const int chain_length_threshold = 20; + int chain_length_distribution[chain_length_threshold] = { 0 }; + int over_threshold = 0; + int longest_chain_length = 0; + for (int i = 0; i < table_size; i ++) { + if (lengths[i] >= chain_length_threshold) { + over_threshold ++; + } else { + chain_length_distribution[lengths[i]] ++; + } + longest_chain_length = MAX2(longest_chain_length, lengths[i]); + } + + st->print_cr("Hash distribution:"); + if (chain_length_distribution[0] == 0) { + st->print_cr("no empty buckets."); + } else { + st->print_cr("%d buckets are empty.", chain_length_distribution[0]); + } + for (int len = 1; len < MIN2(longest_chain_length + 1, chain_length_threshold); len ++) { + st->print_cr("%2d %s: %d.", len, (len == 1 ? " entry" : "entries"), chain_length_distribution[len]); + } + if (longest_chain_length >= chain_length_threshold) { + st->print_cr(">=%2d entries: %d.", chain_length_threshold, over_threshold); + } + st->print_cr("most entries: %d.", longest_chain_length); + st->cr(); + + st->print_cr("Call stack depth distribution:"); + for (int i = 0; i <= NMT_TrackingStackDepth; i ++) { + st->print_cr("\t%d: %d", i, stack_depth_distribution[i]); + } + st->cr(); + } // lock +} + + bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) { return Atomic::replace_if_null(&_next, entry); } diff --git a/src/hotspot/share/services/mallocSiteTable.hpp b/src/hotspot/share/services/mallocSiteTable.hpp index 71666d6e02a848e56e80a463fd7243d6f2047e02..f401d456b18a9ed7b0b0331136389f215a0c5d9e 100644 --- a/src/hotspot/share/services/mallocSiteTable.hpp +++ b/src/hotspot/share/services/mallocSiteTable.hpp @@ -38,35 +38,32 @@ // MallocSite represents a code path that eventually calls // os::malloc() to allocate memory -class MallocSite : public AllocationSite { +class MallocSite : public AllocationSite { + MemoryCounter _c; public: - MallocSite() : - AllocationSite(NativeCallStack::empty_stack(), mtNone) {} - MallocSite(const NativeCallStack& stack, MEMFLAGS flags) : - AllocationSite(stack, flags) {} - + AllocationSite(stack, flags) {} - void allocate(size_t size) { data()->allocate(size); } - void deallocate(size_t size) { data()->deallocate(size); } + void allocate(size_t size) { _c.allocate(size); } + void deallocate(size_t size) { _c.deallocate(size); } // Memory allocated from this code path - size_t size() const { return peek()->size(); } + size_t size() const { return _c.size(); } // The number of calls were made - size_t count() const { return peek()->count(); } + size_t count() const { return _c.count(); } }; // Malloc site hashtable entry class MallocSiteHashtableEntry : public CHeapObj { private: MallocSite _malloc_site; + const unsigned int _hash; MallocSiteHashtableEntry* volatile _next; public: - MallocSiteHashtableEntry() : _next(NULL) { } MallocSiteHashtableEntry(NativeCallStack stack, MEMFLAGS flags): - _malloc_site(stack, flags), _next(NULL) { + _malloc_site(stack, flags), _hash(stack.calculate_hash()), _next(NULL) { assert(flags != mtNone, "Expect a real memory type"); } @@ 
-79,17 +76,11 @@ class MallocSiteHashtableEntry : public CHeapObj { // The operation can be failed due to contention from other thread. bool atomic_insert(MallocSiteHashtableEntry* entry); - void set_callsite(const MallocSite& site) { - _malloc_site = site; - } + unsigned int hash() const { return _hash; } inline const MallocSite* peek() const { return &_malloc_site; } inline MallocSite* data() { return &_malloc_site; } - inline long hash() const { return _malloc_site.hash(); } - inline bool equals(const NativeCallStack& stack) const { - return _malloc_site.equals(stack); - } // Allocation/deallocation on this allocation site inline void allocate(size_t size) { _malloc_site.allocate(size); } inline void deallocate(size_t size) { _malloc_site.deallocate(size); } @@ -229,6 +220,8 @@ class MallocSiteTable : AllStatic { // Walk this table. static bool walk_malloc_site(MallocSiteWalker* walker); + static void print_tuning_statistics(outputStream* st); + private: static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags); static void reset(); diff --git a/src/hotspot/share/services/mallocTracker.cpp b/src/hotspot/share/services/mallocTracker.cpp index f0a0e382e4f9d4e5abefb4c94b223f504a4ee7f9..e891a06cb3ead9a6223304f60831f167da4a00c6 100644 --- a/src/hotspot/share/services/mallocTracker.cpp +++ b/src/hotspot/share/services/mallocTracker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,36 @@ size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)]; +#ifdef ASSERT +void MemoryCounter::update_peak_count(size_t count) { + size_t peak_cnt = peak_count(); + while (peak_cnt < count) { + size_t old_cnt = Atomic::cmpxchg(&_peak_count, peak_cnt, count, memory_order_relaxed); + if (old_cnt != peak_cnt) { + peak_cnt = old_cnt; + } + } +} + +void MemoryCounter::update_peak_size(size_t sz) { + size_t peak_sz = peak_size(); + while (peak_sz < sz) { + size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, sz, memory_order_relaxed); + if (old_sz != peak_sz) { + peak_sz = old_sz; + } + } +} + +size_t MemoryCounter::peak_count() const { + return Atomic::load(&_peak_count); +} + +size_t MemoryCounter::peak_size() const { + return Atomic::load(&_peak_size); +} +#endif + // Total malloc'd memory amount size_t MallocMemorySnapshot::total() const { size_t amount = 0; diff --git a/src/hotspot/share/services/mallocTracker.hpp b/src/hotspot/share/services/mallocTracker.hpp index acfaa3a880d1aa1f1e0a3dbdb892d77543910fe4..7169ee358ebc6d6ed690c20e111f7034a2dee40d 100644 --- a/src/hotspot/share/services/mallocTracker.hpp +++ b/src/hotspot/share/services/mallocTracker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
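The `update_peak_count()`/`update_peak_size()` bodies added above implement a lock-free "monotonic maximum": reload the current peak and retry the `cmpxchg` until either the stored peak already covers our value or our store wins. A standalone equivalent using `std::atomic` (the HotSpot version uses `Atomic::cmpxchg` with the same relaxed ordering):

```c++
#include <atomic>
#include <cstddef>

// Standalone equivalent of the patch's update_peak_size(): raise 'peak' to
// at least 'sz' without locks. compare_exchange_weak reloads the freshly
// observed value into 'cur' on failure, so the loop re-tests automatically.
static void update_peak(std::atomic<size_t>& peak, size_t sz) {
  size_t cur = peak.load(std::memory_order_relaxed);
  while (cur < sz &&
         !peak.compare_exchange_weak(cur, sz, std::memory_order_relaxed)) {
    // loop exits once cur >= sz or our store succeeds
  }
}
```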
* * This code is free software; you can redistribute it and/or modify it @@ -43,8 +43,8 @@ class MemoryCounter { volatile size_t _count; volatile size_t _size; - DEBUG_ONLY(size_t _peak_count;) - DEBUG_ONLY(size_t _peak_size; ) + DEBUG_ONLY(volatile size_t _peak_count;) + DEBUG_ONLY(volatile size_t _peak_size; ) public: MemoryCounter() : _count(0), _size(0) { @@ -53,36 +53,40 @@ class MemoryCounter { } inline void allocate(size_t sz) { - Atomic::inc(&_count); + size_t cnt = Atomic::add(&_count, size_t(1), memory_order_relaxed); if (sz > 0) { - Atomic::add(&_size, sz); - DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size)); + size_t sum = Atomic::add(&_size, sz, memory_order_relaxed); + DEBUG_ONLY(update_peak_size(sum);) } - DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);) + DEBUG_ONLY(update_peak_count(cnt);) } inline void deallocate(size_t sz) { - assert(_count > 0, "Nothing allocated yet"); - assert(_size >= sz, "deallocation > allocated"); - Atomic::dec(&_count); + assert(count() > 0, "Nothing allocated yet"); + assert(size() >= sz, "deallocation > allocated"); + Atomic::dec(&_count, memory_order_relaxed); if (sz > 0) { - Atomic::sub(&_size, sz); + Atomic::sub(&_size, sz, memory_order_relaxed); } } inline void resize(ssize_t sz) { if (sz != 0) { - assert(sz >= 0 || _size >= size_t(-sz), "Must be"); - Atomic::add(&_size, size_t(sz)); - DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);) + assert(sz >= 0 || size() >= size_t(-sz), "Must be"); + size_t sum = Atomic::add(&_size, size_t(sz), memory_order_relaxed); + DEBUG_ONLY(update_peak_size(sum);) } } - inline size_t count() const { return _count; } - inline size_t size() const { return _size; } - DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; }) - DEBUG_ONLY(inline size_t peak_size() const { return _peak_size; }) + inline size_t count() const { return Atomic::load(&_count); } + inline size_t size() const { return Atomic::load(&_size); } +#ifdef ASSERT + void update_peak_count(size_t cnt); + void update_peak_size(size_t sz); + size_t peak_count() const; + size_t peak_size() const; +#endif // ASSERT }; /* diff --git a/src/hotspot/share/services/management.cpp b/src/hotspot/share/services/management.cpp index 9070f26bc01d88f17738ae72151e87bf4a498c49..62f276044bd5235ba2a2816e55441ad4e3244891 100644 --- a/src/hotspot/share/services/management.cpp +++ b/src/hotspot/share/services/management.cpp @@ -52,6 +52,7 @@ #include "runtime/os.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadSMR.hpp" +#include "runtime/vmOperations.hpp" #include "services/classLoadingService.hpp" #include "services/diagnosticCommand.hpp" #include "services/diagnosticFramework.hpp" diff --git a/src/hotspot/share/services/memReporter.cpp b/src/hotspot/share/services/memReporter.cpp index 019c22131b312fdd2d758a77fb07ceac8b6a73c0..3f418f307870176d0b0145fd7e30e6a7ac49a8a3 100644 --- a/src/hotspot/share/services/memReporter.cpp +++ b/src/hotspot/share/services/memReporter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "memory/allocation.hpp" #include "memory/metaspace.hpp" +#include "memory/metaspaceUtils.hpp" #include "services/mallocTracker.hpp" #include "services/memReporter.hpp" #include "services/threadStackTracker.hpp" diff --git a/src/hotspot/share/services/memReporter.hpp b/src/hotspot/share/services/memReporter.hpp index 6685c102efca096e4d97892ecb922d5c9a4b9c80..70e73e5d97cb0d438d06f5d5a2910bf5b228dac0 100644 --- a/src/hotspot/share/services/memReporter.hpp +++ b/src/hotspot/share/services/memReporter.hpp @@ -39,14 +39,17 @@ */ class MemReporterBase : public StackObj { private: - size_t _scale; // report in this scale - outputStream* _output; // destination + const size_t _scale; // report in this scale + outputStream* const _output; // destination public: - MemReporterBase(outputStream* out = NULL, size_t scale = K) - : _scale(scale) { - _output = (out == NULL) ? tty : out; - } + + // Default scale to use if no scale given. + static const size_t default_scale = K; + + MemReporterBase(outputStream* out, size_t scale = default_scale) : + _scale(scale), _output(out) + {} protected: inline outputStream* output() const { @@ -74,7 +77,6 @@ class MemReporterBase : public StackObj { size_t reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const; size_t committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const; - // Print summary total, malloc and virtual memory void print_total(size_t reserved, size_t committed) const; void print_malloc(size_t amount, size_t count, MEMFLAGS flag = mtNone) const; @@ -100,7 +102,7 @@ class MemSummaryReporter : public MemReporterBase { public: // This constructor is for normal reporting from a recent baseline. 
MemSummaryReporter(MemBaseline& baseline, outputStream* output, - size_t scale = K) : MemReporterBase(output, scale), + size_t scale = default_scale) : MemReporterBase(output, scale), _malloc_snapshot(baseline.malloc_memory_snapshot()), _vm_snapshot(baseline.virtual_memory_snapshot()), _instance_class_count(baseline.instance_class_count()), @@ -125,7 +127,7 @@ class MemDetailReporter : public MemSummaryReporter { MemBaseline& _baseline; public: - MemDetailReporter(MemBaseline& baseline, outputStream* output, size_t scale = K) : + MemDetailReporter(MemBaseline& baseline, outputStream* output, size_t scale = default_scale) : MemSummaryReporter(baseline, output, scale), _baseline(baseline) { } @@ -162,7 +164,7 @@ class MemSummaryDiffReporter : public MemReporterBase { public: MemSummaryDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline, - outputStream* output, size_t scale = K) : MemReporterBase(output, scale), + outputStream* output, size_t scale = default_scale) : MemReporterBase(output, scale), _early_baseline(early_baseline), _current_baseline(current_baseline) { assert(early_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined"); assert(current_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined"); @@ -201,7 +203,7 @@ class MemSummaryDiffReporter : public MemReporterBase { class MemDetailDiffReporter : public MemSummaryDiffReporter { public: MemDetailDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline, - outputStream* output, size_t scale = K) : + outputStream* output, size_t scale = default_scale) : MemSummaryDiffReporter(early_baseline, current_baseline, output, scale) { } // Generate detail comparison report diff --git a/src/hotspot/share/services/memTracker.cpp b/src/hotspot/share/services/memTracker.cpp index 5fcbc7cc6647c24906ebdd7877099d031be453be..981584f94be14f129323a143374fd3cc4a9b391f 100644 --- a/src/hotspot/share/services/memTracker.cpp +++ b/src/hotspot/share/services/memTracker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,7 @@ */ #include "precompiled.hpp" #include "jvm.h" - +#include "memory/metaspaceUtils.hpp" #include "runtime/atomic.hpp" #include "runtime/orderAccess.hpp" #include "runtime/vmThread.hpp" @@ -170,7 +170,14 @@ bool MemTracker::transition_to(NMT_TrackingLevel level) { return true; } +// Report during error reporting. +void MemTracker::error_report(outputStream* output) { + if (tracking_level() >= NMT_summary) { + report(true, output, MemReporterBase::default_scale); // just print summary for error case. + } +} +// Report when handling PrintNMTStatistics before VM shutdown. static volatile bool g_final_report_did_run = false; void MemTracker::final_report(outputStream* output) { // This function is called during both error reporting and normal VM exit. 
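The update_peak_count()/update_peak_size() loops added to MemoryCounter in mallocTracker.cpp above are the standard lock-free "monotonic maximum" idiom: reload on a failed compare-exchange, and stop as soon as the observed peak is no smaller than the candidate. A minimal Java sketch of the same idiom, with illustrative names that are not part of this change:

```java
import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch of the "monotonic maximum" CAS loop used by
// MemoryCounter::update_peak_size(); not JDK code.
final class PeakTracker {
    private final AtomicLong peak = new AtomicLong();

    void updatePeak(long candidate) {
        long current = peak.get();
        while (current < candidate) {
            long witness = peak.compareAndExchange(current, candidate);
            if (witness == current) {
                return;            // our value was installed
            }
            current = witness;     // raced with another writer; re-check
        }
    }

    long peak() {
        return peak.get();
    }
}
```

The loop terminates either because the candidate was installed or because some other thread published an even larger peak, which is exactly the property the NMT counters need.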
@@ -181,161 +188,35 @@ void MemTracker::final_report(outputStream* output) { if (Atomic::cmpxchg(&g_final_report_did_run, false, true) == false) { NMT_TrackingLevel level = tracking_level(); if (level >= NMT_summary) { - report(level == NMT_summary, output); + report(level == NMT_summary, output, 1); } } } -void MemTracker::report(bool summary_only, outputStream* output) { +void MemTracker::report(bool summary_only, outputStream* output, size_t scale) { assert(output != NULL, "No output stream"); MemBaseline baseline; if (baseline.baseline(summary_only)) { if (summary_only) { - MemSummaryReporter rpt(baseline, output); + MemSummaryReporter rpt(baseline, output, scale); rpt.report(); } else { - MemDetailReporter rpt(baseline, output); + MemDetailReporter rpt(baseline, output, scale); rpt.report(); output->print("Metaspace:"); // The basic metaspace report avoids any locking and should be safe to // be called at any time. - MetaspaceUtils::print_basic_report(output, K); + MetaspaceUtils::print_basic_report(output, scale); } } } -// This is a walker to gather malloc site hashtable statistics, -// the result is used for tuning. -class StatisticsWalker : public MallocSiteWalker { - private: - enum Threshold { - // aggregates statistics over this threshold into one - // line item. - report_threshold = 20 - }; - - private: - // Number of allocation sites that have all memory freed - int _empty_entries; - // Total number of allocation sites, include empty sites - int _total_entries; - // Number of captured call stack distribution - int _stack_depth_distribution[NMT_TrackingStackDepth]; - // Hash distribution - int _hash_distribution[report_threshold]; - // Number of hash buckets that have entries over the threshold - int _bucket_over_threshold; - - // The hash bucket that walker is currently walking - int _current_hash_bucket; - // The length of current hash bucket - int _current_bucket_length; - // Number of hash buckets that are not empty - int _used_buckets; - // Longest hash bucket length - int _longest_bucket_length; - - public: - StatisticsWalker() : _empty_entries(0), _total_entries(0) { - int index = 0; - for (index = 0; index < NMT_TrackingStackDepth; index ++) { - _stack_depth_distribution[index] = 0; - } - for (index = 0; index < report_threshold; index ++) { - _hash_distribution[index] = 0; - } - _bucket_over_threshold = 0; - _longest_bucket_length = 0; - _current_hash_bucket = -1; - _current_bucket_length = 0; - _used_buckets = 0; - } - - virtual bool do_malloc_site(const MallocSite* e) { - if (e->size() == 0) _empty_entries ++; - _total_entries ++; - - // stack depth distrubution - int frames = e->call_stack()->frames(); - _stack_depth_distribution[frames - 1] ++; - - // hash distribution - int hash_bucket = ((unsigned)e->hash()) % MallocSiteTable::hash_buckets(); - if (_current_hash_bucket == -1) { - _current_hash_bucket = hash_bucket; - _current_bucket_length = 1; - } else if (_current_hash_bucket == hash_bucket) { - _current_bucket_length ++; - } else { - record_bucket_length(_current_bucket_length); - _current_hash_bucket = hash_bucket; - _current_bucket_length = 1; - } - return true; - } - - // walk completed - void completed() { - record_bucket_length(_current_bucket_length); - } - - void report_statistics(outputStream* out) { - int index; - out->print_cr("Malloc allocation site table:"); - out->print_cr("\tTotal entries: %d", _total_entries); - out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries); - out->print_cr(" "); 
- out->print_cr("Hash distribution:"); - if (_used_buckets < MallocSiteTable::hash_buckets()) { - out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets)); - } - for (index = 0; index < report_threshold; index ++) { - if (_hash_distribution[index] != 0) { - if (index == 0) { - out->print_cr(" %d entry: %d", 1, _hash_distribution[0]); - } else if (index < 9) { // single digit - out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]); - } else { - out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]); - } - } - } - if (_bucket_over_threshold > 0) { - out->print_cr(" >%d entries: %d", report_threshold, _bucket_over_threshold); - } - out->print_cr("most entries: %d", _longest_bucket_length); - out->print_cr(" "); - out->print_cr("Call stack depth distribution:"); - for (index = 0; index < NMT_TrackingStackDepth; index ++) { - if (_stack_depth_distribution[index] > 0) { - out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]); - } - } - } - - private: - void record_bucket_length(int length) { - _used_buckets ++; - if (length <= report_threshold) { - _hash_distribution[length - 1] ++; - } else { - _bucket_over_threshold ++; - } - _longest_bucket_length = MAX2(_longest_bucket_length, length); - } -}; - - void MemTracker::tuning_statistics(outputStream* out) { // NMT statistics - StatisticsWalker walker; - MallocSiteTable::walk_malloc_site(&walker); - walker.completed(); - out->print_cr("Native Memory Tracking Statistics:"); out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets()); out->print_cr(" Tracking stack depth: %d", NMT_TrackingStackDepth); NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());) - out->print_cr(" "); - walker.report_statistics(out); + out->cr(); + MallocSiteTable::print_tuning_statistics(out); } diff --git a/src/hotspot/share/services/memTracker.hpp b/src/hotspot/share/services/memTracker.hpp index 7097144c2424177b0eacadae340f7fb54870a850..b15bc1dcc26f872690e170e0a7a7047011dd4519 100644 --- a/src/hotspot/share/services/memTracker.hpp +++ b/src/hotspot/share/services/memTracker.hpp @@ -87,9 +87,9 @@ class MemTracker : AllStatic { #include "services/virtualMemoryTracker.hpp" #define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail) ? \ - NativeCallStack(0, true) : NativeCallStack::empty_stack()) + NativeCallStack(0) : NativeCallStack::empty_stack()) #define CALLER_PC ((MemTracker::tracking_level() == NMT_detail) ? \ - NativeCallStack(1, true) : NativeCallStack::empty_stack()) + NativeCallStack(1) : NativeCallStack::empty_stack()) class MemBaseline; @@ -286,13 +286,10 @@ class MemTracker : AllStatic { return NMTQuery_lock; } - // Make a final report or report for hs_err file. - static void error_report(outputStream* output) { - if (tracking_level() >= NMT_summary) { - report(true, output); // just print summary for error case. - } - } + // Report during error reporting. + static void error_report(outputStream* output); + // Report when handling PrintNMTStatistics before VM shutdown. 
static void final_report(outputStream* output); // Stored baseline @@ -308,7 +305,7 @@ class MemTracker : AllStatic { private: static NMT_TrackingLevel init_tracking_level(); - static void report(bool summary_only, outputStream* output); + static void report(bool summary_only, outputStream* output, size_t scale); private: // Tracking level diff --git a/src/hotspot/share/services/memoryPool.cpp b/src/hotspot/share/services/memoryPool.cpp index 3cdd5275b7b267a6590b192a904b89dbd7017ab0..b17c93a5b75717a2f4f9b73b5284e1bea6b2cbc3 100644 --- a/src/hotspot/share/services/memoryPool.cpp +++ b/src/hotspot/share/services/memoryPool.cpp @@ -26,6 +26,7 @@ #include "classfile/javaClasses.hpp" #include "classfile/vmSymbols.hpp" #include "memory/metaspace.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "oops/oopHandle.inline.hpp" diff --git a/src/hotspot/share/services/runtimeService.cpp b/src/hotspot/share/services/runtimeService.cpp index be2108a86c7ff74117fa76871660394c29c90887..d2e8d058a98c2c451c131a330ce559a54c0ce1db 100644 --- a/src/hotspot/share/services/runtimeService.cpp +++ b/src/hotspot/share/services/runtimeService.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,6 @@ */ #include "precompiled.hpp" -#include "classfile/classLoader.hpp" #include "logging/log.hpp" #include "logging/logStream.hpp" #include "runtime/vm_version.hpp" diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp index ee08ac4160696f05f69b0efd135dce5a868c882a..c0476bba8f2a6e7649fcf9fe85af783ad415e557 100644 --- a/src/hotspot/share/services/threadService.cpp +++ b/src/hotspot/share/services/threadService.cpp @@ -33,6 +33,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/instanceKlass.hpp" +#include "oops/klass.inline.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" diff --git a/src/hotspot/share/services/threadStackTracker.cpp b/src/hotspot/share/services/threadStackTracker.cpp index ae566a0973336f26a5194066f188b5b076f84193..cfc80b24a2d8c0bd5029793e6c65cc6a0ce3db81 100644 --- a/src/hotspot/share/services/threadStackTracker.cpp +++ b/src/hotspot/share/services/threadStackTracker.cpp @@ -91,7 +91,7 @@ void ThreadStackTracker::delete_thread_stack(void* base, size_t size) { if (MemTracker::tracking_level() == NMT_detail) { ThreadCritical tc; assert(_simple_thread_stacks != NULL, "Must be initialized"); - SimpleThreadStackSite site((address)base, size); + SimpleThreadStackSite site((address)base, size, NativeCallStack::empty_stack()); // Fake object just to serve as compare target for delete bool removed = _simple_thread_stacks->remove(site); assert(removed, "Must exist"); } diff --git a/src/hotspot/share/services/threadStackTracker.hpp b/src/hotspot/share/services/threadStackTracker.hpp index 9c5793ad35c150b2719f90cf8851084c54a22444..d628e7633d9ebdb2072b3d17db042842f71bc96e 100644 --- a/src/hotspot/share/services/threadStackTracker.hpp +++ b/src/hotspot/share/services/threadStackTracker.hpp @@ -33,39 +33,14 @@ #include "utilities/nativeCallStack.hpp" #include "utilities/linkedlist.hpp" -class SimpleThreadStackSite; - -class SimpleThreadStack 
{
-  friend class SimpleThreadStackSite;
-private:
-  address _base;
-  size_t  _size;
-public:
-  SimpleThreadStack() : _base(NULL), _size(0) { }
-  bool equals(const SimpleThreadStack& s) const {
-    return base() == s.base();
-  }
-
-  size_t  size() const { return _size; }
-  address base() const { return _base; }
-private:
-  void set_size(size_t size) { _size = size; }
-  void set_base(address base) { _base = base; }
-};
-
-class SimpleThreadStackSite : public AllocationSite<SimpleThreadStack> {
+class SimpleThreadStackSite : public AllocationSite {
+  const address _base;
+  const size_t _size;
 public:
   SimpleThreadStackSite(address base, size_t size, const NativeCallStack& stack) :
-    AllocationSite<SimpleThreadStack>(stack, mtThreadStack) {
-    data()->set_size(size);
-    data()->set_base(base);
-  }
-
-  SimpleThreadStackSite(address base, size_t size) :
-    AllocationSite<SimpleThreadStack>(NativeCallStack::empty_stack(), mtThreadStack) {
-    data()->set_base(base);
-    data()->set_size(size);
-  }
+    AllocationSite(stack, mtThreadStack),
+    _base(base),
+    _size(size) {}

   bool equals(const SimpleThreadStackSite& mts) const {
     bool eq = base() == mts.base();
@@ -73,8 +48,8 @@ public:
     return eq;
   }

-  size_t  size() const { return peek()->size(); }
-  address base() const { return peek()->base(); }
+  size_t  size() const { return _size; }
+  address base() const { return _base; }
 };

 /*
diff --git a/src/hotspot/share/services/virtualMemoryTracker.cpp b/src/hotspot/share/services/virtualMemoryTracker.cpp
index 79f1103615499b33e484a13c130c0915f673f67a..cf8ab43ee92693f5da94b2f60e775920077e9ed5 100644
--- a/src/hotspot/share/services/virtualMemoryTracker.cpp
+++ b/src/hotspot/share/services/virtualMemoryTracker.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -22,9 +22,9 @@
 *
 */
 #include "precompiled.hpp"
-
 #include "logging/log.hpp"
 #include "memory/metaspace.hpp"
+#include "memory/metaspaceUtils.hpp"
 #include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
 #include "services/memTracker.hpp"
diff --git a/src/hotspot/share/services/virtualMemoryTracker.hpp b/src/hotspot/share/services/virtualMemoryTracker.hpp
index 60fbc64988a5327a0890039b7c41cfcaf65727bc..211c8f03723e68b87aa54e6c26be9068b38ad549 100644
--- a/src/hotspot/share/services/virtualMemoryTracker.hpp
+++ b/src/hotspot/share/services/virtualMemoryTracker.hpp
@@ -68,17 +68,18 @@ class VirtualMemory {
 };

 // Virtual memory allocation site, keeps track where the virtual memory is reserved.
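The same reshaping appears in the VirtualMemoryAllocationSite diff just below: instead of AllocationSite carrying its payload through a template parameter, each site subclass now owns the payload directly and forwards to it. A rough Java analog of the resulting structure, with all names illustrative:

```java
// Rough analog of the refactor: the base class only knows the call stack,
// the subclass owns its counters and delegates. Illustrative names only.
class Site {
    private final long[] stack;                  // stands in for NativeCallStack
    Site(long[] stack) { this.stack = stack; }
    long[] callStack() { return stack; }
}

final class CountingSite extends Site {
    private long count;                          // stands in for the embedded counter
    private long size;

    CountingSite(long[] stack) { super(stack); }

    void allocate(long sz)   { count++; size += sz; }
    void deallocate(long sz) { count--; size -= sz; }
    long count() { return count; }
    long size()  { return size; }
}
```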
-class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
+class VirtualMemoryAllocationSite : public AllocationSite {
+  VirtualMemory _c;
 public:
   VirtualMemoryAllocationSite(const NativeCallStack& stack, MEMFLAGS flag) :
-    AllocationSite<VirtualMemory>(stack, flag) { }
-
-  inline void reserve_memory(size_t sz)  { data()->reserve_memory(sz);  }
-  inline void commit_memory (size_t sz)  { data()->commit_memory(sz);   }
-  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
-  inline void release_memory(size_t sz)  { data()->release_memory(sz);  }
-  inline size_t reserved() const  { return peek()->reserved(); }
-  inline size_t committed() const { return peek()->committed(); }
+    AllocationSite(stack, flag) { }
+
+  inline void reserve_memory(size_t sz)  { _c.reserve_memory(sz);  }
+  inline void commit_memory (size_t sz)  { _c.commit_memory(sz);   }
+  inline void uncommit_memory(size_t sz) { _c.uncommit_memory(sz); }
+  inline void release_memory(size_t sz)  { _c.release_memory(sz);  }
+  inline size_t reserved() const  { return _c.reserved(); }
+  inline size_t committed() const { return _c.committed(); }
 };

 class VirtualMemorySummary;
diff --git a/src/hotspot/share/utilities/exceptions.hpp b/src/hotspot/share/utilities/exceptions.hpp
index c9c298f0e6412e94c77a06a64b7fc2e92b81448f..056989448c12e0ec4916c8556a8701a6b26c906c 100644
--- a/src/hotspot/share/utilities/exceptions.hpp
+++ b/src/hotspot/share/utilities/exceptions.hpp
@@ -321,8 +321,8 @@ class Exceptions {
   THREAD); if (HAS_PENDING_EXCEPTION) {    \
     oop ex = PENDING_EXCEPTION;            \
     CLEAR_PENDING_EXCEPTION;               \
-    ex->print();                           \
-    ShouldNotReachHere();                  \
+    DEBUG_ONLY(ex->print();)               \
+    assert(false, "CATCH");                \
   } (void)(0

 // ExceptionMark is a stack-allocated helper class for local exception handling.
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index b7f8b1a4b0bcea56c3768623d7123d9c9936951f..6d8e271540043591652f7243cc8da8f44c2b335b 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -73,18 +73,12 @@
 // This file holds all globally used constants & types, class (forward)
 // declarations and a few frequently used utility functions.

-// Declare the named class to be noncopyable. This macro must be used in
-// a private part of the class's definition, followed by a semi-colon.
-// Doing so provides private declarations for the class's copy constructor
-// and assignment operator. Because these operations are private, most
-// potential callers will fail to compile because they are inaccessible.
-// The operations intentionally lack a definition, to provoke link-time
-// failures for calls from contexts where they are accessible, e.g. from
-// within the class or from a friend of the class.
-// Note: The lack of definitions is still not completely bullet-proof, as
-// an apparent call might be optimized away by copy elision.
-// For C++11 the declarations should be changed to deleted definitions.
-#define NONCOPYABLE(C) C(C const&); C& operator=(C const&) /* next token must be ; */
+// Declare the named class to be noncopyable. This macro must be followed by
+// a semi-colon. The macro provides deleted declarations for the class's copy
+// constructor and assignment operator. Because these operations are deleted,
+// they cannot be defined and potential callers will fail to compile.
+#define NONCOPYABLE(C) C(C const&) = delete; C& operator=(C const&) = delete /* next token must be ; */
+
 //----------------------------------------------------------------------------------------------------
 // Printf-style formatters for fixed- and variable-width types as pointers and
diff --git a/src/hotspot/share/utilities/hashtable.cpp b/src/hotspot/share/utilities/hashtable.cpp
index 902fd76440b300b86166403f4403ceff676b8c06..625523dc59aff79c18d681271638476320e6cae7 100644
--- a/src/hotspot/share/utilities/hashtable.cpp
+++ b/src/hotspot/share/utilities/hashtable.cpp
@@ -179,15 +179,10 @@ template <MEMFLAGS F> bool BasicHashtable<F>::resize(int new_size) {
   for (int index_old = 0; index_old < table_size_old; index_old++) {
     for (BasicHashtableEntry<F>* p = _buckets[index_old].get_entry(); p != NULL; ) {
       BasicHashtableEntry<F>* next = p->next();
-      bool keep_shared = p->is_shared();
       int index_new = hash_to_index(p->hash());

       p->set_next(buckets_new[index_new].get_entry());
       buckets_new[index_new].set_entry(p);
-
-      if (keep_shared) {
-        p->set_shared();
-      }
       p = next;
     }
   }
diff --git a/src/hotspot/share/utilities/hashtable.hpp b/src/hotspot/share/utilities/hashtable.hpp
index a41bcd02350cb050856df44dff310b7b2d897590..823bc031d27fccfaf8c46086db6b7e082db11fa3 100644
--- a/src/hotspot/share/utilities/hashtable.hpp
+++ b/src/hotspot/share/utilities/hashtable.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -47,13 +47,7 @@
 template <MEMFLAGS F> class BasicHashtableEntry : public CHeapObj<F> {
 private:
   unsigned int         _hash;           // 32-bit hash for item

-  // Link to next element in the linked list for this bucket.  EXCEPT
-  // bit 0 set indicates that this entry is shared and must not be
-  // unlinked from the table. Bit 0 is set during the dumping of the
-  // archive. Since shared entries are immutable, _next fields in the
-  // shared entries will not change.  New entries will always be
-  // unshared and since pointers are align, bit 0 will always remain 0
-  // with no extra effort.
+  // Link to next element in the linked list for this bucket.
   BasicHashtableEntry<F>* _next;

   // Windows IA64 compiler requires subclasses to be able to access these
@@ -71,12 +65,8 @@ public:
   void set_hash(unsigned int hash)  { _hash = hash; }
   unsigned int* hash_addr()         { return &_hash; }

-  static BasicHashtableEntry<F>* make_ptr(BasicHashtableEntry<F>* p) {
-    return (BasicHashtableEntry<F>*)((intptr_t)p & -2);
-  }
-
   BasicHashtableEntry<F>* next() const {
-    return make_ptr(_next);
+    return _next;
   }

   void set_next(BasicHashtableEntry<F>* next) {
@@ -86,14 +76,6 @@ public:
   BasicHashtableEntry<F>** next_addr() {
     return &_next;
   }
-
-  bool is_shared() const {
-    return ((intptr_t)_next & 1) != 0;
-  }
-
-  void set_shared() {
-    _next = (BasicHashtableEntry<F>*)((intptr_t)_next | 1);
-  }
 };
diff --git a/src/hotspot/share/utilities/nativeCallStack.cpp b/src/hotspot/share/utilities/nativeCallStack.cpp
index 45a29424fd72f9ce2911bfa6fcb8aae98ccd3db5..4b62360dccfd433161abfa462356ea21e64c7833 100644
--- a/src/hotspot/share/utilities/nativeCallStack.cpp
+++ b/src/hotspot/share/utilities/nativeCallStack.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -28,29 +28,24 @@
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/nativeCallStack.hpp"

-NativeCallStack::NativeCallStack(int toSkip, bool fillStack) :
-  _hash_value(0) {
+const NativeCallStack NativeCallStack::_empty_stack; // Uses default ctor

-  if (fillStack) {
-    // We need to skip the NativeCallStack::NativeCallStack frame if a tail call is NOT used
-    // to call os::get_native_stack. A tail call is used if _NMT_NOINLINE_ is not defined
-    // (which means this is not a slowdebug build), and we are on 64-bit (except Windows).
-    // This is not necessarily a rule, but what has been obvserved to date.
+NativeCallStack::NativeCallStack(int toSkip) {
+
+  // We need to skip the NativeCallStack::NativeCallStack frame if a tail call is NOT used
+  // to call os::get_native_stack. A tail call is used if _NMT_NOINLINE_ is not defined
+  // (which means this is not a slowdebug build), and we are on 64-bit (except Windows).
+  // This is not necessarily a rule, but what has been observed to date.
 #if (defined(_NMT_NOINLINE_) || defined(_WINDOWS) || !defined(_LP64) || defined(PPC64))
-    // Not a tail call.
-    toSkip++;
+  // Not a tail call.
+  toSkip++;
 #if (defined(_NMT_NOINLINE_) && defined(BSD) && defined(_LP64))
-    // Mac OS X slowdebug builds have this odd behavior where NativeCallStack::NativeCallStack
-    // appears as two frames, so we need to skip an extra frame.
-    toSkip++;
+  // Mac OS X slowdebug builds have this odd behavior where NativeCallStack::NativeCallStack
+  // appears as two frames, so we need to skip an extra frame.
+  toSkip++;
 #endif // Special-case for BSD.
 #endif // Not a tail call.
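The toSkip adjustment above exists because the constructor itself may or may not show up as a captured frame, depending on whether the call into os::get_native_stack() is compiled as a tail call. The Java-level counterpart of "capture a fixed-depth stack while dropping the capture machinery's own frames" is StackWalker; a minimal sketch, where the depth constant mirrors NMT_TrackingStackDepth (4 by default):

```java
import java.lang.StackWalker.StackFrame;
import java.util.List;
import java.util.stream.Collectors;

// Sketch: bounded stack capture with caller-controlled frame skipping,
// analogous to os::get_native_stack(_stack, NMT_TrackingStackDepth, toSkip).
final class StackCapture {
    private static final int DEPTH = 4;   // mirrors NMT_TrackingStackDepth

    static List<StackFrame> capture(int toSkip) {
        return StackWalker.getInstance().walk(frames ->
                frames.skip(toSkip)       // drop wrapper frames
                      .limit(DEPTH)       // keep a fixed-depth prefix
                      .collect(Collectors.toList()));
    }
}
```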
-    os::get_native_stack(_stack, NMT_TrackingStackDepth, toSkip);
-  } else {
-    for (int index = 0; index < NMT_TrackingStackDepth; index ++) {
-      _stack[index] = NULL;
-    }
-  }
+  os::get_native_stack(_stack, NMT_TrackingStackDepth, toSkip);
 }

 NativeCallStack::NativeCallStack(address* pc, int frameCount) {
@@ -63,7 +58,6 @@ NativeCallStack::NativeCallStack(address* pc, int frameCount) {
   for (; index < NMT_TrackingStackDepth; index ++) {
     _stack[index] = NULL;
   }
-  _hash_value = 0;
 }

 // number of stack frames captured
@@ -77,21 +71,6 @@ int NativeCallStack::frames() const {
   return index;
 }

-// Hash code. Any better algorithm?
-unsigned int NativeCallStack::hash() const {
-  uintptr_t hash_val = _hash_value;
-  if (hash_val == 0) {
-    for (int index = 0; index < NMT_TrackingStackDepth; index++) {
-      if (_stack[index] == NULL) break;
-      hash_val += (uintptr_t)_stack[index];
-    }
-
-    NativeCallStack* p = const_cast<NativeCallStack*>(this);
-    p->_hash_value = (unsigned int)(hash_val & 0xFFFFFFFF);
-  }
-  return _hash_value;
-}
-
 void NativeCallStack::print_on(outputStream* out) const {
   print_on(out, 0);
 }
diff --git a/src/hotspot/share/utilities/nativeCallStack.hpp b/src/hotspot/share/utilities/nativeCallStack.hpp
index 724bba8ed35cb7f6271d142f1211b4dd4b5840f9..33ce0164d84d4be36d879526c8a6c98968574b5d 100644
--- a/src/hotspot/share/utilities/nativeCallStack.hpp
+++ b/src/hotspot/share/utilities/nativeCallStack.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -56,16 +56,18 @@ class MemTracker;
 class NativeCallStack : public StackObj {
 private:
   address       _stack[NMT_TrackingStackDepth];
-  unsigned int  _hash_value;
-
+  static const NativeCallStack _empty_stack;
 public:
-  NativeCallStack(int toSkip = 0, bool fillStack = false);
+  // Default ctor creates an empty stack.
+  // (it may make sense to remove this altogether but it's used in a few places).
+  NativeCallStack() {
+    memset(_stack, 0, sizeof(_stack));
+  }
+
+  NativeCallStack(int toSkip);
   NativeCallStack(address* pc, int frameCount);

-  static inline const NativeCallStack& empty_stack() {
-    static const NativeCallStack EMPTY_STACK(0, false);
-    return EMPTY_STACK;
-  }
+  static inline const NativeCallStack& empty_stack() { return _empty_stack; }

   // if it is an empty stack
   inline bool is_empty() const {
@@ -80,9 +82,6 @@
   }

   inline bool equals(const NativeCallStack& other) const {
-    // compare hash values
-    if (hash() != other.hash()) return false;
-    // compare each frame
     return compare(other) == 0;
   }

@@ -91,8 +90,14 @@
     return _stack[index];
   }

-  // Hash code. Any better algorithm?
- unsigned int hash() const; + // Helper; calculates a hash value over the stack frames in this stack + unsigned int calculate_hash() const { + uintptr_t hash = 0; + for (int i = 0; i < NMT_TrackingStackDepth; i++) { + hash += (uintptr_t)_stack[i]; + } + return hash; + } void print_on(outputStream* out) const; void print_on(outputStream* out, int indent) const; diff --git a/src/hotspot/share/utilities/ostream.cpp b/src/hotspot/share/utilities/ostream.cpp index 61ff92be830c2e50c8ad24ed53c958eb866ea30e..b98e6f5bbe9650055a638bc020933f506b51abf1 100644 --- a/src/hotspot/share/utilities/ostream.cpp +++ b/src/hotspot/share/utilities/ostream.cpp @@ -589,8 +589,11 @@ char* fileStream::readln(char *data, int count ) { char * ret = NULL; if (_file != NULL) { ret = ::fgets(data, count, _file); - //Get rid of annoying \n char - data[::strlen(data)-1] = '\0'; + // Get rid of annoying \n char only if it is present. + size_t len = ::strlen(data); + if (len > 0 && data[len - 1] == '\n') { + data[len - 1] = '\0'; + } } return ret; } diff --git a/src/hotspot/share/utilities/utf8.cpp b/src/hotspot/share/utilities/utf8.cpp index 1613ebde20cee16c6193de034352301c76a33166..6ebeb9a6c9b21d4f688f60a1b0f02d162ef805a3 100644 --- a/src/hotspot/share/utilities/utf8.cpp +++ b/src/hotspot/share/utilities/utf8.cpp @@ -447,6 +447,7 @@ char* UNICODE::as_utf8(const T* base, int& length) { } char* UNICODE::as_utf8(const jchar* base, int length, char* buf, int buflen) { + assert(buflen > 0, "zero length output buffer"); u_char* p = (u_char*)buf; for (int index = 0; index < length; index++) { jchar c = base[index]; @@ -459,6 +460,7 @@ char* UNICODE::as_utf8(const jchar* base, int length, char* buf, int buflen) { } char* UNICODE::as_utf8(const jbyte* base, int length, char* buf, int buflen) { + assert(buflen > 0, "zero length output buffer"); u_char* p = (u_char*)buf; for (int index = 0; index < length; index++) { jbyte c = base[index]; diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp index 1f8ab5b3537cce2e9843d18a0365132463985997..57296c3e2a1835dc693f718378d0eefec5d5475d 100644 --- a/src/hotspot/share/utilities/vmError.cpp +++ b/src/hotspot/share/utilities/vmError.cpp @@ -33,6 +33,7 @@ #include "logging/logConfiguration.hpp" #include "memory/metaspace.hpp" #include "memory/metaspaceShared.hpp" +#include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.inline.hpp" #include "memory/universe.hpp" #include "oops/compressedOops.hpp" @@ -43,6 +44,7 @@ #include "runtime/init.hpp" #include "runtime/os.hpp" #include "runtime/osThread.hpp" +#include "runtime/safefetch.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadSMR.hpp" diff --git a/src/hotspot/share/utilities/vmError.hpp b/src/hotspot/share/utilities/vmError.hpp index 416dd03cd833f5827e666146483dfbd483e0c39f..b53e6db43c52c38915afe6203f779e5cb84900f7 100644 --- a/src/hotspot/share/utilities/vmError.hpp +++ b/src/hotspot/share/utilities/vmError.hpp @@ -160,14 +160,6 @@ public: // reporting OutOfMemoryError static void report_java_out_of_memory(const char* message); - // returns original flags for signal, if it was resetted, or -1 if - // signal was not changed by error reporter - static int get_resetted_sigflags(int sig); - - // returns original handler for signal, if it was resetted, or NULL if - // signal was not changed by error reporter - static address get_resetted_sighandler(int sig); - // Called by the WatcherThread to check if error reporting has timed-out. 
// Returns true if error reporting has not completed within the ErrorLogTimeout limit. static bool check_timeout(); @@ -185,5 +177,8 @@ public: // which is not NULL and contains bits in every word. static const intptr_t segfault_address = LP64_ONLY(0xABC0000000000ABCULL) NOT_LP64(0x00000ABC); + // Needed when printing signal handlers. + NOT_WINDOWS(static const void* crash_handler_address;) + }; #endif // SHARE_UTILITIES_VMERROR_HPP diff --git a/src/hotspot/share/utilities/xmlstream.cpp b/src/hotspot/share/utilities/xmlstream.cpp index 4e48a16ccb85609a71ea09e668d12271d0cde6b5..c5495533d02610ecb5504146bd07f2cf8d3094f8 100644 --- a/src/hotspot/share/utilities/xmlstream.cpp +++ b/src/hotspot/share/utilities/xmlstream.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ #include "oops/oop.inline.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/vmOperations.hpp" #include "runtime/vmThread.hpp" #include "utilities/vmError.hpp" #include "utilities/xmlstream.hpp" diff --git a/src/java.base/aix/classes/sun/nio/fs/AixFileStore.java b/src/java.base/aix/classes/sun/nio/fs/AixFileStore.java index b13f333eacd819585c44ebf5e06a740654861d71..5e424e2f363b7e343f81ae7fae62203c6b143b34 100644 --- a/src/java.base/aix/classes/sun/nio/fs/AixFileStore.java +++ b/src/java.base/aix/classes/sun/nio/fs/AixFileStore.java @@ -88,9 +88,8 @@ class AixFileStore throw new IOException("Mount point not found"); } - // returns true if extended attributes enabled on file system where given - // file resides, returns false if disabled or unable to determine. 
- private boolean isExtendedAttributesEnabled(UnixPath path) { + @Override + protected boolean isExtendedAttributesEnabled(UnixPath path) { return false; } diff --git a/src/java.base/linux/classes/jdk/internal/platform/CgroupInfo.java b/src/java.base/linux/classes/jdk/internal/platform/CgroupInfo.java index b436b62585011449dff418de58190ca896800e3f..60b7f3fc3427d6c6f0aa52e6062c227207667a0a 100644 --- a/src/java.base/linux/classes/jdk/internal/platform/CgroupInfo.java +++ b/src/java.base/linux/classes/jdk/internal/platform/CgroupInfo.java @@ -26,17 +26,21 @@ package jdk.internal.platform; /** - * Data structure to hold info from /proc/self/cgroup + * Data structure to hold info from /proc/self/cgroup, + * /proc/cgroups and /proc/self/mountinfo * * man 7 cgroups * * @see CgroupSubsystemFactory */ -class CgroupInfo { +public class CgroupInfo { private final String name; private final int hierarchyId; private final boolean enabled; + private String mountPoint; + private String mountRoot; + private String cgroupPath; private CgroupInfo(String name, int hierarchyId, boolean enabled) { this.name = name; @@ -44,18 +48,64 @@ class CgroupInfo { this.enabled = enabled; } - String getName() { + public String getName() { return name; } - int getHierarchyId() { + public int getHierarchyId() { return hierarchyId; } - boolean isEnabled() { + public boolean isEnabled() { return enabled; } + public String getMountPoint() { + return mountPoint; + } + + public void setMountPoint(String mountPoint) { + this.mountPoint = mountPoint; + } + + public String getMountRoot() { + return mountRoot; + } + + public void setMountRoot(String mountRoot) { + this.mountRoot = mountRoot; + } + + public String getCgroupPath() { + return cgroupPath; + } + + public void setCgroupPath(String cgroupPath) { + this.cgroupPath = cgroupPath; + } + + /* + * Creates a CgroupInfo instance from a line in /proc/cgroups. + * Comment token (hash) is handled by the caller. + * + * Example (annotated): + * + * #subsys_name hierarchy num_cgroups enabled + * cpuset 10 1 1 (a) + * cpu 7 8 1 (b) + * [...] 
+     *
+     * Line (a) would yield:
+     *   info = new CgroupInfo("cpuset", 10, true);
+     *   return info;
+     * Line (b) results in:
+     *   info = new CgroupInfo("cpu", 7, true);
+     *   return info;
+     *
+     *
+     * See CgroupSubsystemFactory.determineType()
+     *
+     */
     static CgroupInfo fromCgroupsLine(String line) {
         String[] tokens = line.split("\\s+");
         if (tokens.length != 4) {
diff --git a/src/java.base/linux/classes/jdk/internal/platform/CgroupSubsystemFactory.java b/src/java.base/linux/classes/jdk/internal/platform/CgroupSubsystemFactory.java
index d71f5c1fbf2145c3ebc5e4eaf11301631707fc73..931d0896079cc72d41c3c8f3aac963c1811e2cb1 100644
--- a/src/java.base/linux/classes/jdk/internal/platform/CgroupSubsystemFactory.java
+++ b/src/java.base/linux/classes/jdk/internal/platform/CgroupSubsystemFactory.java
@@ -26,13 +26,17 @@
 package jdk.internal.platform;

 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.lang.System.Logger;
 import java.lang.System.Logger.Level;
+import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.function.Consumer;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Stream;
@@ -68,17 +72,19 @@
     */
    private static final Pattern MOUNTINFO_PATTERN = Pattern.compile(
        "^[^\\s]+\\s+[^\\s]+\\s+[^\\s]+\\s+" + // (1), (2), (3)
-       "[^\\s]+\\s+([^\\s]+)\\s+" +           // (4), (5)     - group 1: mount point
+       "([^\\s]+)\\s+([^\\s]+)\\s+" +         // (4), (5)     - group 1, 2: root, mount point
        "[^-]+-\\s+" +                         // (6), (7), (8)
-       "([^\\s]+)\\s+" +                      // (9)          - group 2: filesystem type
+       "([^\\s]+)\\s+" +                      // (9)          - group 3: filesystem type
        ".*$");                                // (10), (11)

    static CgroupMetrics create() {
        Optional<CgroupTypeResult> optResult = null;
        try {
-           optResult = determineType("/proc/self/mountinfo", "/proc/cgroups");
+           optResult = determineType("/proc/self/mountinfo", "/proc/cgroups", "/proc/self/cgroup");
        } catch (IOException e) {
            return null;
+       } catch (UncheckedIOException e) {
+           return null;
        }

        if (optResult.isEmpty()) {
@@ -100,17 +106,37 @@
            return null;
        }

+       Map<String, CgroupInfo> infos = result.getInfos();
        if (result.isCgroupV2()) {
-           CgroupSubsystem subsystem = CgroupV2Subsystem.getInstance();
+           // For unified it doesn't matter which controller we pick.
+           CgroupInfo anyController = infos.get(MEMORY_CTRL);
+           CgroupSubsystem subsystem = CgroupV2Subsystem.getInstance(anyController);
            return subsystem != null ? new CgroupMetrics(subsystem) : null;
        } else {
-           CgroupV1Subsystem subsystem = CgroupV1Subsystem.getInstance();
+           CgroupV1Subsystem subsystem = CgroupV1Subsystem.getInstance(infos);
            return subsystem != null ? new CgroupV1MetricsImpl(subsystem) : null;
        }
    }

-   public static Optional<CgroupTypeResult> determineType(String mountInfo, String cgroups) throws IOException {
-       Map<String, CgroupInfo> infos = new HashMap<>();
+   /*
+    * Determine the type of the cgroup system (v1 - legacy or hybrid - or, v2 - unified)
+    * based on three files:
+    *
+    *  (1) mountInfo  (i.e. /proc/self/mountinfo)
+    *  (2) cgroups    (i.e. /proc/cgroups)
+    *  (3) selfCgroup (i.e. /proc/self/cgroup)
+    *
+    * File 'cgroups' is inspected for the hierarchy ID of the mounted cgroup pseudo
+    * filesystem. The hierarchy ID, in turn, helps us distinguish cgroups v2 and
+    * cgroup v1. For a system with zero hierarchy ID, but with >= 1 relevant cgroup
+    * controllers mounted in 'mountInfo' we can infer it's cgroups v2. Anything else
+    * will be cgroup v1 (hybrid or legacy). File 'selfCgroup' is being used for
+    * figuring out the mount path of the controller in the cgroup hierarchy.
+    */
+   public static Optional<CgroupTypeResult> determineType(String mountInfo,
+                                                          String cgroups,
+                                                          String selfCgroup) throws IOException {
+       final Map<String, CgroupInfo> infos = new HashMap<>();
        List<String> lines = CgroupUtil.readAllLinesPrivileged(Paths.get(cgroups));
        for (String line : lines) {
            if (line.startsWith("#")) {
@@ -141,44 +167,187 @@
            anyControllersEnabled = anyControllersEnabled || info.isEnabled();
        }

-       // If there are no mounted, relevant cgroup controllers in mountinfo and only
-       // 0 hierarchy IDs in /proc/cgroups have been seen, we are on a cgroups v1 system.
+       // If there are no mounted, relevant cgroup controllers in 'mountinfo' and only
+       // 0 hierarchy IDs in file 'cgroups' have been seen, we are on a cgroups v1 system.
        // However, continuing in that case does not make sense as we'd need
        // information from mountinfo for the mounted controller paths which we wouldn't
        // find anyway in that case.
-       try (Stream<String> mntInfo = CgroupUtil.readFilePrivileged(Paths.get(mountInfo))) {
-           boolean anyCgroupMounted = mntInfo.anyMatch(CgroupSubsystemFactory::isRelevantControllerMount);
-           if (!anyCgroupMounted && isCgroupsV2) {
-               return Optional.empty();
+       lines = CgroupUtil.readAllLinesPrivileged(Paths.get(mountInfo));
+       boolean anyCgroupMounted = false;
+       for (String line: lines) {
+           boolean cgroupsControllerFound = amendCgroupInfos(line, infos, isCgroupsV2);
+           anyCgroupMounted = anyCgroupMounted || cgroupsControllerFound;
+       }
+       if (!anyCgroupMounted) {
+           return Optional.empty();
+       }
+
+       // Map a cgroup version specific 'action' to a line in 'selfCgroup' (i.e.
+       // /proc/self/cgroup), split on the ':' token, so as to set the appropriate
+       // path to the cgroup controller in cgroup data structures 'infos'.
+       // See:
+       //   setCgroupV1Path() for the action run for cgroups v1 systems
+       //   setCgroupV2Path() for the action run for cgroups v2 systems
+       try (Stream<String> selfCgroupLines =
+            CgroupUtil.readFilePrivileged(Paths.get(selfCgroup))) {
+           Consumer<String[]> action = (tokens -> setCgroupV1Path(infos, tokens));
+           if (isCgroupsV2) {
+               action = (tokens -> setCgroupV2Path(infos, tokens));
            }
+           selfCgroupLines.map(line -> line.split(":"))
+                          .filter(tokens -> (tokens.length >= 3))
+                          .forEach(action);
        }
-       CgroupTypeResult result = new CgroupTypeResult(isCgroupsV2, anyControllersEnabled, anyCgroupsV2Controller, anyCgroupsV1Controller);
+
+       CgroupTypeResult result = new CgroupTypeResult(isCgroupsV2,
+                                                      anyControllersEnabled,
+                                                      anyCgroupsV2Controller,
+                                                      anyCgroupsV1Controller,
+                                                      Collections.unmodifiableMap(infos));
        return Optional.of(result);
    }

-   private static boolean isRelevantControllerMount(String line) {
-       Matcher lineMatcher = MOUNTINFO_PATTERN.matcher(line.trim());
-       if (lineMatcher.matches()) {
-           String mountPoint = lineMatcher.group(1);
-           String fsType = lineMatcher.group(2);
-           if (fsType.equals("cgroup")) {
-               String filename = Paths.get(mountPoint).getFileName().toString();
-               for (String fn: filename.split(",")) {
-                   switch (fn) {
-                       case MEMORY_CTRL: // fall through
-                       case CPU_CTRL:
-                       case CPUSET_CTRL:
-                       case CPUACCT_CTRL:
-                       case BLKIO_CTRL:
-                           return true;
-                       default: break; // ignore not recognized controllers
-                   }
-               }
-           } else if (fsType.equals("cgroup2")) {
-               return true;
-           }
-       }
-       return false;
+   /*
+    * Sets the path to the cgroup controller for cgroups v2 based on a line
+    * in /proc/self/cgroup file (represented as the 'tokens' array).
+    *
+    * Example:
+    *
+    *  0::/
+    *
+    *  => tokens = [ "0", "", "/" ]
+    */
+   private static void setCgroupV2Path(Map<String, CgroupInfo> infos,
+                                       String[] tokens) {
+       int hierarchyId = Integer.parseInt(tokens[0]);
+       String cgroupPath = tokens[2];
+       for (CgroupInfo info: infos.values()) {
+           assert hierarchyId == info.getHierarchyId() && hierarchyId == 0;
+           info.setCgroupPath(cgroupPath);
+       }
+   }
+
+   /*
+    * Sets the path to the cgroup controller for cgroups v1 based on a line
+    * in /proc/self/cgroup file (represented as the 'tokens' array).
+    *
+    * Note that multiple controllers might be joined at a single path.
+    *
+    * Example:
+    *
+    *  7:cpu,cpuacct:/system.slice/docker-74ad896fb40bbefe0f181069e4417505fffa19052098f27edf7133f31423bc0b.scope
+    *
+    *  => tokens = [ "7", "cpu,cpuacct", "/system.slice/docker-74ad896fb40bbefe0f181069e4417505fffa19052098f27edf7133f31423bc0b.scope" ]
+    */
+   private static void setCgroupV1Path(Map<String, CgroupInfo> infos,
+                                       String[] tokens) {
+       String controllerName = tokens[1];
+       String cgroupPath = tokens[2];
+       if (controllerName != null && cgroupPath != null) {
+           for (String cName: controllerName.split(",")) {
+               switch (cName) {
+                   case MEMORY_CTRL: // fall through
+                   case CPUSET_CTRL:
+                   case CPUACCT_CTRL:
+                   case CPU_CTRL:
+                   case BLKIO_CTRL:
+                       CgroupInfo info = infos.get(cName);
+                       info.setCgroupPath(cgroupPath);
+                       break;
+                   // Ignore not recognized controllers
+                   default:
+                       break;
+               }
+           }
+       }
+   }
+
+   /**
+    * Amends cgroup infos with mount path and mount root. The passed in
+    * 'mntInfoLine' represents a single line in, for example,
+    * /proc/self/mountinfo. Each line is matched with MOUNTINFO_PATTERN
+    * (see above), so as to extract the relevant tokens from the line.
+    *
+    * Host example cgroups v1:
+    *
+    * 44 30 0:41 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,seclabel,devices
+    *
+    * Container example cgroups v1:
+    *
+    * 1901 1894 0:37 /system.slice/docker-2291eeb92093f9d761aaf971782b575e9be56bd5930d4b5759b51017df3c1387.scope /sys/fs/cgroup/cpu,cpuacct ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,seclabel,cpu,cpuacct
+    *
+    * Container example cgroups v2:
+    *
+    * 1043 1034 0:27 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - cgroup2 cgroup2 rw,seclabel,nsdelegate
+    *
+    *
+    * @return {@code true} iff a relevant controller has been found at the
+    *         given line
+    */
+   private static boolean amendCgroupInfos(String mntInfoLine,
+                                           Map<String, CgroupInfo> infos,
+                                           boolean isCgroupsV2) {
+       Matcher lineMatcher = MOUNTINFO_PATTERN.matcher(mntInfoLine.trim());
+       boolean cgroupv1ControllerFound = false;
+       boolean cgroupv2ControllerFound = false;
+       if (lineMatcher.matches()) {
+           String mountRoot = lineMatcher.group(1);
+           String mountPath = lineMatcher.group(2);
+           String fsType = lineMatcher.group(3);
+           if (fsType.equals("cgroup")) {
+               Path p = Paths.get(mountPath);
+               String[] controllerNames = p.getFileName().toString().split(",");
+               for (String controllerName: controllerNames) {
+                   switch (controllerName) {
+                       case MEMORY_CTRL: // fall-through
+                       case CPU_CTRL:
+                       case CPUACCT_CTRL:
+                       case BLKIO_CTRL: {
+                           CgroupInfo info = infos.get(controllerName);
+                           assert info.getMountPoint() == null;
+                           assert info.getMountRoot() == null;
+                           info.setMountPoint(mountPath);
+                           info.setMountRoot(mountRoot);
+                           cgroupv1ControllerFound = true;
+                           break;
+                       }
+                       case CPUSET_CTRL: {
+                           CgroupInfo info = infos.get(controllerName);
+                           if (info.getMountPoint() != null) {
+                               // On some systems duplicate cpuset controllers get mounted in addition to
+                               // the main cgroup controllers most likely under /sys/fs/cgroup. In that
+                               // case pick the one under /sys/fs/cgroup and discard others.
+                               if (!info.getMountPoint().startsWith("/sys/fs/cgroup")) {
+                                   info.setMountPoint(mountPath);
+                                   info.setMountRoot(mountRoot);
+                               }
+                           } else {
+                               info.setMountPoint(mountPath);
+                               info.setMountRoot(mountRoot);
+                           }
+                           cgroupv1ControllerFound = true;
+                           break;
+                       }
+                       default:
+                           // Ignore controllers which we don't recognize
+                           break;
+                   }
+               }
+           } else if (fsType.equals("cgroup2")) {
+               if (isCgroupsV2) { // will be false for hybrid
+                   // All controllers have the same mount point and root mount
+                   // for unified hierarchy.
+                   for (CgroupInfo info: infos.values()) {
+                       assert info.getMountPoint() == null;
+                       assert info.getMountRoot() == null;
+                       info.setMountPoint(mountPath);
+                       info.setMountRoot(mountRoot);
+                   }
+               }
+               cgroupv2ControllerFound = true;
+           }
+       }
+       return cgroupv1ControllerFound || cgroupv2ControllerFound;
    }

    public static final class CgroupTypeResult {
@@ -186,15 +355,18 @@
        private final boolean anyControllersEnabled;
        private final boolean anyCgroupV2Controllers;
        private final boolean anyCgroupV1Controllers;
+       private final Map<String, CgroupInfo> infos;

        private CgroupTypeResult(boolean isCgroupV2,
                                 boolean anyControllersEnabled,
                                 boolean anyCgroupV2Controllers,
-                                boolean anyCgroupV1Controllers) {
+                                boolean anyCgroupV1Controllers,
+                                Map<String, CgroupInfo> infos) {
            this.isCgroupV2 = isCgroupV2;
            this.anyControllersEnabled = anyControllersEnabled;
            this.anyCgroupV1Controllers = anyCgroupV1Controllers;
            this.anyCgroupV2Controllers = anyCgroupV2Controllers;
+           this.infos = infos;
        }

        public boolean isCgroupV2() {
@@ -212,5 +384,9 @@
        public boolean isAnyCgroupV1Controllers() {
            return anyCgroupV1Controllers;
        }
+
+       public Map<String, CgroupInfo> getInfos() {
+           return infos;
+       }
    }
 }
diff --git a/src/java.base/linux/classes/jdk/internal/platform/cgroupv1/CgroupV1Subsystem.java b/src/java.base/linux/classes/jdk/internal/platform/cgroupv1/CgroupV1Subsystem.java
index 25a435376c13a0a0aa60ba665b865317225d46f6..d10cfe091cfcea433903f6feddd731c5c881d35a 100644
--- a/src/java.base/linux/classes/jdk/internal/platform/cgroupv1/CgroupV1Subsystem.java
+++ b/src/java.base/linux/classes/jdk/internal/platform/cgroupv1/CgroupV1Subsystem.java
@@ -25,15 +25,11 @@
 package jdk.internal.platform.cgroupv1;

-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.stream.Stream;
+import java.util.Map;

+import jdk.internal.platform.CgroupInfo;
 import jdk.internal.platform.CgroupSubsystem;
 import jdk.internal.platform.CgroupSubsystemController;
-import jdk.internal.platform.CgroupUtil;
 import jdk.internal.platform.CgroupV1Metrics;

 public class CgroupV1Subsystem implements CgroupSubsystem, CgroupV1Metrics {
@@ -42,172 +38,107 @@
    private CgroupV1SubsystemController cpuacct;
    private CgroupV1SubsystemController cpuset;
    private CgroupV1SubsystemController blkio;
-   private boolean activeSubSystems;

-   private static final CgroupV1Subsystem INSTANCE = initSubSystem();
+   private static volatile CgroupV1Subsystem INSTANCE;

    private static final String PROVIDER_NAME = "cgroupv1";

-   private CgroupV1Subsystem() {
-       activeSubSystems = false;
-   }
+   private CgroupV1Subsystem() {}

-   public static CgroupV1Subsystem getInstance() {
+   /**
+    * Get a singleton instance of CgroupV1Subsystem. Initially, it creates a new
+    * object by retrieving the pre-parsed information from cgroup interface
+    * files from the provided 'infos' map.
+    *
+    * See CgroupSubsystemFactory.determineType() where the actual parsing of
+    * cgroup interface files happens.
+    *
+    * @return A singleton CgroupV1Subsystem instance, never null
+    */
+   public static CgroupV1Subsystem getInstance(Map<String, CgroupInfo> infos) {
+       if (INSTANCE == null) {
+           CgroupV1Subsystem tmpSubsystem = initSubSystem(infos);
+           synchronized (CgroupV1Subsystem.class) {
+               if (INSTANCE == null) {
+                   INSTANCE = tmpSubsystem;
+               }
+           }
+       }
        return INSTANCE;
    }

-   private static CgroupV1Subsystem initSubSystem() {
+   private static CgroupV1Subsystem initSubSystem(Map<String, CgroupInfo> infos) {
        CgroupV1Subsystem subsystem = new CgroupV1Subsystem();

-       /**
-        * Find the cgroup mount points for subsystems
-        * by reading /proc/self/mountinfo
-        *
-        * Example for docker MemorySubSystem subsystem:
-        * 219 214 0:29 /docker/7208cebd00fa5f2e342b1094f7bed87fa25661471a4637118e65f1c995be8a34 /sys/fs/cgroup/MemorySubSystem ro,nosuid,nodev,noexec,relatime - cgroup cgroup rw,MemorySubSystem
-        *
-        * Example for host:
-        * 34 28 0:29 / /sys/fs/cgroup/MemorySubSystem rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,MemorySubSystem
-        */
-       try (Stream<String> lines =
-            CgroupUtil.readFilePrivileged(Paths.get("/proc/self/mountinfo"))) {
-
-           lines.filter(line -> line.contains(" - cgroup "))
-                .map(line -> line.split(" "))
-                .forEach(entry -> createSubSystemController(subsystem, entry));
-
-       } catch (UncheckedIOException e) {
-           return null;
-       } catch (IOException e) {
-           return null;
-       }
-
-       /**
-        * Read /proc/self/cgroup and map host mount point to
-        * local one via /proc/self/mountinfo content above
-        *
-        * Docker example:
-        * 5:memory:/docker/6558aed8fc662b194323ceab5b964f69cf36b3e8af877a14b80256e93aecb044
-        *
-        * Host example:
-        * 5:memory:/user.slice
-        *
-        * Construct a path to the process specific memory and cpuset
-        * cgroup directory.
- * - * For a container running under Docker from memory example above - * the paths would be: - * - * /sys/fs/cgroup/memory - * - * For a Host from memory example above the path would be: - * - * /sys/fs/cgroup/memory/user.slice - * + boolean anyActiveControllers = false; + /* + * Find the cgroup mount points for subsystem controllers + * by looking up relevant data in the infos map */ - try (Stream lines = - CgroupUtil.readFilePrivileged(Paths.get("/proc/self/cgroup"))) { - - lines.map(line -> line.split(":")) - .filter(line -> (line.length >= 3)) - .forEach(line -> setSubSystemControllerPath(subsystem, line)); - - } catch (UncheckedIOException e) { - return null; - } catch (IOException e) { - return null; + for (CgroupInfo info: infos.values()) { + switch (info.getName()) { + case "memory": { + if (info.getMountRoot() != null && info.getMountPoint() != null) { + CgroupV1MemorySubSystemController controller = new CgroupV1MemorySubSystemController(info.getMountRoot(), info.getMountPoint()); + controller.setPath(info.getCgroupPath()); + boolean isHierarchial = getHierarchical(controller); + controller.setHierarchical(isHierarchial); + boolean isSwapEnabled = getSwapEnabled(controller); + controller.setSwapEnabled(isSwapEnabled); + subsystem.setMemorySubSystem(controller); + anyActiveControllers = true; + } + break; + } + case "cpuset": { + if (info.getMountRoot() != null && info.getMountPoint() != null) { + CgroupV1SubsystemController controller = new CgroupV1SubsystemController(info.getMountRoot(), info.getMountPoint()); + controller.setPath(info.getCgroupPath()); + subsystem.setCpuSetController(controller); + anyActiveControllers = true; + } + break; + } + case "cpuacct": { + if (info.getMountRoot() != null && info.getMountPoint() != null) { + CgroupV1SubsystemController controller = new CgroupV1SubsystemController(info.getMountRoot(), info.getMountPoint()); + controller.setPath(info.getCgroupPath()); + subsystem.setCpuAcctController(controller); + anyActiveControllers = true; + } + break; + } + case "cpu": { + if (info.getMountRoot() != null && info.getMountPoint() != null) { + CgroupV1SubsystemController controller = new CgroupV1SubsystemController(info.getMountRoot(), info.getMountPoint()); + controller.setPath(info.getCgroupPath()); + subsystem.setCpuController(controller); + anyActiveControllers = true; + } + break; + } + case "blkio": { + if (info.getMountRoot() != null && info.getMountPoint() != null) { + CgroupV1SubsystemController controller = new CgroupV1SubsystemController(info.getMountRoot(), info.getMountPoint()); + controller.setPath(info.getCgroupPath()); + subsystem.setBlkIOController(controller); + anyActiveControllers = true; + } + break; + } + default: + throw new AssertionError("Unrecognized controller in infos: " + info.getName()); + } } // Return Metrics object if we found any subsystems. 
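The getInstance(Map) rewrite above is a double-checked publication: the candidate object is built outside the lock, and the volatile INSTANCE field is assigned at most once under synchronization. A minimal sketch of the pattern in isolation, with illustrative names:

```java
// Sketch of the lazy-publication pattern used by CgroupV1Subsystem.getInstance():
// build the candidate eagerly, publish it at most once via a volatile field.
final class LazyHolder {
    private static volatile LazyHolder INSTANCE;

    private LazyHolder() {}

    static LazyHolder getInstance() {
        if (INSTANCE == null) {                    // fast path: already published
            LazyHolder candidate = new LazyHolder();
            synchronized (LazyHolder.class) {
                if (INSTANCE == null) {            // re-check under the lock
                    INSTANCE = candidate;
                }
            }
        }
        return INSTANCE;
    }
}
```

Because the field is volatile, a reader that sees a non-null INSTANCE also sees a fully constructed object; losing the race merely means the extra candidate is discarded, which is acceptable here since construction only parses already-collected data.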
- if (subsystem.activeSubSystems()) { + if (anyActiveControllers) { return subsystem; } return null; } - /** - * createSubSystem objects and initialize mount points - */ - private static void createSubSystemController(CgroupV1Subsystem subsystem, String[] mountentry) { - if (mountentry.length < 5) return; - - Path p = Paths.get(mountentry[4]); - String[] subsystemNames = p.getFileName().toString().split(","); - - for (String subsystemName: subsystemNames) { - switch (subsystemName) { - case "memory": - subsystem.setMemorySubSystem(new CgroupV1MemorySubSystemController(mountentry[3], mountentry[4])); - break; - case "cpuset": - subsystem.setCpuSetController(new CgroupV1SubsystemController(mountentry[3], mountentry[4])); - break; - case "cpuacct": - subsystem.setCpuAcctController(new CgroupV1SubsystemController(mountentry[3], mountentry[4])); - break; - case "cpu": - subsystem.setCpuController(new CgroupV1SubsystemController(mountentry[3], mountentry[4])); - break; - case "blkio": - subsystem.setBlkIOController(new CgroupV1SubsystemController(mountentry[3], mountentry[4])); - break; - default: - // Ignore subsystems that we don't support - break; - } - } - } - - /** - * setSubSystemPath based on the contents of /proc/self/cgroup - */ - private static void setSubSystemControllerPath(CgroupV1Subsystem subsystem, String[] entry) { - String controllerName = entry[1]; - String base = entry[2]; - - if (controllerName != null && base != null) { - for (String cName: controllerName.split(",")) { - switch (cName) { - case "memory": - setPath(subsystem, subsystem.memoryController(), base); - break; - case "cpuset": - setPath(subsystem, subsystem.cpuSetController(), base); - break; - case "cpu": - setPath(subsystem, subsystem.cpuController(), base); - break; - case "cpuacct": - setPath(subsystem, subsystem.cpuAcctController(), base); - break; - case "blkio": - setPath(subsystem, subsystem.blkIOController(), base); - break; - // Ignore subsystems that we don't support - default: - break; - } - } - } - - } - - private static void setPath(CgroupV1Subsystem subsystem, CgroupV1SubsystemController controller, String base) { - if (controller != null) { - controller.setPath(base); - if (controller instanceof CgroupV1MemorySubSystemController) { - CgroupV1MemorySubSystemController memorySubSystem = (CgroupV1MemorySubSystemController)controller; - boolean isHierarchial = getHierarchical(memorySubSystem); - memorySubSystem.setHierarchical(isHierarchial); - boolean isSwapEnabled = getSwapEnabled(memorySubSystem); - memorySubSystem.setSwapEnabled(isSwapEnabled); - } - subsystem.setActiveSubSystems(); - } - } - - private static boolean getSwapEnabled(CgroupV1MemorySubSystemController controller) { long retval = getLongValue(controller, "memory.memsw.limit_in_bytes"); return retval > 0; @@ -219,14 +150,6 @@ public class CgroupV1Subsystem implements CgroupSubsystem, CgroupV1Metrics { return hierarchical > 0; } - private void setActiveSubSystems() { - activeSubSystems = true; - } - - private boolean activeSubSystems() { - return activeSubSystems; - } - private void setMemorySubSystem(CgroupV1MemorySubSystemController memory) { this.memory = memory; } @@ -247,26 +170,6 @@ public class CgroupV1Subsystem implements CgroupSubsystem, CgroupV1Metrics { this.blkio = blkio; } - private CgroupV1SubsystemController memoryController() { - return memory; - } - - private CgroupV1SubsystemController cpuController() { - return cpu; - } - - private CgroupV1SubsystemController cpuAcctController() { - return cpuacct; - } - - private 
CgroupV1SubsystemController cpuSetController() {
-        return cpuset;
-    }
-
-    private CgroupV1SubsystemController blkIOController() {
-        return blkio;
-    }
-
     private static long getLongValue(CgroupSubsystemController controller,
                                      String parm) {
         return CgroupSubsystemController.getLongValue(controller,
diff --git a/src/java.base/linux/classes/jdk/internal/platform/cgroupv2/CgroupV2Subsystem.java b/src/java.base/linux/classes/jdk/internal/platform/cgroupv2/CgroupV2Subsystem.java
index df6670e1d4ef533a27cb1bbc05e73d3027100013..2d4e2bc78e4570ac1df53e25bbbf87568611558d 100644
--- a/src/java.base/linux/classes/jdk/internal/platform/cgroupv2/CgroupV2Subsystem.java
+++ b/src/java.base/linux/classes/jdk/internal/platform/cgroupv2/CgroupV2Subsystem.java
@@ -28,19 +28,18 @@ package jdk.internal.platform.cgroupv2;
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.nio.file.Paths;
-import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
+import jdk.internal.platform.CgroupInfo;
 import jdk.internal.platform.CgroupSubsystem;
 import jdk.internal.platform.CgroupSubsystemController;
 import jdk.internal.platform.CgroupUtil;
 
 public class CgroupV2Subsystem implements CgroupSubsystem {
 
-    private static final CgroupV2Subsystem INSTANCE = initSubsystem();
+    private static volatile CgroupV2Subsystem INSTANCE;
 
     private static final long[] LONG_ARRAY_NOT_SUPPORTED = null;
     private static final int[] INT_ARRAY_UNAVAILABLE = null;
     private final CgroupSubsystemController unified;
@@ -65,48 +64,29 @@ public class CgroupV2Subsystem implements CgroupSubsystem {
         return getLongVal(file, CgroupSubsystem.LONG_RETVAL_UNLIMITED);
     }
 
-    private static CgroupV2Subsystem initSubsystem() {
-        // read mountinfo so as to determine root mount path
-        String mountPath = null;
-        try (Stream<String> lines =
-             CgroupUtil.readFilePrivileged(Paths.get("/proc/self/mountinfo"))) {
-
-            String l = lines.filter(line -> line.contains(" - cgroup2 "))
-                            .collect(Collectors.joining());
-            String[] tokens = l.split(" ");
-            mountPath = tokens[4];
-        } catch (UncheckedIOException e) {
-            return null;
-        } catch (IOException e) {
-            return null;
-        }
-        String cgroupPath = null;
-        try {
-            List<String> lines = CgroupUtil.readAllLinesPrivileged(Paths.get("/proc/self/cgroup"));
-            for (String line: lines) {
-                String[] tokens = line.split(":");
-                if (tokens.length != 3) {
-                    return null; // something is not right.
-                }
-                if (!"0".equals(tokens[0])) {
-                    // hierarchy must be zero for cgroups v2
-                    return null;
+    /**
+     * Get the singleton instance of a cgroups v2 subsystem. On initialization,
+     * a new object is created from the given cgroup information 'anyController'.
+     * Note that the cgroup information has been parsed from the cgroup
+     * interface files ahead of time.
+     *
+     * See CgroupSubsystemFactory.determineType() for the cgroup interface
+     * file parsing logic.
+     *
+     * @return A singleton CgroupSubsystem instance, never null.
+ */ + public static CgroupSubsystem getInstance(CgroupInfo anyController) { + if (INSTANCE == null) { + CgroupSubsystemController unified = new CgroupV2SubsystemController( + anyController.getMountPoint(), + anyController.getCgroupPath()); + CgroupV2Subsystem tmpCgroupSystem = new CgroupV2Subsystem(unified); + synchronized (CgroupV2Subsystem.class) { + if (INSTANCE == null) { + INSTANCE = tmpCgroupSystem; } - cgroupPath = tokens[2]; - break; } - } catch (UncheckedIOException e) { - return null; - } catch (IOException e) { - return null; } - CgroupSubsystemController unified = new CgroupV2SubsystemController( - mountPath, - cgroupPath); - return new CgroupV2Subsystem(unified); - } - - public static CgroupSubsystem getInstance() { return INSTANCE; } diff --git a/src/java.base/linux/classes/sun/nio/fs/LinuxFileStore.java b/src/java.base/linux/classes/sun/nio/fs/LinuxFileStore.java index 0816146fa6d49b25336b64b6eaaabcc2784ccdf1..1b6fce47fc718bcd282549d492c5dcc32012cceb 100644 --- a/src/java.base/linux/classes/sun/nio/fs/LinuxFileStore.java +++ b/src/java.base/linux/classes/sun/nio/fs/LinuxFileStore.java @@ -105,27 +105,6 @@ class LinuxFileStore throw new IOException("Mount point not found"); } - // returns true if extended attributes enabled on file system where given - // file resides, returns false if disabled or unable to determine. - private boolean isExtendedAttributesEnabled(UnixPath path) { - int fd = -1; - try { - fd = path.openForAttributeAccess(false); - - // fgetxattr returns size if called with size==0 - byte[] name = Util.toBytes("user.java"); - LinuxNativeDispatcher.fgetxattr(fd, name, 0L, 0); - return true; - } catch (UnixException e) { - // attribute does not exist - if (e.errno() == UnixConstants.ENODATA) - return true; - } finally { - UnixNativeDispatcher.close(fd); - } - return false; - } - // get kernel version as a three element array {major, minor, micro} private static int[] getKernelVersion() { Pattern pattern = Pattern.compile("\\D+"); @@ -162,12 +141,6 @@ class LinuxFileStore return false; } - // user_{no}xattr options not present but we special-case ext3 as - // we know that extended attributes are not enabled by default. 
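Note the asymmetry with v1 in the CgroupV2Subsystem change above: cgroups v2 is a unified hierarchy, so a single mount point serves every controller, and any one CgroupInfo ('anyController') carries enough data to build the unified controller. A hedged illustration of why one base path suffices (the paths are examples, not parsed output):

    import java.nio.file.Path;

    final class UnifiedPaths {
        // Under v2 every interface file resolves against the same base:
        // <mount point>/<cgroup path>/<controller file>
        static Path interfaceFile(String mountPoint, String cgroupPath, String file) {
            return Path.of(mountPoint, cgroupPath, file);
        }

        public static void main(String[] args) {
            // prints /sys/fs/cgroup/user.slice/memory.max
            System.out.println(interfaceFile("/sys/fs/cgroup", "user.slice", "memory.max"));
            // prints /sys/fs/cgroup/user.slice/cpu.max
            System.out.println(interfaceFile("/sys/fs/cgroup", "user.slice", "cpu.max"));
        }
    }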
- if (entry().fstype().equals("ext3")) { - return false; - } - // user_xattr option not present but we special-case ext4 as we // know that extended attributes are enabled by default for // kernel version >= 2.6.39 @@ -184,7 +157,7 @@ class LinuxFileStore return xattrEnabled; } - // not ext3/4 so probe mount point + // not ext4 so probe mount point if (!xattrChecked) { UnixPath dir = new UnixPath(file().getFileSystem(), entry().dir()); xattrEnabled = isExtendedAttributesEnabled(dir); diff --git a/src/java.base/linux/classes/sun/nio/fs/LinuxFileSystem.java b/src/java.base/linux/classes/sun/nio/fs/LinuxFileSystem.java index cfd83963b2bce8e9bafac7b4e39b6b5e386617a7..a3153b31329f332de9650e222da5082a9bc08315 100644 --- a/src/java.base/linux/classes/sun/nio/fs/LinuxFileSystem.java +++ b/src/java.base/linux/classes/sun/nio/fs/LinuxFileSystem.java @@ -69,7 +69,7 @@ class LinuxFileSystem extends UnixFileSystem { @Override void copyNonPosixAttributes(int ofd, int nfd) { - LinuxUserDefinedFileAttributeView.copyExtendedAttributes(ofd, nfd); + UnixUserDefinedFileAttributeView.copyExtendedAttributes(ofd, nfd); } /** diff --git a/src/java.base/linux/classes/sun/nio/fs/LinuxNativeDispatcher.java b/src/java.base/linux/classes/sun/nio/fs/LinuxNativeDispatcher.java index 289dc7cc94c06568083b4f41f149742665bc7e5d..6e6d59fd8ba218d99d471cdc42f38c3f06e387c7 100644 --- a/src/java.base/linux/classes/sun/nio/fs/LinuxNativeDispatcher.java +++ b/src/java.base/linux/classes/sun/nio/fs/LinuxNativeDispatcher.java @@ -69,61 +69,6 @@ class LinuxNativeDispatcher extends UnixNativeDispatcher { */ static native void endmntent(long stream) throws UnixException; - /** - * ssize_t fgetxattr(int filedes, const char *name, void *value, size_t size); - */ - static int fgetxattr(int filedes, byte[] name, long valueAddress, - int valueLen) throws UnixException - { - NativeBuffer buffer = NativeBuffers.asNativeBuffer(name); - try { - return fgetxattr0(filedes, buffer.address(), valueAddress, valueLen); - } finally { - buffer.release(); - } - } - - private static native int fgetxattr0(int filedes, long nameAddress, - long valueAddress, int valueLen) throws UnixException; - - /** - * fsetxattr(int filedes, const char *name, const void *value, size_t size, int flags); - */ - static void fsetxattr(int filedes, byte[] name, long valueAddress, - int valueLen) throws UnixException - { - NativeBuffer buffer = NativeBuffers.asNativeBuffer(name); - try { - fsetxattr0(filedes, buffer.address(), valueAddress, valueLen); - } finally { - buffer.release(); - } - } - - private static native void fsetxattr0(int filedes, long nameAddress, - long valueAddress, int valueLen) throws UnixException; - - /** - * fremovexattr(int filedes, const char *name); - */ - static void fremovexattr(int filedes, byte[] name) throws UnixException { - NativeBuffer buffer = NativeBuffers.asNativeBuffer(name); - try { - fremovexattr0(filedes, buffer.address()); - } finally { - buffer.release(); - } - } - - private static native void fremovexattr0(int filedes, long nameAddress) - throws UnixException; - - /** - * size_t flistxattr(int filedes, const char *list, size_t size) - */ - static native int flistxattr(int filedes, long listAddress, int size) - throws UnixException; - // initialize private static native void init(); diff --git a/src/java.base/linux/classes/sun/nio/fs/LinuxUserDefinedFileAttributeView.java b/src/java.base/linux/classes/sun/nio/fs/LinuxUserDefinedFileAttributeView.java index 
568341be3ecf29c6971ec8e3976ad977dadf15da..3175c902d0014e1c1e1268823a805c604e0fe958 100644 --- a/src/java.base/linux/classes/sun/nio/fs/LinuxUserDefinedFileAttributeView.java +++ b/src/java.base/linux/classes/sun/nio/fs/LinuxUserDefinedFileAttributeView.java @@ -25,351 +25,17 @@ package sun.nio.fs; -import java.nio.file.*; -import java.nio.ByteBuffer; -import java.io.IOException; -import java.util.*; -import jdk.internal.misc.Unsafe; - -import static sun.nio.fs.UnixConstants.*; -import static sun.nio.fs.LinuxNativeDispatcher.*; - -/** - * Linux implementation of UserDefinedFileAttributeView using extended attributes. - */ - class LinuxUserDefinedFileAttributeView - extends AbstractUserDefinedFileAttributeView + extends UnixUserDefinedFileAttributeView { - private static final Unsafe unsafe = Unsafe.getUnsafe(); - - // namespace for extended user attributes - private static final String USER_NAMESPACE = "user."; - - // maximum bytes in extended attribute name (includes namespace) - private static final int XATTR_NAME_MAX = 255; - - private byte[] nameAsBytes(UnixPath file, String name) throws IOException { - if (name == null) - throw new NullPointerException("'name' is null"); - name = USER_NAMESPACE + name; - byte[] bytes = Util.toBytes(name); - if (bytes.length > XATTR_NAME_MAX) { - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "'" + name + "' is too big"); - } - return bytes; - } - - // Parses buffer as array of NULL-terminated C strings. - private List asList(long address, int size) { - List list = new ArrayList<>(); - int start = 0; - int pos = 0; - while (pos < size) { - if (unsafe.getByte(address + pos) == 0) { - int len = pos - start; - byte[] value = new byte[len]; - unsafe.copyMemory(null, address+start, value, - Unsafe.ARRAY_BYTE_BASE_OFFSET, len); - String s = Util.toString(value); - if (s.startsWith(USER_NAMESPACE)) { - s = s.substring(USER_NAMESPACE.length()); - list.add(s); - } - start = pos + 1; - } - pos++; - } - return list; - } - - private final UnixPath file; - private final boolean followLinks; LinuxUserDefinedFileAttributeView(UnixPath file, boolean followLinks) { - this.file = file; - this.followLinks = followLinks; - } - - @Override - public List list() throws IOException { - if (System.getSecurityManager() != null) - checkAccess(file.getPathForPermissionCheck(), true, false); - - int fd = -1; - try { - fd = file.openForAttributeAccess(followLinks); - } catch (UnixException x) { - x.rethrowAsIOException(file); - } - NativeBuffer buffer = null; - try { - int size = 1024; - buffer = NativeBuffers.getNativeBuffer(size); - for (;;) { - try { - int n = flistxattr(fd, buffer.address(), size); - List list = asList(buffer.address(), n); - return Collections.unmodifiableList(list); - } catch (UnixException x) { - // allocate larger buffer if required - if (x.errno() == ERANGE && size < 32*1024) { - buffer.release(); - size *= 2; - buffer = null; - buffer = NativeBuffers.getNativeBuffer(size); - continue; - } - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "Unable to get list of extended attributes: " + - x.getMessage()); - } - } - } finally { - if (buffer != null) - buffer.release(); - close(fd); - } - } - - @Override - public int size(String name) throws IOException { - if (System.getSecurityManager() != null) - checkAccess(file.getPathForPermissionCheck(), true, false); - - int fd = -1; - try { - fd = file.openForAttributeAccess(followLinks); - } catch (UnixException x) { - x.rethrowAsIOException(file); - } - try { - // 
fgetxattr returns size if called with size==0 - return fgetxattr(fd, nameAsBytes(file,name), 0L, 0); - } catch (UnixException x) { - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "Unable to get size of extended attribute '" + name + - "': " + x.getMessage()); - } finally { - close(fd); - } - } - - @Override - public int read(String name, ByteBuffer dst) throws IOException { - if (System.getSecurityManager() != null) - checkAccess(file.getPathForPermissionCheck(), true, false); - - if (dst.isReadOnly()) - throw new IllegalArgumentException("Read-only buffer"); - int pos = dst.position(); - int lim = dst.limit(); - assert (pos <= lim); - int rem = (pos <= lim ? lim - pos : 0); - - NativeBuffer nb; - long address; - if (dst instanceof sun.nio.ch.DirectBuffer) { - nb = null; - address = ((sun.nio.ch.DirectBuffer)dst).address() + pos; - } else { - // substitute with native buffer - nb = NativeBuffers.getNativeBuffer(rem); - address = nb.address(); - } - - int fd = -1; - try { - fd = file.openForAttributeAccess(followLinks); - } catch (UnixException x) { - x.rethrowAsIOException(file); - } - try { - try { - int n = fgetxattr(fd, nameAsBytes(file,name), address, rem); - - // if remaining is zero then fgetxattr returns the size - if (rem == 0) { - if (n > 0) - throw new UnixException(ERANGE); - return 0; - } - - // copy from buffer into backing array if necessary - if (nb != null) { - int off = dst.arrayOffset() + pos + Unsafe.ARRAY_BYTE_BASE_OFFSET; - unsafe.copyMemory(null, address, dst.array(), off, n); - } - dst.position(pos + n); - return n; - } catch (UnixException x) { - String msg = (x.errno() == ERANGE) ? - "Insufficient space in buffer" : x.getMessage(); - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "Error reading extended attribute '" + name + "': " + msg); - } finally { - close(fd); - } - } finally { - if (nb != null) - nb.release(); - } + super(file, followLinks); } @Override - public int write(String name, ByteBuffer src) throws IOException { - if (System.getSecurityManager() != null) - checkAccess(file.getPathForPermissionCheck(), false, true); - - int pos = src.position(); - int lim = src.limit(); - assert (pos <= lim); - int rem = (pos <= lim ? 
lim - pos : 0); - - NativeBuffer nb; - long address; - if (src instanceof sun.nio.ch.DirectBuffer) { - nb = null; - address = ((sun.nio.ch.DirectBuffer)src).address() + pos; - } else { - // substitute with native buffer - nb = NativeBuffers.getNativeBuffer(rem); - address = nb.address(); - - if (src.hasArray()) { - // copy from backing array into buffer - int off = src.arrayOffset() + pos + Unsafe.ARRAY_BYTE_BASE_OFFSET; - unsafe.copyMemory(src.array(), off, null, address, rem); - } else { - // backing array not accessible so transfer via temporary array - byte[] tmp = new byte[rem]; - src.get(tmp); - src.position(pos); // reset position as write may fail - unsafe.copyMemory(tmp, Unsafe.ARRAY_BYTE_BASE_OFFSET, null, - address, rem); - } - } - - int fd = -1; - try { - fd = file.openForAttributeAccess(followLinks); - } catch (UnixException x) { - x.rethrowAsIOException(file); - } - try { - try { - fsetxattr(fd, nameAsBytes(file,name), address, rem); - src.position(pos + rem); - return rem; - } catch (UnixException x) { - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "Error writing extended attribute '" + name + "': " + - x.getMessage()); - } finally { - close(fd); - } - } finally { - if (nb != null) - nb.release(); - } + protected int maxNameLength() { + return 255; } - @Override - public void delete(String name) throws IOException { - if (System.getSecurityManager() != null) - checkAccess(file.getPathForPermissionCheck(), false, true); - - int fd = -1; - try { - fd = file.openForAttributeAccess(followLinks); - } catch (UnixException x) { - x.rethrowAsIOException(file); - } - try { - fremovexattr(fd, nameAsBytes(file,name)); - } catch (UnixException x) { - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "Unable to delete extended attribute '" + name + "': " + x.getMessage()); - } finally { - close(fd); - } - } - - /** - * Used by copyTo/moveTo to copy extended attributes from source to target. - * - * @param ofd - * file descriptor for source file - * @param nfd - * file descriptor for target file - */ - static void copyExtendedAttributes(int ofd, int nfd) { - NativeBuffer buffer = null; - try { - - // call flistxattr to get list of extended attributes. - int size = 1024; - buffer = NativeBuffers.getNativeBuffer(size); - for (;;) { - try { - size = flistxattr(ofd, buffer.address(), size); - break; - } catch (UnixException x) { - // allocate larger buffer if required - if (x.errno() == ERANGE && size < 32*1024) { - buffer.release(); - size *= 2; - buffer = null; - buffer = NativeBuffers.getNativeBuffer(size); - continue; - } - - // unable to get list of attributes - return; - } - } - - // parse buffer as array of NULL-terminated C strings. - long address = buffer.address(); - int start = 0; - int pos = 0; - while (pos < size) { - if (unsafe.getByte(address + pos) == 0) { - // extract attribute name and copy attribute to target. - // FIXME: We can avoid needless copying by using address+pos - // as the address of the name. 
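Stepping back from this hunk: the platform views collapse into thin subclasses here. LinuxUserDefinedFileAttributeView keeps only its constructor and a 255-byte name limit, and further down the BSD view keeps a 127-byte limit, with all the list/read/write/delete logic hoisted into the shared UnixUserDefinedFileAttributeView. A minimal sketch of this template-method shape, with illustrative class names rather than the real sun.nio.fs types:

    import java.nio.charset.StandardCharsets;

    abstract class XattrView {
        private static final String USER_NAMESPACE = "user.";

        // The only platform-specific knob left in the subclasses.
        protected abstract int maxNameLength();

        final byte[] nameAsBytes(String name) {
            byte[] bytes = (USER_NAMESPACE + name).getBytes(StandardCharsets.UTF_8);
            if (bytes.length > maxNameLength()) {
                throw new IllegalArgumentException("'" + name + "' is too big");
            }
            return bytes;
        }
    }

    final class LinuxXattrView extends XattrView {
        @Override protected int maxNameLength() { return 255; }
    }

    final class BsdXattrView extends XattrView {
        @Override protected int maxNameLength() { return 127; } // XATTR_MAXNAMELEN
    }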
- int len = pos - start; - byte[] name = new byte[len]; - unsafe.copyMemory(null, address+start, name, - Unsafe.ARRAY_BYTE_BASE_OFFSET, len); - try { - copyExtendedAttribute(ofd, name, nfd); - } catch (UnixException ignore) { - // ignore - } - start = pos + 1; - } - pos++; - } - - } finally { - if (buffer != null) - buffer.release(); - } - } - - private static void copyExtendedAttribute(int ofd, byte[] name, int nfd) - throws UnixException - { - int size = fgetxattr(ofd, name, 0L, 0); - NativeBuffer buffer = NativeBuffers.getNativeBuffer(size); - try { - long address = buffer.address(); - size = fgetxattr(ofd, name, address, size); - fsetxattr(nfd, name, address, size); - } finally { - buffer.release(); - } - } } diff --git a/src/java.base/linux/native/libnio/fs/LinuxNativeDispatcher.c b/src/java.base/linux/native/libnio/fs/LinuxNativeDispatcher.c index 48df9b7c197467b0b0a49e6fe0b9bdf5f9eb24f2..20014564172a8d9a80e8e9c174974266f3cc7581 100644 --- a/src/java.base/linux/native/libnio/fs/LinuxNativeDispatcher.c +++ b/src/java.base/linux/native/libnio/fs/LinuxNativeDispatcher.c @@ -36,16 +36,6 @@ #include "sun_nio_fs_LinuxNativeDispatcher.h" -typedef size_t fgetxattr_func(int fd, const char* name, void* value, size_t size); -typedef int fsetxattr_func(int fd, const char* name, void* value, size_t size, int flags); -typedef int fremovexattr_func(int fd, const char* name); -typedef int flistxattr_func(int fd, char* list, size_t size); - -fgetxattr_func* my_fgetxattr_func = NULL; -fsetxattr_func* my_fsetxattr_func = NULL; -fremovexattr_func* my_fremovexattr_func = NULL; -flistxattr_func* my_flistxattr_func = NULL; - static jfieldID entry_name; static jfieldID entry_dir; static jfieldID entry_fstype; @@ -62,11 +52,6 @@ static void throwUnixException(JNIEnv* env, int errnum) { JNIEXPORT void JNICALL Java_sun_nio_fs_LinuxNativeDispatcher_init(JNIEnv *env, jclass clazz) { - my_fgetxattr_func = (fgetxattr_func*)dlsym(RTLD_DEFAULT, "fgetxattr"); - my_fsetxattr_func = (fsetxattr_func*)dlsym(RTLD_DEFAULT, "fsetxattr"); - my_fremovexattr_func = (fremovexattr_func*)dlsym(RTLD_DEFAULT, "fremovexattr"); - my_flistxattr_func = (flistxattr_func*)dlsym(RTLD_DEFAULT, "flistxattr"); - clazz = (*env)->FindClass(env, "sun/nio/fs/UnixMountEntry"); CHECK_NULL(clazz); entry_name = (*env)->GetFieldID(env, clazz, "name", "[B"); @@ -79,78 +64,6 @@ Java_sun_nio_fs_LinuxNativeDispatcher_init(JNIEnv *env, jclass clazz) CHECK_NULL(entry_options); } -JNIEXPORT jint JNICALL -Java_sun_nio_fs_LinuxNativeDispatcher_fgetxattr0(JNIEnv* env, jclass clazz, - jint fd, jlong nameAddress, jlong valueAddress, jint valueLen) -{ - size_t res = -1; - const char* name = jlong_to_ptr(nameAddress); - void* value = jlong_to_ptr(valueAddress); - - if (my_fgetxattr_func == NULL) { - errno = ENOTSUP; - } else { - /* EINTR not documented */ - res = (*my_fgetxattr_func)(fd, name, value, valueLen); - } - if (res == (size_t)-1) - throwUnixException(env, errno); - return (jint)res; -} - -JNIEXPORT void JNICALL -Java_sun_nio_fs_LinuxNativeDispatcher_fsetxattr0(JNIEnv* env, jclass clazz, - jint fd, jlong nameAddress, jlong valueAddress, jint valueLen) -{ - int res = -1; - const char* name = jlong_to_ptr(nameAddress); - void* value = jlong_to_ptr(valueAddress); - - if (my_fsetxattr_func == NULL) { - errno = ENOTSUP; - } else { - /* EINTR not documented */ - res = (*my_fsetxattr_func)(fd, name, value, valueLen, 0); - } - if (res == -1) - throwUnixException(env, errno); -} - -JNIEXPORT void JNICALL -Java_sun_nio_fs_LinuxNativeDispatcher_fremovexattr0(JNIEnv* 
env, jclass clazz, - jint fd, jlong nameAddress) -{ - int res = -1; - const char* name = jlong_to_ptr(nameAddress); - - if (my_fremovexattr_func == NULL) { - errno = ENOTSUP; - } else { - /* EINTR not documented */ - res = (*my_fremovexattr_func)(fd, name); - } - if (res == -1) - throwUnixException(env, errno); -} - -JNIEXPORT jint JNICALL -Java_sun_nio_fs_LinuxNativeDispatcher_flistxattr(JNIEnv* env, jclass clazz, - jint fd, jlong listAddress, jint size) -{ - size_t res = -1; - char* list = jlong_to_ptr(listAddress); - - if (my_flistxattr_func == NULL) { - errno = ENOTSUP; - } else { - /* EINTR not documented */ - res = (*my_flistxattr_func)(fd, list, (size_t)size); - } - if (res == (size_t)-1) - throwUnixException(env, errno); - return (jint)res; -} - JNIEXPORT jlong JNICALL Java_sun_nio_fs_LinuxNativeDispatcher_setmntent0(JNIEnv* env, jclass this, jlong pathAddress, jlong modeAddress) diff --git a/src/java.base/macosx/classes/sun/nio/fs/BsdFileStore.java b/src/java.base/macosx/classes/sun/nio/fs/BsdFileStore.java index 8083ebcf54875e7b5fde61db7097256a85fcb03c..3f5de6bbefad7103190c756a58fc75911517d4dd 100644 --- a/src/java.base/macosx/classes/sun/nio/fs/BsdFileStore.java +++ b/src/java.base/macosx/classes/sun/nio/fs/BsdFileStore.java @@ -80,27 +80,6 @@ class BsdFileStore throw new IOException("Mount point not found in fstab"); } - // returns true if extended attributes enabled on file system where given - // file resides, returns false if disabled or unable to determine. - private boolean isExtendedAttributesEnabled(UnixPath path) { - int fd = -1; - try { - fd = path.openForAttributeAccess(false); - - // fgetxattr returns size if called with size==0 - byte[] name = Util.toBytes("user.java"); - BsdNativeDispatcher.fgetxattr(fd, name, 0L, 0); - return true; - } catch (UnixException e) { - // attribute does not exist - if (e.errno() == UnixConstants.ENOATTR) - return true; - } finally { - UnixNativeDispatcher.close(fd); - } - return false; - } - @Override public boolean supportsFileAttributeView(Class type) { // support UserDefinedAttributeView if extended attributes enabled diff --git a/src/java.base/macosx/classes/sun/nio/fs/BsdFileSystem.java b/src/java.base/macosx/classes/sun/nio/fs/BsdFileSystem.java index 3f6b993b4221942cd4a481a02885bc5455a4b47c..359da72f7f88375a1b2f72467b453f790b71a34e 100644 --- a/src/java.base/macosx/classes/sun/nio/fs/BsdFileSystem.java +++ b/src/java.base/macosx/classes/sun/nio/fs/BsdFileSystem.java @@ -69,6 +69,7 @@ class BsdFileSystem extends UnixFileSystem { @Override void copyNonPosixAttributes(int ofd, int nfd) { + UnixUserDefinedFileAttributeView.copyExtendedAttributes(ofd, nfd); } /** diff --git a/src/java.base/macosx/classes/sun/nio/fs/BsdNativeDispatcher.java b/src/java.base/macosx/classes/sun/nio/fs/BsdNativeDispatcher.java index fecc58ef170bc68d279ff2c84c6e53a7977b0d07..2e0531309f17e275238748e03a91e2848b6587e7 100644 --- a/src/java.base/macosx/classes/sun/nio/fs/BsdNativeDispatcher.java +++ b/src/java.base/macosx/classes/sun/nio/fs/BsdNativeDispatcher.java @@ -62,67 +62,6 @@ class BsdNativeDispatcher extends UnixNativeDispatcher { } static native byte[] getmntonname0(long pathAddress) throws UnixException; - /** - * ssize_t fgetxattr(int fd, const char *name, void *value, size_t size, - * u_int32_t position, int options); - */ - static int fgetxattr(int fd, byte[] name, long valueAddress, - int valueLen) throws UnixException - { - NativeBuffer buffer = NativeBuffers.asNativeBuffer(name); - try { - return fgetxattr0(fd, buffer.address(), valueAddress, 
valueLen, 0L, 0); - } finally { - buffer.release(); - } - } - - private static native int fgetxattr0(int fd, long nameAddress, - long valueAddress, int valueLen, long position, int options) throws UnixException; - - /** - * int fsetxattr(int fd, const char *name, void *value, size_t size, - * u_int32_t position, int options); - */ - static void fsetxattr(int fd, byte[] name, long valueAddress, - int valueLen) throws UnixException - { - NativeBuffer buffer = NativeBuffers.asNativeBuffer(name); - try { - fsetxattr0(fd, buffer.address(), valueAddress, valueLen, 0L, 0); - } finally { - buffer.release(); - } - } - - private static native void fsetxattr0(int fd, long nameAddress, - long valueAddress, int valueLen, long position, int options) throws UnixException; - - /** - * int fremovexattr(int fd, const char *name, int options); - */ - static void fremovexattr(int fd, byte[] name) throws UnixException { - NativeBuffer buffer = NativeBuffers.asNativeBuffer(name); - try { - fremovexattr0(fd, buffer.address(), 0); - } finally { - buffer.release(); - } - } - - private static native void fremovexattr0(int fd, long nameAddress, int options) - throws UnixException; - - /** - * ssize_t flistxattr(int fd, char *namebuf, size_t size, int options); - */ - static int flistxattr(int fd, long nameBufAddress, int size) throws UnixException { - return flistxattr0(fd, nameBufAddress, size, 0); - } - - private static native int flistxattr0(int fd, long nameBufAddress, int size, - int options) throws UnixException; - // initialize field IDs private static native void initIDs(); diff --git a/src/java.base/macosx/classes/sun/nio/fs/BsdUserDefinedFileAttributeView.java b/src/java.base/macosx/classes/sun/nio/fs/BsdUserDefinedFileAttributeView.java index 5edc9726434a9357c2ac67011fb00c2b804fa313..725383e1778670c377e7f9f06c823af04787590b 100644 --- a/src/java.base/macosx/classes/sun/nio/fs/BsdUserDefinedFileAttributeView.java +++ b/src/java.base/macosx/classes/sun/nio/fs/BsdUserDefinedFileAttributeView.java @@ -25,352 +25,18 @@ package sun.nio.fs; -import java.nio.file.*; -import java.nio.ByteBuffer; -import java.io.IOException; -import java.util.*; -import jdk.internal.misc.Unsafe; - -import static sun.nio.fs.UnixConstants.*; -import static sun.nio.fs.BsdNativeDispatcher.*; - -/** - * BSD implementation of UserDefinedFileAttributeView using extended attributes. - */ - class BsdUserDefinedFileAttributeView - extends AbstractUserDefinedFileAttributeView + extends UnixUserDefinedFileAttributeView { - private static final Unsafe unsafe = Unsafe.getUnsafe(); - - // namespace for extended user attributes - private static final String USER_NAMESPACE = "user."; - - // maximum bytes in extended attribute name (includes namespace), - // see XATTR_MAXNAMELEN in https://github.com/apple/darwin-xnu/blob/master/bsd/sys/xattr.h - private static final int XATTR_NAME_MAX = 127; - - private byte[] nameAsBytes(UnixPath file, String name) throws IOException { - if (name == null) - throw new NullPointerException("'name' is null"); - name = USER_NAMESPACE + name; - byte[] bytes = Util.toBytes(name); - if (bytes.length > XATTR_NAME_MAX) { - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "'" + name + "' is too big"); - } - return bytes; - } - - // Parses buffer as array of NULL-terminated C strings. 
- private List asList(long address, int size) { - List list = new ArrayList<>(); - int start = 0; - int pos = 0; - while (pos < size) { - if (unsafe.getByte(address + pos) == 0) { - int len = pos - start; - byte[] value = new byte[len]; - unsafe.copyMemory(null, address+start, value, - Unsafe.ARRAY_BYTE_BASE_OFFSET, len); - String s = Util.toString(value); - if (s.startsWith(USER_NAMESPACE)) { - s = s.substring(USER_NAMESPACE.length()); - list.add(s); - } - start = pos + 1; - } - pos++; - } - return list; - } - - private final UnixPath file; - private final boolean followLinks; BsdUserDefinedFileAttributeView(UnixPath file, boolean followLinks) { - this.file = file; - this.followLinks = followLinks; - } - - @Override - public List list() throws IOException { - if (System.getSecurityManager() != null) - checkAccess(file.getPathForPermissionCheck(), true, false); - - int fd = -1; - try { - fd = file.openForAttributeAccess(followLinks); - } catch (UnixException x) { - x.rethrowAsIOException(file); - } - NativeBuffer buffer = null; - try { - int size = 1024; - buffer = NativeBuffers.getNativeBuffer(size); - for (;;) { - try { - int n = flistxattr(fd, buffer.address(), size); - List list = asList(buffer.address(), n); - return Collections.unmodifiableList(list); - } catch (UnixException x) { - // allocate larger buffer if required - if (x.errno() == ERANGE && size < 32*1024) { - buffer.release(); - size *= 2; - buffer = null; - buffer = NativeBuffers.getNativeBuffer(size); - continue; - } - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "Unable to get list of extended attributes: " + - x.getMessage()); - } - } - } finally { - if (buffer != null) - buffer.release(); - close(fd); - } - } - - @Override - public int size(String name) throws IOException { - if (System.getSecurityManager() != null) - checkAccess(file.getPathForPermissionCheck(), true, false); - - int fd = -1; - try { - fd = file.openForAttributeAccess(followLinks); - } catch (UnixException x) { - x.rethrowAsIOException(file); - } - try { - // fgetxattr returns size if called with size==0 - return fgetxattr(fd, nameAsBytes(file,name), 0L, 0); - } catch (UnixException x) { - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "Unable to get size of extended attribute '" + name + - "': " + x.getMessage()); - } finally { - close(fd); - } - } - - @Override - public int read(String name, ByteBuffer dst) throws IOException { - if (System.getSecurityManager() != null) - checkAccess(file.getPathForPermissionCheck(), true, false); - - if (dst.isReadOnly()) - throw new IllegalArgumentException("Read-only buffer"); - int pos = dst.position(); - int lim = dst.limit(); - assert (pos <= lim); - int rem = (pos <= lim ? 
lim - pos : 0); - - NativeBuffer nb; - long address; - if (dst instanceof sun.nio.ch.DirectBuffer) { - nb = null; - address = ((sun.nio.ch.DirectBuffer)dst).address() + pos; - } else { - // substitute with native buffer - nb = NativeBuffers.getNativeBuffer(rem); - address = nb.address(); - } - - int fd = -1; - try { - fd = file.openForAttributeAccess(followLinks); - } catch (UnixException x) { - x.rethrowAsIOException(file); - } - try { - try { - int n = fgetxattr(fd, nameAsBytes(file,name), address, rem); - - // if remaining is zero then fgetxattr returns the size - if (rem == 0) { - if (n > 0) - throw new UnixException(ERANGE); - return 0; - } - - // copy from buffer into backing array if necessary - if (nb != null) { - int off = dst.arrayOffset() + pos + Unsafe.ARRAY_BYTE_BASE_OFFSET; - unsafe.copyMemory(null, address, dst.array(), off, n); - } - dst.position(pos + n); - return n; - } catch (UnixException x) { - String msg = (x.errno() == ERANGE) ? - "Insufficient space in buffer" : x.getMessage(); - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "Error reading extended attribute '" + name + "': " + msg); - } finally { - close(fd); - } - } finally { - if (nb != null) - nb.release(); - } + super(file, followLinks); } @Override - public int write(String name, ByteBuffer src) throws IOException { - if (System.getSecurityManager() != null) - checkAccess(file.getPathForPermissionCheck(), false, true); - - int pos = src.position(); - int lim = src.limit(); - assert (pos <= lim); - int rem = (pos <= lim ? lim - pos : 0); - - NativeBuffer nb; - long address; - if (src instanceof sun.nio.ch.DirectBuffer) { - nb = null; - address = ((sun.nio.ch.DirectBuffer)src).address() + pos; - } else { - // substitute with native buffer - nb = NativeBuffers.getNativeBuffer(rem); - address = nb.address(); - - if (src.hasArray()) { - // copy from backing array into buffer - int off = src.arrayOffset() + pos + Unsafe.ARRAY_BYTE_BASE_OFFSET; - unsafe.copyMemory(src.array(), off, null, address, rem); - } else { - // backing array not accessible so transfer via temporary array - byte[] tmp = new byte[rem]; - src.get(tmp); - src.position(pos); // reset position as write may fail - unsafe.copyMemory(tmp, Unsafe.ARRAY_BYTE_BASE_OFFSET, null, - address, rem); - } - } - - int fd = -1; - try { - fd = file.openForAttributeAccess(followLinks); - } catch (UnixException x) { - x.rethrowAsIOException(file); - } - try { - try { - fsetxattr(fd, nameAsBytes(file,name), address, rem); - src.position(pos + rem); - return rem; - } catch (UnixException x) { - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "Error writing extended attribute '" + name + "': " + - x.getMessage()); - } finally { - close(fd); - } - } finally { - if (nb != null) - nb.release(); - } + protected int maxNameLength() { + // see XATTR_MAXNAMELEN in https://github.com/apple/darwin-xnu/blob/master/bsd/sys/xattr.h + return 127; } - @Override - public void delete(String name) throws IOException { - if (System.getSecurityManager() != null) - checkAccess(file.getPathForPermissionCheck(), false, true); - - int fd = -1; - try { - fd = file.openForAttributeAccess(followLinks); - } catch (UnixException x) { - x.rethrowAsIOException(file); - } - try { - fremovexattr(fd, nameAsBytes(file,name)); - } catch (UnixException x) { - throw new FileSystemException(file.getPathForExceptionMessage(), - null, "Unable to delete extended attribute '" + name + "': " + x.getMessage()); - } finally { - close(fd); - } - } - - /** - * Used 
by copyTo/moveTo to copy extended attributes from source to target. - * - * @param ofd - * file descriptor for source file - * @param nfd - * file descriptor for target file - */ - static void copyExtendedAttributes(int ofd, int nfd) { - NativeBuffer buffer = null; - try { - - // call flistxattr to get list of extended attributes. - int size = 1024; - buffer = NativeBuffers.getNativeBuffer(size); - for (;;) { - try { - size = flistxattr(ofd, buffer.address(), size); - break; - } catch (UnixException x) { - // allocate larger buffer if required - if (x.errno() == ERANGE && size < 32*1024) { - buffer.release(); - size *= 2; - buffer = null; - buffer = NativeBuffers.getNativeBuffer(size); - continue; - } - - // unable to get list of attributes - return; - } - } - - // parse buffer as array of NULL-terminated C strings. - long address = buffer.address(); - int start = 0; - int pos = 0; - while (pos < size) { - if (unsafe.getByte(address + pos) == 0) { - // extract attribute name and copy attribute to target. - // FIXME: We can avoid needless copying by using address+pos - // as the address of the name. - int len = pos - start; - byte[] name = new byte[len]; - unsafe.copyMemory(null, address+start, name, - Unsafe.ARRAY_BYTE_BASE_OFFSET, len); - try { - copyExtendedAttribute(ofd, name, nfd); - } catch (UnixException ignore) { - // ignore - } - start = pos + 1; - } - pos++; - } - - } finally { - if (buffer != null) - buffer.release(); - } - } - - private static void copyExtendedAttribute(int ofd, byte[] name, int nfd) - throws UnixException - { - int size = fgetxattr(ofd, name, 0L, 0); - NativeBuffer buffer = NativeBuffers.getNativeBuffer(size); - try { - long address = buffer.address(); - size = fgetxattr(ofd, name, address, size); - fsetxattr(nfd, name, address, size); - } finally { - buffer.release(); - } - } } diff --git a/src/java.base/macosx/native/libjava/java_props_macosx.c b/src/java.base/macosx/native/libjava/java_props_macosx.c index 321f7583b26cd8b695930c6cea16175ace0f6afa..2189c1f94e33d896a90973dc441d5b475543087c 100644 --- a/src/java.base/macosx/native/libjava/java_props_macosx.c +++ b/src/java.base/macosx/native/libjava/java_props_macosx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -247,15 +247,35 @@ void setOSNameAndVersion(java_props_t *sprops) { [invoke getReturnValue:&osVer]; NSString *nsVerStr; - if (osVer.patchVersion == 0) { // Omit trailing ".0" - nsVerStr = [NSString stringWithFormat:@"%ld.%ld", - (long)osVer.majorVersion, (long)osVer.minorVersion]; + // Copy out the char* if running on version other than 10.16 Mac OS (10.16 == 11.x) + // or explicitly requesting version compatibility + if (!((long)osVer.majorVersion == 10 && (long)osVer.minorVersion >= 16) || + (getenv("SYSTEM_VERSION_COMPAT") != NULL)) { + if (osVer.patchVersion == 0) { // Omit trailing ".0" + nsVerStr = [NSString stringWithFormat:@"%ld.%ld", + (long)osVer.majorVersion, (long)osVer.minorVersion]; + } else { + nsVerStr = [NSString stringWithFormat:@"%ld.%ld.%ld", + (long)osVer.majorVersion, (long)osVer.minorVersion, (long)osVer.patchVersion]; + } + // Copy out the char* + osVersionCStr = strdup([nsVerStr UTF8String]); } else { - nsVerStr = [NSString stringWithFormat:@"%ld.%ld.%ld", - (long)osVer.majorVersion, (long)osVer.minorVersion, (long)osVer.patchVersion]; + // Version 10.16, without explicit env setting of SYSTEM_VERSION_COMPAT + // AKA 11.x; compute the version number from the letter in the ProductBuildVersion + NSDictionary *version = [NSDictionary dictionaryWithContentsOfFile : + @"/System/Library/CoreServices/SystemVersion.plist"]; + if (version != NULL) { + NSString *nsBuildVerStr = [version objectForKey : @"ProductBuildVersion"]; + if (nsBuildVerStr != NULL && nsBuildVerStr.length >= 3) { + int letter = [nsBuildVerStr characterAtIndex:2]; + if (letter >= 'B' && letter <= 'Z') { + int vers = letter - 'A' - 1; + asprintf(&osVersionCStr, "11.%d", vers); + } + } + } } - // Copy out the char* - osVersionCStr = strdup([nsVerStr UTF8String]); } // Fallback if running on pre-10.9 Mac OS if (osVersionCStr == NULL) { diff --git a/src/java.base/macosx/native/libnio/fs/BsdNativeDispatcher.c b/src/java.base/macosx/native/libnio/fs/BsdNativeDispatcher.c index e8296d4ea10e3a966cbe08b9f7df3989668e126c..056d08eecc071873c76f9d8852d8b29c4e516272 100644 --- a/src/java.base/macosx/native/libnio/fs/BsdNativeDispatcher.c +++ b/src/java.base/macosx/native/libnio/fs/BsdNativeDispatcher.c @@ -30,7 +30,6 @@ #include #include -#include #ifdef ST_RDONLY #define statfs statvfs #define getfsstat getvfsstat @@ -225,52 +224,3 @@ Java_sun_nio_fs_BsdNativeDispatcher_getmntonname0(JNIEnv *env, jclass this, return mntonname; } - -JNIEXPORT jint JNICALL -Java_sun_nio_fs_BsdNativeDispatcher_fgetxattr0(JNIEnv* env, jclass clazz, - jint fd, jlong nameAddress, jlong valueAddress, jint valueLen, jlong position, jint options) -{ - const char* name = jlong_to_ptr(nameAddress); - void* value = jlong_to_ptr(valueAddress); - - ssize_t res = fgetxattr(fd, name, value, valueLen, (u_int32_t)position, options); - if (res == (ssize_t)-1) - throwUnixException(env, errno); - return (jint)res; -} - -JNIEXPORT void JNICALL -Java_sun_nio_fs_BsdNativeDispatcher_fsetxattr0(JNIEnv* env, jclass clazz, - jint fd, jlong nameAddress, jlong valueAddress, jint valueLen, jlong position, jint options) -{ - const char* name = jlong_to_ptr(nameAddress); - void* value = jlong_to_ptr(valueAddress); - - int res = fsetxattr(fd, name, value, valueLen, (u_int32_t)position, options); - if (res == -1) - throwUnixException(env, errno); -} - -JNIEXPORT void JNICALL -Java_sun_nio_fs_BsdNativeDispatcher_fremovexattr0(JNIEnv* env, jclass clazz, - jint fd, jlong nameAddress, jint 
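On the java_props_macosx.c change above: for macOS 11 systems that report themselves as 10.16, the patch derives the user-visible version from the third character of ProductBuildVersion, which encodes the minor release. A worked transliteration of that letter arithmetic to Java (the real code is Objective-C; the build strings are examples):

    static String macOsVersionFromBuild(String build) {
        if (build == null || build.length() < 3) {
            return null; // fall back to the reported version
        }
        char letter = build.charAt(2);
        if (letter >= 'B' && letter <= 'Z') {
            // "20B29" -> 'B' -> 'B' - 'A' - 1 = 0 -> "11.0"
            // "20D64" -> 'D' -> 2               -> "11.2"
            return "11." + (letter - 'A' - 1);
        }
        return null;
    }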
options) -{ - const char* name = jlong_to_ptr(nameAddress); - - int res = fremovexattr(fd, name, options); - if (res == -1) - throwUnixException(env, errno); -} - -JNIEXPORT jint JNICALL -Java_sun_nio_fs_BsdNativeDispatcher_flistxattr0(JNIEnv* env, jclass clazz, - jint fd, jlong nameBufAddress, jint size, jint options) -{ - char* nameBuf = jlong_to_ptr(nameBufAddress); - - ssize_t res = flistxattr(fd, nameBuf, (size_t)size, options); - - if (res == (ssize_t)-1) - throwUnixException(env, errno); - return (jint)res; -} diff --git a/src/java.base/share/classes/com/sun/crypto/provider/CipherCore.java b/src/java.base/share/classes/com/sun/crypto/provider/CipherCore.java index c804bcc00b2c609ce4f84aec8996e2069892f897..cf7993a69fb13ba1eb2e47021dcd07f1aab7a82b 100644 --- a/src/java.base/share/classes/com/sun/crypto/provider/CipherCore.java +++ b/src/java.base/share/classes/com/sun/crypto/provider/CipherCore.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1240,16 +1240,19 @@ final class CipherCore { throw new ShortBufferException("output buffer too small"); } + int len; if (decrypting) { if (buffered > 0) { cipher.decrypt(buffer, 0, buffered, new byte[0], 0); } - return cipher.decryptFinal(src, dst); + len = cipher.decryptFinal(src, dst); } else { if (buffered > 0) { ((GaloisCounterMode)cipher).encrypt(buffer, 0, buffered); } - return cipher.encryptFinal(src, dst); + len = cipher.encryptFinal(src, dst); } + endDoFinal(); + return len; } } diff --git a/src/java.base/share/classes/com/sun/crypto/provider/DESKeyFactory.java b/src/java.base/share/classes/com/sun/crypto/provider/DESKeyFactory.java index f51de29ba8b67449215970cf4557859095fa9a66..691d45e1a1199874809576f546aa9a3607821d3f 100644 --- a/src/java.base/share/classes/com/sun/crypto/provider/DESKeyFactory.java +++ b/src/java.base/share/classes/com/sun/crypto/provider/DESKeyFactory.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -106,7 +106,7 @@ public final class DESKeyFactory extends SecretKeyFactorySpi { // Check if requested key spec is amongst the valid ones if ((keySpec != null) && - DESKeySpec.class.isAssignableFrom(keySpec)) { + keySpec.isAssignableFrom(DESKeySpec.class)) { return new DESKeySpec(key.getEncoded()); } else { diff --git a/src/java.base/share/classes/com/sun/crypto/provider/DESedeKeyFactory.java b/src/java.base/share/classes/com/sun/crypto/provider/DESedeKeyFactory.java index d2d2d7d47ea8b5806391d0ee78c2e5e929716060..339d664772a8489879e7002affa92b99ed605614 100644 --- a/src/java.base/share/classes/com/sun/crypto/provider/DESedeKeyFactory.java +++ b/src/java.base/share/classes/com/sun/crypto/provider/DESedeKeyFactory.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -102,7 +102,7 @@ public final class DESedeKeyFactory extends SecretKeyFactorySpi { && (key.getFormat().equalsIgnoreCase("RAW"))) { // Check if requested key spec is amongst the valid ones - if (DESedeKeySpec.class.isAssignableFrom(keySpec)) { + if (keySpec.isAssignableFrom(DESedeKeySpec.class)) { return new DESedeKeySpec(key.getEncoded()); } else { diff --git a/src/java.base/share/classes/com/sun/crypto/provider/DHKeyFactory.java b/src/java.base/share/classes/com/sun/crypto/provider/DHKeyFactory.java index 273d49eb0769a310a438f2da0d4680ef1720d743..6b716e5305c7797b567bb223465bbc3f4762059a 100644 --- a/src/java.base/share/classes/com/sun/crypto/provider/DHKeyFactory.java +++ b/src/java.base/share/classes/com/sun/crypto/provider/DHKeyFactory.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -145,7 +145,7 @@ public final class DHKeyFactory extends KeyFactorySpi { if (key instanceof javax.crypto.interfaces.DHPublicKey) { - if (DHPublicKeySpec.class.isAssignableFrom(keySpec)) { + if (keySpec.isAssignableFrom(DHPublicKeySpec.class)) { javax.crypto.interfaces.DHPublicKey dhPubKey = (javax.crypto.interfaces.DHPublicKey) key; params = dhPubKey.getParams(); @@ -153,7 +153,7 @@ public final class DHKeyFactory extends KeyFactorySpi { params.getP(), params.getG())); - } else if (X509EncodedKeySpec.class.isAssignableFrom(keySpec)) { + } else if (keySpec.isAssignableFrom(X509EncodedKeySpec.class)) { return keySpec.cast(new X509EncodedKeySpec(key.getEncoded())); } else { @@ -163,7 +163,7 @@ public final class DHKeyFactory extends KeyFactorySpi { } else if (key instanceof javax.crypto.interfaces.DHPrivateKey) { - if (DHPrivateKeySpec.class.isAssignableFrom(keySpec)) { + if (keySpec.isAssignableFrom(DHPrivateKeySpec.class)) { javax.crypto.interfaces.DHPrivateKey dhPrivKey = (javax.crypto.interfaces.DHPrivateKey)key; params = dhPrivKey.getParams(); @@ -171,7 +171,7 @@ public final class DHKeyFactory extends KeyFactorySpi { params.getP(), params.getG())); - } else if (PKCS8EncodedKeySpec.class.isAssignableFrom(keySpec)) { + } else if (keySpec.isAssignableFrom(PKCS8EncodedKeySpec.class)) { return keySpec.cast(new PKCS8EncodedKeySpec(key.getEncoded())); } else { diff --git a/src/java.base/share/classes/com/sun/crypto/provider/GaloisCounterMode.java b/src/java.base/share/classes/com/sun/crypto/provider/GaloisCounterMode.java index cde13fff1c247494c19cccdca6c3a6dbc9ccbc16..aa244c452d910813ddfbb53bfe961189b44fb51c 100644 --- a/src/java.base/share/classes/com/sun/crypto/provider/GaloisCounterMode.java +++ b/src/java.base/share/classes/com/sun/crypto/provider/GaloisCounterMode.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
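The isAssignableFrom reversals in the three KeyFactory classes above fix checks that were backwards: engineGetKeySpec must ask whether the produced spec can be returned as the requested type, not the other way around. A small demonstration of the difference:

    import java.security.spec.KeySpec;
    import javax.crypto.spec.DESKeySpec;

    public class SpecCheckDemo {
        public static void main(String[] args) {
            Class<?> requested = KeySpec.class; // caller asks for the base interface

            // Old check: "is the requested type a DESKeySpec?" -- false here,
            // so a perfectly valid request was rejected.
            System.out.println(DESKeySpec.class.isAssignableFrom(requested)); // false

            // New check: "can a DESKeySpec be handed back where 'requested'
            // is expected?" -- true, which is the intended behavior.
            System.out.println(requested.isAssignableFrom(DESKeySpec.class)); // true
        }
    }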
* * This code is free software; you can redistribute it and/or modify it @@ -907,6 +907,7 @@ final class GaloisCounterMode extends FeedbackCipher { // Decrypt the all the input data and put it into dst doLastBlock(buffer, ct, dst); restoreDst(dst); + src.position(src.limit()); // 'processed' from the gctr decryption operation, not ghash return processed; } diff --git a/src/java.base/share/classes/java/io/BufferedReader.java b/src/java.base/share/classes/java/io/BufferedReader.java index 80d3ca4c153302ddff582440bf7cd1ee0a759ad8..cfebaec2c2c58998d4eb6e6c94bc9badbf908c53 100644 --- a/src/java.base/share/classes/java/io/BufferedReader.java +++ b/src/java.base/share/classes/java/io/BufferedReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ package java.io; import java.util.Iterator; import java.util.NoSuchElementException; +import java.util.Objects; import java.util.Spliterator; import java.util.Spliterators; import java.util.stream.Stream; @@ -71,7 +72,7 @@ public class BufferedReader extends Reader { private Reader in; - private char cb[]; + private char[] cb; private int nChars, nextChar; private static final int INVALIDATED = -2; @@ -146,7 +147,7 @@ public class BufferedReader extends Reader { dst = delta; } else { /* Reallocate buffer to accommodate read-ahead limit */ - char ncb[] = new char[readAheadLimit]; + char[] ncb = new char[readAheadLimit]; System.arraycopy(cb, markedChar, ncb, 0, delta); cb = ncb; markedChar = 0; @@ -237,7 +238,8 @@ public class BufferedReader extends Reader { * attempts to read as many characters as possible by repeatedly invoking * the {@code read} method of the underlying stream. This iterated * {@code read} continues until one of the following conditions becomes - * true:
    + * true: + *
      * <ul>
      *
      *   <li> The specified number of characters have been read,
      *
@@ -248,7 +250,8 @@
      *      returns {@code false}, indicating that further input requests
      *      would block.
      *
-     * <p> If the first {@code read} on the underlying stream returns
+     *
+ * If the first {@code read} on the underlying stream returns * {@code -1} to indicate end-of-file then this method returns * {@code -1}. Otherwise this method returns the number of characters * actually read. @@ -264,23 +267,20 @@ public class BufferedReader extends Reader { * Thus redundant {@code BufferedReader}s will not copy data * unnecessarily. * - * @param cbuf Destination buffer - * @param off Offset at which to start storing characters - * @param len Maximum number of characters to read + * @param cbuf {@inheritDoc} + * @param off {@inheritDoc} + * @param len {@inheritDoc} * - * @return The number of characters read, or -1 if the end of the - * stream has been reached + * @return {@inheritDoc} * - * @throws IOException If an I/O error occurs * @throws IndexOutOfBoundsException {@inheritDoc} + * @throws IOException {@inheritDoc} */ - public int read(char cbuf[], int off, int len) throws IOException { + public int read(char[] cbuf, int off, int len) throws IOException { synchronized (lock) { ensureOpen(); - if ((off < 0) || (off > cbuf.length) || (len < 0) || - ((off + len) > cbuf.length) || ((off + len) < 0)) { - throw new IndexOutOfBoundsException(); - } else if (len == 0) { + Objects.checkFromIndexSize(off, len, cbuf.length); + if (len == 0) { return 0; } @@ -397,14 +397,7 @@ public class BufferedReader extends Reader { } /** - * Skips characters. - * - * @param n The number of characters to skip - * - * @return The number of characters actually skipped - * - * @throws IllegalArgumentException If {@code n} is negative. - * @throws IOException If an I/O error occurs + * {@inheritDoc} */ public long skip(long n) throws IOException { if (n < 0L) { diff --git a/src/java.base/share/classes/java/io/CharArrayReader.java b/src/java.base/share/classes/java/io/CharArrayReader.java index f49e3cfdef3983365c14f3160c568fe9a378c596..bd2d7e965cec5c0f7e378a89b17f20f521a65a15 100644 --- a/src/java.base/share/classes/java/io/CharArrayReader.java +++ b/src/java.base/share/classes/java/io/CharArrayReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,8 @@ package java.io; +import java.util.Objects; + /** * This class implements a character buffer that can be used as a * character-input stream. @@ -34,7 +36,7 @@ package java.io; */ public class CharArrayReader extends Reader { /** The character buffer. */ - protected char buf[]; + protected char[] buf; /** The current buffer position. */ protected int pos; @@ -52,7 +54,7 @@ public class CharArrayReader extends Reader { * Creates a CharArrayReader from the specified array of chars. 
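The bounds-check rewrites in BufferedReader (and in CharArrayReader below) replace the hand-rolled predicate with Objects.checkFromIndexSize, which performs the same overflow-safe range check and throws IndexOutOfBoundsException with a descriptive message. The two forms side by side:

    import java.util.Objects;

    static void checkBounds(char[] cbuf, int off, int len) {
        // Removed form:
        // if ((off < 0) || (off > cbuf.length) || (len < 0) ||
        //     ((off + len) > cbuf.length) || ((off + len) < 0))
        //     throw new IndexOutOfBoundsException();

        // Replacement: identical range semantics, including the int-overflow
        // case the '(off + len) < 0' clause used to catch.
        Objects.checkFromIndexSize(off, len, cbuf.length);
    }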
* @param buf Input buffer (not copied) */ - public CharArrayReader(char buf[]) { + public CharArrayReader(char[] buf) { this.buf = buf; this.pos = 0; this.count = buf.length; @@ -75,7 +77,7 @@ public class CharArrayReader extends Reader { * @param offset Offset of the first char to read * @param length Number of chars to read */ - public CharArrayReader(char buf[], int offset, int length) { + public CharArrayReader(char[] buf, int offset, int length) { if ((offset < 0) || (offset > buf.length) || (length < 0) || ((offset + length) < 0)) { throw new IllegalArgumentException(); @@ -109,22 +111,27 @@ public class CharArrayReader extends Reader { /** * Reads characters into a portion of an array. - * @param b Destination buffer - * @param off Offset at which to start storing characters - * @param len Maximum number of characters to read - * @return The actual number of characters read, or -1 if - * the end of the stream has been reached - * - * @throws IOException If an I/O error occurs - * @throws IndexOutOfBoundsException {@inheritDoc} + * + *
<p>
If {@code len} is zero, then no characters are read and {@code 0} is + * returned; otherwise, there is an attempt to read at least one character. + * If no character is available because the stream is at its end, the value + * {@code -1} is returned; otherwise, at least one character is read and + * stored into {@code cbuf}. + * + * @param cbuf {@inheritDoc} + * @param off {@inheritDoc} + * @param len {@inheritDoc} + * + * @return {@inheritDoc} + * + * @throws IndexOutOfBoundsException {@inheritDoc} + * @throws IOException {@inheritDoc} */ - public int read(char b[], int off, int len) throws IOException { + public int read(char[] cbuf, int off, int len) throws IOException { synchronized (lock) { ensureOpen(); - if ((off < 0) || (off > b.length) || (len < 0) || - ((off + len) > b.length) || ((off + len) < 0)) { - throw new IndexOutOfBoundsException(); - } else if (len == 0) { + Objects.checkFromIndexSize(off, len, cbuf.length); + if (len == 0) { return 0; } @@ -139,23 +146,26 @@ public class CharArrayReader extends Reader { if (len <= 0) { return 0; } - System.arraycopy(buf, pos, b, off, len); + System.arraycopy(buf, pos, cbuf, off, len); pos += len; return len; } } /** - * Skips characters. Returns the number of characters that were skipped. + * Skips characters. If the stream is already at its end before this method + * is invoked, then no characters are skipped and zero is returned. * *
<p>
The {@code n} parameter may be negative, even though the * {@code skip} method of the {@link Reader} superclass throws * an exception in this case. If {@code n} is negative, then * this method does nothing and returns {@code 0}. * - * @param n The number of characters to skip - * @return The number of characters actually skipped - * @throws IOException If the stream is closed, or an I/O error occurs + * @param n {@inheritDoc} + * + * @return {@inheritDoc} + * + * @throws IOException {@inheritDoc} */ public long skip(long n) throws IOException { synchronized (lock) { diff --git a/src/java.base/share/classes/java/io/DataInputStream.java b/src/java.base/share/classes/java/io/DataInputStream.java index b784a5ab50e99dd8f858ab75f840b4f847da2efe..606b37cd549ac944a2209a9f5ead5f34618ec20c 100644 --- a/src/java.base/share/classes/java/io/DataInputStream.java +++ b/src/java.base/share/classes/java/io/DataInputStream.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1994, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1994, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -595,8 +595,7 @@ loop: while (true) { int utflen = in.readUnsignedShort(); byte[] bytearr = null; char[] chararr = null; - if (in instanceof DataInputStream) { - DataInputStream dis = (DataInputStream)in; + if (in instanceof DataInputStream dis) { if (dis.bytearr.length < utflen){ dis.bytearr = new byte[utflen*2]; dis.chararr = new char[utflen*2]; diff --git a/src/java.base/share/classes/java/io/DataOutputStream.java b/src/java.base/share/classes/java/io/DataOutputStream.java index 4ea497fc7c0f554ca055e6a450fa119ccc4f1898..97e5c1f3c3eb71e4e7a2960fbdc549a8f11644a3 100644 --- a/src/java.base/share/classes/java/io/DataOutputStream.java +++ b/src/java.base/share/classes/java/io/DataOutputStream.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1994, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1994, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -369,8 +369,7 @@ public class DataOutputStream extends FilterOutputStream implements DataOutput { throw new UTFDataFormatException(tooLongMsg(str, utflen)); final byte[] bytearr; - if (out instanceof DataOutputStream) { - DataOutputStream dos = (DataOutputStream)out; + if (out instanceof DataOutputStream dos) { if (dos.bytearr == null || (dos.bytearr.length < (utflen + 2))) dos.bytearr = new byte[(utflen*2) + 2]; bytearr = dos.bytearr; diff --git a/src/java.base/share/classes/java/io/File.java b/src/java.base/share/classes/java/io/File.java index 1576b90b0d25e9d305c2b9febf8103c5ccb23f94..9166b8022140a089e8e6cec36d2e6f1641ec6e5f 100644 --- a/src/java.base/share/classes/java/io/File.java +++ b/src/java.base/share/classes/java/io/File.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1994, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1994, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1376,7 +1376,9 @@ public class File * file from one filesystem to another, it might not be atomic, and it * might not succeed if a file with the destination abstract pathname * already exists. 
The return value should always be checked to make sure - * that the rename operation was successful. + * that the rename operation was successful. As instances of {@code File} + * are immutable, this File object is not changed to name the destination + * file or directory. * *
<p>
Note that the {@link java.nio.file.Files} class defines the {@link * java.nio.file.Files#move move} method to move or rename a file in a diff --git a/src/java.base/share/classes/java/io/FilePermission.java b/src/java.base/share/classes/java/io/FilePermission.java index a9268113443dff16b854c83f1b74542d374a4926..40d057f949562fde2113f8039ed644a925107fde 100644 --- a/src/java.base/share/classes/java/io/FilePermission.java +++ b/src/java.base/share/classes/java/io/FilePermission.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -566,11 +566,9 @@ public final class FilePermission extends Permission implements Serializable { */ @Override public boolean implies(Permission p) { - if (!(p instanceof FilePermission)) + if (!(p instanceof FilePermission that)) return false; - FilePermission that = (FilePermission) p; - // we get the effective mask. i.e., the "and" of this and that. // They must be equal to that.mask for implies to return true. @@ -791,11 +789,9 @@ public final class FilePermission extends Permission implements Serializable { if (obj == this) return true; - if (! (obj instanceof FilePermission)) + if (! (obj instanceof FilePermission that)) return false; - FilePermission that = (FilePermission) obj; - if (this.invalid || that.invalid) { return false; } @@ -1150,15 +1146,13 @@ final class FilePermissionCollection extends PermissionCollection */ @Override public void add(Permission permission) { - if (! (permission instanceof FilePermission)) + if (! (permission instanceof FilePermission fp)) throw new IllegalArgumentException("invalid permission: "+ permission); if (isReadOnly()) throw new SecurityException( "attempt to add a Permission to a readonly PermissionCollection"); - FilePermission fp = (FilePermission)permission; - // Add permission to map if it is absent, or replace with new // permission if applicable. perms.merge(fp.getName(), fp, @@ -1195,11 +1189,9 @@ final class FilePermissionCollection extends PermissionCollection */ @Override public boolean implies(Permission permission) { - if (! (permission instanceof FilePermission)) + if (! (permission instanceof FilePermission fperm)) return false; - FilePermission fperm = (FilePermission) permission; - int desired = fperm.getMask(); int effective = 0; int needed = desired; diff --git a/src/java.base/share/classes/java/io/FilterReader.java b/src/java.base/share/classes/java/io/FilterReader.java index 1fe5aceaf5dc87823df0e6d388cf6775e1cfa04f..17b9ffad6d1f650f96527a2894627817e4b46b00 100644 --- a/src/java.base/share/classes/java/io/FilterReader.java +++ b/src/java.base/share/classes/java/io/FilterReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,19 +66,19 @@ public abstract class FilterReader extends Reader { } /** - * Reads characters into a portion of an array. 
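Editor's aside: many hunks in this patch (FilePermission above, and DataInputStream and DataOutputStream earlier) convert the test-then-cast idiom to pattern matching for instanceof, final in Java 16. A minimal sketch with an illustrative class name:

    final class OldVsNewInstanceof {
        static int lengthOld(Object o) {
            if (o instanceof String) {
                String s = (String) o;   // pre-16 idiom: test, then cast
                return s.length();
            }
            return -1;
        }

        static int lengthNew(Object o) {
            if (o instanceof String s) { // pattern match: test and bind at once
                return s.length();
            }
            return -1;
        }

        public static void main(String[] args) {
            System.out.println(lengthOld("abc")); // 3
            System.out.println(lengthNew("abc")); // 3
            System.out.println(lengthNew(42));    // -1: no binding occurs
        }
    }

The binding variable is in scope only where the test is known true, which is what lets the patch delete the separate declaration-and-cast lines.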
- * - * @throws IOException If an I/O error occurs - * @throws IndexOutOfBoundsException {@inheritDoc} + * {@inheritDoc} + * @throws IndexOutOfBoundsException {@inheritDoc} */ - public int read(char cbuf[], int off, int len) throws IOException { + public int read(char[] cbuf, int off, int len) throws IOException { return in.read(cbuf, off, len); } /** - * Skips characters. + * {@inheritDoc} * - * @throws IOException If an I/O error occurs + * @throws IllegalArgumentException If {@code n} is negative and the + * contained {@code Reader}'s {@code skip} method throws an + * IllegalArgumentException for a negative parameter */ public long skip(long n) throws IOException { return in.skip(n); diff --git a/src/java.base/share/classes/java/io/InputStreamReader.java b/src/java.base/share/classes/java/io/InputStreamReader.java index ca7db8aecee1aaa9adde6b5d9ab169b01f65ba62..b346183c6627fc529925218c84bb6e88dff4e477 100644 --- a/src/java.base/share/classes/java/io/InputStreamReader.java +++ b/src/java.base/share/classes/java/io/InputStreamReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -162,20 +162,11 @@ public class InputStreamReader extends Reader { } /** - * Reads characters into a portion of an array. - * - * @param cbuf Destination buffer - * @param offset Offset at which to start storing characters - * @param length Maximum number of characters to read - * - * @return The number of characters read, or -1 if the end of the - * stream has been reached - * - * @throws IOException If an I/O error occurs - * @throws IndexOutOfBoundsException {@inheritDoc} + * {@inheritDoc} + * @throws IndexOutOfBoundsException {@inheritDoc} */ - public int read(char cbuf[], int offset, int length) throws IOException { - return sd.read(cbuf, offset, length); + public int read(char[] cbuf, int off, int len) throws IOException { + return sd.read(cbuf, off, len); } /** diff --git a/src/java.base/share/classes/java/io/LineNumberReader.java b/src/java.base/share/classes/java/io/LineNumberReader.java index 49e6a2ea99a3f98a988abbe1609147431d35e2e0..7b3f749a6cfde4d22016c48bd1f1ba3cde3f734a 100644 --- a/src/java.base/share/classes/java/io/LineNumberReader.java +++ b/src/java.base/share/classes/java/io/LineNumberReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -162,28 +162,29 @@ public class LineNumberReader extends BufferedReader { } /** - * Read characters into a portion of an array. - * Line terminators are compressed into single newline + * Reads characters into a portion of an array. This method will block + * until some input is available, an I/O error occurs, or the end of the + * stream is reached. + * + *
<p>
If {@code len} is zero, then no characters are read and {@code 0} is + * returned; otherwise, there is an attempt to read at least one character. + * If no character is available because the stream is at its end, the value + * {@code -1} is returned; otherwise, at least one character is read and + * stored into {@code cbuf}. + * + *
<p>
Line terminators are compressed into single newline * ('\n') characters. The current line number is incremented whenever a * line terminator is read, or when the end of the stream is reached and * the last character in the stream is not a line terminator. * - * @param cbuf - * Destination buffer - * - * @param off - * Offset at which to start storing characters - * - * @param len - * Maximum number of characters to read + * @param cbuf {@inheritDoc} + * @param off {@inheritDoc} + * @param len {@inheritDoc} * - * @return The number of characters read, or -1 if the end of the stream - * has already been reached - * - * @throws IOException - * If an I/O error occurs + * @return {@inheritDoc} * * @throws IndexOutOfBoundsException {@inheritDoc} + * @throws IOException {@inheritDoc} */ @SuppressWarnings("fallthrough") public int read(char cbuf[], int off, int len) throws IOException { @@ -267,18 +268,7 @@ public class LineNumberReader extends BufferedReader { private char skipBuffer[] = null; /** - * Skip characters. - * - * @param n - * The number of characters to skip - * - * @return The number of characters actually skipped - * - * @throws IOException - * If an I/O error occurs - * - * @throws IllegalArgumentException - * If {@code n} is negative + * {@inheritDoc} */ public long skip(long n) throws IOException { if (n < 0) diff --git a/src/java.base/share/classes/java/io/ObjectInputStream.java b/src/java.base/share/classes/java/io/ObjectInputStream.java index 03d3aa8dc45ee6ef879f95ae3aca90124b019845..931a829df73cfe4437264e7066ef34fe3fc95714 100644 --- a/src/java.base/share/classes/java/io/ObjectInputStream.java +++ b/src/java.base/share/classes/java/io/ObjectInputStream.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,6 +48,7 @@ import java.util.concurrent.ConcurrentMap; import static java.io.ObjectStreamClass.processQueue; import jdk.internal.access.SharedSecrets; +import jdk.internal.event.DeserializationEvent; import jdk.internal.misc.Unsafe; import sun.reflect.misc.ReflectUtil; import sun.security.action.GetBooleanAction; @@ -1323,9 +1324,12 @@ public class ObjectInputStream } /** - * Invoke the serialization filter if non-null. + * Invokes the serialization filter if non-null. + * * If the filter rejects or an exception is thrown, throws InvalidClassException. * + * Logs and/or commits a {@code DeserializationEvent}, if configured. + * * @param clazz the class; may be null * @param arrayLength the array length requested; use {@code -1} if not creating an array * @throws InvalidClassException if it rejected by the filter or @@ -1333,11 +1337,12 @@ public class ObjectInputStream */ private void filterCheck(Class clazz, int arrayLength) throws InvalidClassException { + // Info about the stream is not available if overridden by subclass, return 0 + long bytesRead = (bin == null) ? 0 : bin.getBytesRead(); + RuntimeException ex = null; + ObjectInputFilter.Status status = null; + if (serialFilter != null) { - RuntimeException ex = null; - ObjectInputFilter.Status status; - // Info about the stream is not available if overridden by subclass, return 0 - long bytesRead = (bin == null) ? 
0 : bin.getBytesRead(); try { status = serialFilter.checkInput(new FilterValues(clazz, arrayLength, totalObjectRefs, depth, bytesRead)); @@ -1355,12 +1360,24 @@ public class ObjectInputStream status, clazz, arrayLength, totalObjectRefs, depth, bytesRead, Objects.toString(ex, "n/a")); } - if (status == null || - status == ObjectInputFilter.Status.REJECTED) { - InvalidClassException ice = new InvalidClassException("filter status: " + status); - ice.initCause(ex); - throw ice; - } + } + DeserializationEvent event = new DeserializationEvent(); + if (event.shouldCommit()) { + event.filterConfigured = serialFilter != null; + event.filterStatus = status != null ? status.name() : null; + event.type = clazz; + event.arrayLength = arrayLength; + event.objectReferences = totalObjectRefs; + event.depth = depth; + event.bytesRead = bytesRead; + event.exceptionType = ex != null ? ex.getClass() : null; + event.exceptionMessage = ex != null ? ex.getMessage() : null; + event.commit(); + } + if (serialFilter != null && (status == null || status == ObjectInputFilter.Status.REJECTED)) { + InvalidClassException ice = new InvalidClassException("filter status: " + status); + ice.initCause(ex); + throw ice; } } diff --git a/src/java.base/share/classes/java/io/ObjectStreamClass.java b/src/java.base/share/classes/java/io/ObjectStreamClass.java index 806de0aa2267251977f465ece1851459bbf8c007..ed4e3f10a5ab50665dc02b83dce83e29e7b62fcb 100644 --- a/src/java.base/share/classes/java/io/ObjectStreamClass.java +++ b/src/java.base/share/classes/java/io/ObjectStreamClass.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
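Editor's aside on the filterCheck rework above: rejection by a serialization filter still surfaces as InvalidClassException, and the new DeserializationEvent is now committed for every check when JFR is recording (we believe it is exposed as the jdk.Deserialization event). A minimal sketch using the public filter API; the reject-everything pattern "!*" is purely for demonstration:

    import java.io.*;

    class FilterDemo {
        public static void main(String[] args) throws Exception {
            byte[] bytes;
            try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
                 ObjectOutputStream oos = new ObjectOutputStream(bos)) {
                oos.writeObject(new java.util.Date());
                oos.flush();
                bytes = bos.toByteArray();
            }
            try (ObjectInputStream ois =
                     new ObjectInputStream(new ByteArrayInputStream(bytes))) {
                // Reject every class: the filter returns REJECTED,
                // so filterCheck throws InvalidClassException.
                ois.setObjectInputFilter(
                    ObjectInputFilter.Config.createFilter("!*"));
                ois.readObject();
            } catch (InvalidClassException expected) {
                System.out.println("rejected: " + expected.getMessage());
            }
        }
    }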
* * This code is free software; you can redistribute it and/or modify it @@ -510,7 +510,7 @@ public class ObjectStreamClass implements Serializable { AccessController.doPrivileged(new PrivilegedAction<>() { public Void run() { if (isEnum) { - suid = Long.valueOf(0); + suid = 0L; fields = NO_FIELDS; return null; } @@ -555,7 +555,7 @@ public class ObjectStreamClass implements Serializable { } }); } else { - suid = Long.valueOf(0); + suid = 0L; fields = NO_FIELDS; } @@ -673,7 +673,7 @@ public class ObjectStreamClass implements Serializable { this.superDesc = superDesc; isProxy = true; serializable = true; - suid = Long.valueOf(0); + suid = 0L; fields = NO_FIELDS; if (osc != null) { localDesc = osc; @@ -698,7 +698,7 @@ public class ObjectStreamClass implements Serializable { ObjectStreamClass superDesc) throws InvalidClassException { - long suid = Long.valueOf(model.getSerialVersionUID()); + long suid = model.getSerialVersionUID(); ObjectStreamClass osc = null; if (cl != null) { osc = lookup(cl, true); @@ -796,7 +796,7 @@ public class ObjectStreamClass implements Serializable { throws IOException, ClassNotFoundException { name = in.readUTF(); - suid = Long.valueOf(in.readLong()); + suid = in.readLong(); isProxy = false; byte flags = in.readByte(); @@ -1846,7 +1846,7 @@ public class ObjectStreamClass implements Serializable { int mask = Modifier.STATIC | Modifier.FINAL; if ((f.getModifiers() & mask) == mask) { f.setAccessible(true); - return Long.valueOf(f.getLong(null)); + return f.getLong(null); } } catch (Exception ex) { } @@ -2400,8 +2400,7 @@ public class ObjectStreamClass implements Serializable { return true; } - if (obj instanceof FieldReflectorKey) { - FieldReflectorKey other = (FieldReflectorKey) obj; + if (obj instanceof FieldReflectorKey other) { Class referent; return (nullClass ? other.nullClass : ((referent = get()) != null) && @@ -2597,8 +2596,7 @@ public class ObjectStreamClass implements Serializable { @Override public final boolean equals(Object obj) { - if (!(obj instanceof Key)) return false; - Key other = (Key) obj; + if (!(obj instanceof Key other)) return false; int n = length(); if (n != other.length()) return false; for (int i = 0; i < n; i++) if (fieldType(i) != other.fieldType(i)) return false; diff --git a/src/java.base/share/classes/java/io/PipedReader.java b/src/java.base/share/classes/java/io/PipedReader.java index 7741362bdec06fe9101870c2e87152ef885c2c0d..6dad73dd7c9fc52bdb7fc9a692aaa57981820d7a 100644 --- a/src/java.base/share/classes/java/io/PipedReader.java +++ b/src/java.base/share/classes/java/io/PipedReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ package java.io; +import java.util.Objects; /** * Piped character-input streams. @@ -270,23 +271,22 @@ public class PipedReader extends Reader { } /** - * Reads up to {@code len} characters of data from this piped - * stream into an array of characters. Less than {@code len} characters - * will be read if the end of the data stream is reached or if - * {@code len} exceeds the pipe's buffer size. This method - * blocks until at least one character of input is available. + * {@inheritDoc} * - * @param cbuf the buffer into which the data is read. - * @param off the start offset of the data. 
- * @param len the maximum number of characters read. - * @return the total number of characters read into the buffer, or - * {@code -1} if there is no more data because the end of - * the stream has been reached. + *
<p>
Fewer than {@code len} characters will be read if + * {@code len} exceeds the pipe's buffer size. + * + * @param cbuf {@inheritDoc} + * @param off {@inheritDoc} + * @param len {@inheritDoc} + * + * @return {@inheritDoc} + * + * @throws IndexOutOfBoundsException {@inheritDoc} * @throws IOException if the pipe is * {@code broken}, * {@link #connect(java.io.PipedWriter) unconnected}, closed, * or an I/O error occurs. - * @throws IndexOutOfBoundsException {@inheritDoc} */ public synchronized int read(char cbuf[], int off, int len) throws IOException { if (!connected) { @@ -298,10 +298,8 @@ public class PipedReader extends Reader { throw new IOException("Write end dead"); } - if ((off < 0) || (off > cbuf.length) || (len < 0) || - ((off + len) > cbuf.length) || ((off + len) < 0)) { - throw new IndexOutOfBoundsException(); - } else if (len == 0) { + Objects.checkFromIndexSize(off, len, cbuf.length); + if (len == 0) { return 0; } diff --git a/src/java.base/share/classes/java/io/PrintStream.java b/src/java.base/share/classes/java/io/PrintStream.java index b34f6c54e12ee0b52b492ec92b0e07e93b1c58fd..bb4ca6e796e7e05d3a37bf93321151641d9defbd 100644 --- a/src/java.base/share/classes/java/io/PrintStream.java +++ b/src/java.base/share/classes/java/io/PrintStream.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -476,8 +476,7 @@ public class PrintStream extends FilterOutputStream public boolean checkError() { if (out != null) flush(); - if (out instanceof java.io.PrintStream) { - PrintStream ps = (PrintStream) out; + if (out instanceof PrintStream ps) { return ps.checkError(); } return trouble; diff --git a/src/java.base/share/classes/java/io/PrintWriter.java b/src/java.base/share/classes/java/io/PrintWriter.java index a338af28e8e18e095cce33246cbbb88c4a3931c3..67edd2c7bac54f06cfb6d2dac28da6e1073f3992 100644 --- a/src/java.base/share/classes/java/io/PrintWriter.java +++ b/src/java.base/share/classes/java/io/PrintWriter.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -432,8 +432,7 @@ public class PrintWriter extends Writer { if (out != null) { flush(); } - if (out instanceof java.io.PrintWriter) { - PrintWriter pw = (PrintWriter) out; + if (out instanceof PrintWriter pw) { return pw.checkError(); } else if (psOut != null) { return psOut.checkError(); diff --git a/src/java.base/share/classes/java/io/PushbackReader.java b/src/java.base/share/classes/java/io/PushbackReader.java index cd5fea7a7b993689b75673247fd9911a31f6055f..7e55b9fb6a06b54eb331680be472a39353f6f9f8 100644 --- a/src/java.base/share/classes/java/io/PushbackReader.java +++ b/src/java.base/share/classes/java/io/PushbackReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ package java.io; +import java.util.Objects; /** * A character-stream reader that allows characters to be pushed back into the @@ -92,28 +93,14 @@ public class PushbackReader extends FilterReader { } /** - * Reads characters into a portion of an array. - * - * @param cbuf Destination buffer - * @param off Offset at which to start writing characters - * @param len Maximum number of characters to read - * - * @return The number of characters read, or -1 if the end of the - * stream has been reached - * - * @throws IOException If an I/O error occurs - * @throws IndexOutOfBoundsException {@inheritDoc} + * {@inheritDoc} */ - public int read(char cbuf[], int off, int len) throws IOException { + public int read(char[] cbuf, int off, int len) throws IOException { synchronized (lock) { ensureOpen(); try { - if (len <= 0) { - if (len < 0) { - throw new IndexOutOfBoundsException(); - } else if ((off < 0) || (off > cbuf.length)) { - throw new IndexOutOfBoundsException(); - } + Objects.checkFromIndexSize(off, len, cbuf.length); + if (len == 0) { return 0; } int avail = buf.length - pos; @@ -172,7 +159,7 @@ public class PushbackReader extends FilterReader { * @throws IOException If there is insufficient room in the pushback * buffer, or if some other I/O error occurs */ - public void unread(char cbuf[], int off, int len) throws IOException { + public void unread(char[] cbuf, int off, int len) throws IOException { synchronized (lock) { ensureOpen(); if (len > pos) @@ -193,7 +180,7 @@ public class PushbackReader extends FilterReader { * @throws IOException If there is insufficient room in the pushback * buffer, or if some other I/O error occurs */ - public void unread(char cbuf[]) throws IOException { + public void unread(char[] cbuf) throws IOException { unread(cbuf, 0, cbuf.length); } @@ -254,15 +241,7 @@ public class PushbackReader extends FilterReader { } /** - * Skips characters. This method will block until some characters are - * available, an I/O error occurs, or the end of the stream is reached. - * - * @param n The number of characters to skip - * - * @return The number of characters actually skipped - * - * @throws IllegalArgumentException If {@code n} is negative. - * @throws IOException If an I/O error occurs + * {@inheritDoc} */ public long skip(long n) throws IOException { if (n < 0L) diff --git a/src/java.base/share/classes/java/io/Reader.java b/src/java.base/share/classes/java/io/Reader.java index c341924a4ec6957a8dd38c8a28c3aeee3ecc0e5b..54262b5209950271daf28cdbdd57e378e7fbb999 100644 --- a/src/java.base/share/classes/java/io/Reader.java +++ b/src/java.base/share/classes/java/io/Reader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -253,10 +253,10 @@ public abstract class Reader implements Readable, Closeable { * @return The number of characters read, or -1 if the end of the * stream has been reached * - * @throws IOException If an I/O error occurs * @throws IndexOutOfBoundsException * If {@code off} is negative, or {@code len} is negative, * or {@code len} is greater than {@code cbuf.length - off} + * @throws IOException If an I/O error occurs */ public abstract int read(char cbuf[], int off, int len) throws IOException; @@ -269,6 +269,8 @@ public abstract class Reader implements Readable, Closeable { /** * Skips characters. This method will block until some characters are * available, an I/O error occurs, or the end of the stream is reached. + * If the stream is already at its end before this method is invoked, + * then no characters are skipped and zero is returned. * * @param n The number of characters to skip * diff --git a/src/java.base/share/classes/java/io/StringReader.java b/src/java.base/share/classes/java/io/StringReader.java index 2d18234583e81ae4b1365199206d25059766cb13..caef7471e3034a3a543dbd322efbbbe8471be844 100644 --- a/src/java.base/share/classes/java/io/StringReader.java +++ b/src/java.base/share/classes/java/io/StringReader.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ package java.io; +import java.util.Objects; /** * A character stream whose source is a string. @@ -76,23 +77,26 @@ public class StringReader extends Reader { /** * Reads characters into a portion of an array. * - * @param cbuf Destination buffer - * @param off Offset at which to start writing characters - * @param len Maximum number of characters to read + *
<p>
If {@code len} is zero, then no characters are read and {@code 0} is + * returned; otherwise, there is an attempt to read at least one character. + * If no character is available because the stream is at its end, the value + * {@code -1} is returned; otherwise, at least one character is read and + * stored into {@code cbuf}. * - * @return The number of characters read, or -1 if the end of the - * stream has been reached + * @param cbuf {@inheritDoc} + * @param off {@inheritDoc} + * @param len {@inheritDoc} * - * @throws IOException If an I/O error occurs - * @throws IndexOutOfBoundsException {@inheritDoc} + * @return {@inheritDoc} + * + * @throws IndexOutOfBoundsException {@inheritDoc} + * @throws IOException {@inheritDoc} */ - public int read(char cbuf[], int off, int len) throws IOException { + public int read(char[] cbuf, int off, int len) throws IOException { synchronized (lock) { ensureOpen(); - if ((off < 0) || (off > cbuf.length) || (len < 0) || - ((off + len) > cbuf.length) || ((off + len) < 0)) { - throw new IndexOutOfBoundsException(); - } else if (len == 0) { + Objects.checkFromIndexSize(off, len, cbuf.length); + if (len == 0) { return 0; } if (next >= length) @@ -105,31 +109,35 @@ public class StringReader extends Reader { } /** - * Skips the specified number of characters in the stream. Returns - * the number of characters that were skipped. + * Skips characters. If the stream is already at its end before this method + * is invoked, then no characters are skipped and zero is returned. * - *
<p>
The {@code ns} parameter may be negative, even though the + *
<p>
The {@code n} parameter may be negative, even though the * {@code skip} method of the {@link Reader} superclass throws - * an exception in this case. Negative values of {@code ns} cause the + * an exception in this case. Negative values of {@code n} cause the * stream to skip backwards. Negative return values indicate a skip * backwards. It is not possible to skip backwards past the beginning of * the string. * *
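Editor's aside: a minimal sketch of the negative-skip behavior restated above, using the plain StringReader API; the expected outputs follow from the documented clamping at the beginning of the string:

    import java.io.StringReader;

    class SkipDemo {
        public static void main(String[] args) throws Exception {
            StringReader r = new StringReader("abcdef");
            System.out.println((char) r.read()); // 'a', position is now 1
            System.out.println(r.skip(3));       // 3, position is now 4
            System.out.println(r.skip(-10));     // -4: clamped at the start
            System.out.println((char) r.read()); // 'a' again
        }
    }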
<p>
If the entire string has been read or skipped, then this method has - * no effect and always returns 0. + * no effect and always returns {@code 0}. * - * @throws IOException If an I/O error occurs + * @param n {@inheritDoc} + * + * @return {@inheritDoc} + * + * @throws IOException {@inheritDoc} */ - public long skip(long ns) throws IOException { + public long skip(long n) throws IOException { synchronized (lock) { ensureOpen(); if (next >= length) return 0; // Bound skip by beginning and end of the source - long n = Math.min(length - next, ns); - n = Math.max(-next, n); - next += n; - return n; + long r = Math.min(length - next, n); + r = Math.max(-next, r); + next += r; + return r; } } diff --git a/src/java.base/share/classes/java/lang/Character.java b/src/java.base/share/classes/java/lang/Character.java index fb2431d045bd8004865c3479b4055bc408a914df..251cd6011698788116fd14ae700005eff1f10f4f 100644 --- a/src/java.base/share/classes/java/lang/Character.java +++ b/src/java.base/share/classes/java/lang/Character.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,7 +63,43 @@ import static java.lang.constant.ConstantDescs.DEFAULT_NAME; * http://www.unicode.org. *

* Character information is based on the Unicode Standard, version 13.0. - * + *

+ * The Java platform has supported different versions of the Unicode
+ * Standard over time. Upgrades to newer versions of the Unicode Standard
+ * occurred in the following Java releases, each indicating the new version:
+ *
+ * <table class="striped">
+ * <caption style="display:none">Shows Java releases and supported Unicode versions</caption>
+ * <thead>
+ * <tr><th scope="col">Java release</th>
+ *     <th scope="col">Unicode version</th></tr>
+ * </thead>
+ * <tbody>
+ * <tr><td>Java SE 15</td>
+ *     <td>Unicode 13.0</td></tr>
+ * <tr><td>Java SE 13</td>
+ *     <td>Unicode 12.1</td></tr>
+ * <tr><td>Java SE 12</td>
+ *     <td>Unicode 11.0</td></tr>
+ * <tr><td>Java SE 11</td>
+ *     <td>Unicode 10.0</td></tr>
+ * <tr><td>Java SE 9</td>
+ *     <td>Unicode 8.0</td></tr>
+ * <tr><td>Java SE 8</td>
+ *     <td>Unicode 6.2</td></tr>
+ * <tr><td>Java SE 7</td>
+ *     <td>Unicode 6.0</td></tr>
+ * <tr><td>Java SE 5.0</td>
+ *     <td>Unicode 4.0</td></tr>
+ * <tr><td>Java SE 1.4</td>
+ *     <td>Unicode 3.0</td></tr>
+ * <tr><td>JDK 1.1</td>
+ *     <td>Unicode 2.0</td></tr>
+ * <tr><td>JDK 1.0.2</td>
+ *     <td>Unicode 1.1.5</td></tr>
+ * </tbody>
+ * </table>
+ * Variations from these base Unicode versions, such as recognized appendixes, + * are documented elsewhere. *
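Editor's aside: the practical effect of the version table above is that character-property queries are release-dependent. An illustrative probe (U+32FF SQUARE ERA NAME REIWA entered Unicode 12.1, so per the table it reports as defined only on Java SE 13 and later):

    class UnicodeVersionDemo {
        public static void main(String[] args) {
            int cp = 0x32FF; // SQUARE ERA NAME REIWA, assigned in Unicode 12.1
            System.out.println(Character.isDefined(cp)); // true on JDK 13+
            System.out.println(Character.getName(cp));   // its Unicode name,
                                                         // or null if unassigned
        }
    }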

 * <h2><a id="unicode">Unicode Character Representations</a></h2>
 *
 * <p>
The {@code char} data type (and therefore the value that a @@ -9459,8 +9495,7 @@ class Character implements java.io.Serializable, Comparable, Constabl * @since 1.5 */ public static boolean isLowerCase(int codePoint) { - return CharacterData.of(codePoint).isLowerCase(codePoint) || - CharacterData.of(codePoint).isOtherLowercase(codePoint); + return CharacterData.of(codePoint).isLowerCase(codePoint); } /** @@ -9525,8 +9560,7 @@ class Character implements java.io.Serializable, Comparable, Constabl * @since 1.5 */ public static boolean isUpperCase(int codePoint) { - return CharacterData.of(codePoint).isUpperCase(codePoint) || - CharacterData.of(codePoint).isOtherUppercase(codePoint); + return CharacterData.of(codePoint).isUpperCase(codePoint); } /** diff --git a/src/java.base/share/classes/java/lang/CharacterData.java b/src/java.base/share/classes/java/lang/CharacterData.java index 55cef5a9a06d901a00c85d47b5925d4e77da11f5..18366afab8c45c55518a092e98ae707720429d29 100644 --- a/src/java.base/share/classes/java/lang/CharacterData.java +++ b/src/java.base/share/classes/java/lang/CharacterData.java @@ -54,14 +54,6 @@ abstract class CharacterData { return null; } - boolean isOtherLowercase(int ch) { - return false; - } - - boolean isOtherUppercase(int ch) { - return false; - } - boolean isOtherAlphabetic(int ch) { return false; } diff --git a/src/java.base/share/classes/java/lang/CharacterDataPrivateUse.java b/src/java.base/share/classes/java/lang/CharacterDataPrivateUse.java index 49c3496635437df3b6dd1c8c8f14d2117565acdc..f139f938df1b1ac01e56693beec7ac27df5c5774 100644 --- a/src/java.base/share/classes/java/lang/CharacterDataPrivateUse.java +++ b/src/java.base/share/classes/java/lang/CharacterDataPrivateUse.java @@ -35,17 +35,17 @@ class CharacterDataPrivateUse extends CharacterData { } int getType(int ch) { - return (ch & 0xFFFE) == 0xFFFE - ? Character.UNASSIGNED - : Character.PRIVATE_USE; + return (ch & 0xFFFE) == 0xFFFE + ? Character.UNASSIGNED + : Character.PRIVATE_USE; } boolean isJavaIdentifierStart(int ch) { - return false; + return false; } boolean isJavaIdentifierPart(int ch) { - return false; + return false; } boolean isUnicodeIdentifierStart(int ch) { @@ -85,21 +85,21 @@ class CharacterDataPrivateUse extends CharacterData { } boolean isLowerCase(int ch) { - return false; + return false; } boolean isUpperCase(int ch) { - return false; + return false; } boolean isWhitespace(int ch) { - return false; + return false; } byte getDirectionality(int ch) { - return (ch & 0xFFFE) == 0xFFFE - ? Character.DIRECTIONALITY_UNDEFINED - : Character.DIRECTIONALITY_LEFT_TO_RIGHT; + return (ch & 0xFFFE) == 0xFFFE + ? Character.DIRECTIONALITY_UNDEFINED + : Character.DIRECTIONALITY_LEFT_TO_RIGHT; } boolean isMirrored(int ch) { @@ -109,5 +109,3 @@ class CharacterDataPrivateUse extends CharacterData { static final CharacterData instance = new CharacterDataPrivateUse(); private CharacterDataPrivateUse() {}; } - - diff --git a/src/java.base/share/classes/java/lang/Comparable.java b/src/java.base/share/classes/java/lang/Comparable.java index 0b5075c5e5f82f02cdcac7831579ee16901437d1..334c515f610d5598196d53c7b383ad39c7a66932 100644 --- a/src/java.base/share/classes/java/lang/Comparable.java +++ b/src/java.base/share/classes/java/lang/Comparable.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -62,11 +62,15 @@ import java.util.*; * because {@code a} and {@code b} are equivalent from the sorted set's * perspective.
<p>
* - * Virtually all Java core classes that implement {@code Comparable} have natural - * orderings that are consistent with equals. One exception is - * {@code java.math.BigDecimal}, whose natural ordering equates - * {@code BigDecimal} objects with equal values and different precisions - * (such as 4.0 and 4.00).
<p>
+ * Virtually all Java core classes that implement {@code Comparable} + * have natural orderings that are consistent with equals. One + * exception is {@link java.math.BigDecimal}, whose {@linkplain + * java.math.BigDecimal#compareTo natural ordering} equates {@code + * BigDecimal} objects with equal numerical values and different + * representations (such as 4.0 and 4.00). For {@link + * java.math.BigDecimal#equals BigDecimal.equals()} to return true, + * the representation and numerical value of the two {@code + * BigDecimal} objects must be the same.
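Editor's aside: the BigDecimal caveat above in two lines, using the plain JDK API:

    import java.math.BigDecimal;

    class BigDecimalOrderingDemo {
        public static void main(String[] args) {
            BigDecimal a = new BigDecimal("4.0");
            BigDecimal b = new BigDecimal("4.00");
            System.out.println(a.compareTo(b)); // 0: same numerical value
            System.out.println(a.equals(b));    // false: different scale
        }
    }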
<p>
 *
 * For the mathematically inclined, the relation that defines
 * the natural ordering on a given class C is:<pre>{@code
@@ -83,7 +87,12 @@ import java.util.*;
 * the class's {@link Object#equals(Object) equals(Object)} method:<pre>{@code
 *     {(x, y) such that x.equals(y)}. }</pre><p>
* - * This interface is a member of the + * In other words, when a class's natural ordering is consistent with + * equals, the equivalence classes defined by the equivalence relation + * of the {@code equals} method and the equivalence classes defined by + * the quotient of the {@code compareTo} method are the same. + * + *
<p>
This interface is a member of the * * Java Collections Framework. * @@ -99,33 +108,28 @@ public interface Comparable { * negative integer, zero, or a positive integer as this object is less * than, equal to, or greater than the specified object. * - *
<p>
The implementor must ensure - * {@code sgn(x.compareTo(y)) == -sgn(y.compareTo(x))} - * for all {@code x} and {@code y}. (This - * implies that {@code x.compareTo(y)} must throw an exception iff - * {@code y.compareTo(x)} throws an exception.) + *
<p>
The implementor must ensure {@link Integer#signum + * signum}{@code (x.compareTo(y)) == -signum(y.compareTo(x))} for + * all {@code x} and {@code y}. (This implies that {@code + * x.compareTo(y)} must throw an exception if and only if {@code + * y.compareTo(x)} throws an exception.) * *
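Editor's aside: the first clause of the contract, as now phrased in terms of Integer.signum, checks mechanically. A sketch under an illustrative helper name:

    class SignumContractDemo {
        // Antisymmetry: signum(x.compareTo(y)) == -signum(y.compareTo(x)).
        static <T extends Comparable<T>> boolean antisymmetric(T x, T y) {
            return Integer.signum(x.compareTo(y))
                == -Integer.signum(y.compareTo(x));
        }

        public static void main(String[] args) {
            System.out.println(antisymmetric("apple", "banana")); // true
            System.out.println(antisymmetric(42, 7));             // true
        }
    }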
<p>
The implementor must also ensure that the relation is transitive: * {@code (x.compareTo(y) > 0 && y.compareTo(z) > 0)} implies * {@code x.compareTo(z) > 0}. * - *
<p>
Finally, the implementor must ensure that {@code x.compareTo(y)==0} - * implies that {@code sgn(x.compareTo(z)) == sgn(y.compareTo(z))}, for - * all {@code z}. + *
<p>
Finally, the implementor must ensure that {@code + * x.compareTo(y)==0} implies that {@code signum(x.compareTo(z)) + * == signum(y.compareTo(z))}, for all {@code z}. * - *
<p>
It is strongly recommended, but not strictly required that + * @apiNote + * It is strongly recommended, but not strictly required that * {@code (x.compareTo(y)==0) == (x.equals(y))}. Generally speaking, any * class that implements the {@code Comparable} interface and violates * this condition should clearly indicate this fact. The recommended * language is "Note: this class has a natural ordering that is * inconsistent with equals." * - *
<p>
In the foregoing description, the notation - * {@code sgn(}expression{@code )} designates the mathematical - * signum function, which is defined to return one of {@code -1}, - * {@code 0}, or {@code 1} according to whether the value of - * expression is negative, zero, or positive, respectively. - * * @param o the object to be compared. * @return a negative integer, zero, or a positive integer as this object * is less than, equal to, or greater than the specified object. diff --git a/src/java.base/share/classes/java/lang/Integer.java b/src/java.base/share/classes/java/lang/Integer.java index 2e33818963d325054d8cf42375755dbe47a9c6f4..358855550296e94161fb97885ea80802ff3bd49a 100644 --- a/src/java.base/share/classes/java/lang/Integer.java +++ b/src/java.base/share/classes/java/lang/Integer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1994, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1994, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -264,7 +264,7 @@ public final class Integer extends Number *
<blockquote>
 *  {@code Integer.toHexString(n).toUpperCase()}
 * </blockquote>
- * <p>
+ * * @apiNote * The {@link java.util.HexFormat} class provides formatting and parsing * of byte arrays and primitives to return a string or adding to an {@link Appendable}. @@ -627,7 +627,7 @@ public final class Integer extends Number */ if (s == null) { - throw new NumberFormatException("null"); + throw new NumberFormatException("Cannot parse null string"); } if (radix < Character.MIN_RADIX) { @@ -832,7 +832,7 @@ public final class Integer extends Number public static int parseUnsignedInt(String s, int radix) throws NumberFormatException { if (s == null) { - throw new NumberFormatException("null"); + throw new NumberFormatException("Cannot parse null string"); } int len = s.length(); diff --git a/src/java.base/share/classes/java/lang/Long.java b/src/java.base/share/classes/java/lang/Long.java index 896503210183164ba6bc7278e40eab6135fec84e..9b82cd7774a9a41b8bd7d6c7f56cbcd165cab93f 100644 --- a/src/java.base/share/classes/java/lang/Long.java +++ b/src/java.base/share/classes/java/lang/Long.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1994, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1994, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -299,7 +299,7 @@ public final class Long extends Number *
<blockquote>
 *  {@code Long.toHexString(n).toUpperCase()}
 * </blockquote>
- * <p>
+ * * @apiNote * The {@link java.util.HexFormat} class provides formatting and parsing * of byte arrays and primitives to return a string or adding to an {@link Appendable}. @@ -682,7 +682,7 @@ public final class Long extends Number throws NumberFormatException { if (s == null) { - throw new NumberFormatException("null"); + throw new NumberFormatException("Cannot parse null string"); } if (radix < Character.MIN_RADIX) { @@ -893,7 +893,7 @@ public final class Long extends Number public static long parseUnsignedLong(String s, int radix) throws NumberFormatException { if (s == null) { - throw new NumberFormatException("null"); + throw new NumberFormatException("Cannot parse null string"); } int len = s.length(); diff --git a/src/java.base/share/classes/java/lang/Math.java b/src/java.base/share/classes/java/lang/Math.java index bc7d92c98e2e035c5533c0f6463d91b481ae6526..424fbb3f014576b8c9aa8d4aec2853edbc583119 100644 --- a/src/java.base/share/classes/java/lang/Math.java +++ b/src/java.base/share/classes/java/lang/Math.java @@ -1886,32 +1886,21 @@ public final class Math { */ @IntrinsicCandidate public static float fma(float a, float b, float c) { - /* - * Since the double format has more than twice the precision - * of the float format, the multiply of a * b is exact in - * double. The add of c to the product then incurs one - * rounding error. Since the double format moreover has more - * than (2p + 2) precision bits compared to the p bits of the - * float format, the two roundings of (a * b + c), first to - * the double format and then secondarily to the float format, - * are equivalent to rounding the intermediate result directly - * to the float format. - * - * In terms of strictfp vs default-fp concerns related to - * overflow and underflow, since - * - * (Float.MAX_VALUE * Float.MAX_VALUE) << Double.MAX_VALUE - * (Float.MIN_VALUE * Float.MIN_VALUE) >> Double.MIN_VALUE - * - * neither the multiply nor add will overflow or underflow in - * double. Therefore, it is not necessary for this method to - * be declared strictfp to have reproducible - * behavior. However, it is necessary to explicitly store down - * to a float variable to avoid returning a value in the float - * extended value set. - */ - float result = (float)(((double) a * (double) b ) + (double) c); - return result; + if (Float.isFinite(a) && Float.isFinite(b) && Float.isFinite(c)) { + if (a == 0.0 || b == 0.0) { + return a * b + c; // Handled signed zero cases + } else { + return (new BigDecimal((double)a * (double)b) // Exact multiply + .add(new BigDecimal((double)c))) // Exact sum + .floatValue(); // One rounding + // to a float value + } + } else { + // At least one of a,b, and c is non-finite. The result + // will be non-finite as well and will be the same + // non-finite value under double as float arithmetic. + return (float)fma((double)a, (double)b, (double)c); + } } /** diff --git a/src/java.base/share/classes/java/lang/Object.java b/src/java.base/share/classes/java/lang/Object.java index a155e1e8ba8d41a96215ccd1da326e9ad5af1016..d0ad7d43cd9e3d078750f7c17c456b4437014a49 100644 --- a/src/java.base/share/classes/java/lang/Object.java +++ b/src/java.base/share/classes/java/lang/Object.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1994, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1994, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
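Editor's aside on the Math.fma(float, float, float) rewrite above: routing finite inputs through an exact BigDecimal product preserves the defining property of a fused multiply-add, a single rounding of a * b + c. A sketch of why that differs from two rounded steps, with inputs chosen so the product is inexact in float:

    class FmaDemo {
        public static void main(String[] args) {
            float a = 1f + 0x1p-12f;           // 1 + 2^-12
            float b = 1f + 0x1p-12f;
            float twoStep = a * b - 1f;        // product rounded, then subtracted
            float fused = Math.fma(a, b, -1f); // rounded once, at the very end
            System.out.println(twoStep);       // 2^-11: the 2^-24 term was lost
            System.out.println(fused);         // 2^-11 + 2^-24, strictly larger
        }
    }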
* * This code is free software; you can redistribute it and/or modify it @@ -78,15 +78,16 @@ public class Object { * used in {@code equals} comparisons on the object is modified. * This integer need not remain consistent from one execution of an * application to another execution of the same application. - *

  • If two objects are equal according to the {@code equals(Object)} - * method, then calling the {@code hashCode} method on each of - * the two objects must produce the same integer result. + *
  • If two objects are equal according to the {@link + * equals(Object) equals} method, then calling the {@code + * hashCode} method on each of the two objects must produce the + * same integer result. *
  • It is not required that if two objects are unequal - * according to the {@link java.lang.Object#equals(java.lang.Object)} - * method, then calling the {@code hashCode} method on each of the - * two objects must produce distinct integer results. However, the - * programmer should be aware that producing distinct integer results - * for unequal objects may improve the performance of hash tables. + * according to the {@link equals(Object) equals} method, then + * calling the {@code hashCode} method on each of the two objects + * must produce distinct integer results. However, the programmer + * should be aware that producing distinct integer results for + * unequal objects may improve the performance of hash tables. * * * @implSpec @@ -127,15 +128,27 @@ public class Object { *
  • For any non-null reference value {@code x}, * {@code x.equals(null)} should return {@code false}. * + * *
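Editor's aside: the joint equals/hashCode contract spelled out above, in miniature. The class is illustrative only, and it also happens to use the instanceof pattern form this patch adopts elsewhere:

    import java.util.HashSet;
    import java.util.Objects;
    import java.util.Set;

    final class Point {
        final int x, y;
        Point(int x, int y) { this.x = x; this.y = y; }

        @Override public boolean equals(Object o) {
            return (o instanceof Point p) && p.x == x && p.y == y;
        }

        // Required companion: equal objects must have equal hash codes.
        @Override public int hashCode() { return Objects.hash(x, y); }

        public static void main(String[] args) {
            Set<Point> set = new HashSet<>();
            set.add(new Point(1, 2));
            // true only because hashCode is consistent with equals;
            // omit hashCode and this lookup would likely fail.
            System.out.println(set.contains(new Point(1, 2)));
        }
    }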
<p>
    + * An equivalence relation partitions the elements it operates on + * into equivalence classes; all the members of an + * equivalence class are equal to each other. Members of an + * equivalence class are substitutable for each other, at least + * for some purposes. + * + * @implSpec * The {@code equals} method for class {@code Object} implements * the most discriminating possible equivalence relation on objects; * that is, for any non-null reference values {@code x} and * {@code y}, this method returns {@code true} if and only * if {@code x} and {@code y} refer to the same object * ({@code x == y} has the value {@code true}). - *
<p>
    - * Note that it is generally necessary to override the {@code hashCode} + * + * In other words, under the reference equality equivalence + * relation, each equivalence class only has a single element. + * + * @apiNote + * It is generally necessary to override the {@link hashCode hashCode} * method whenever this method is overridden, so as to maintain the * general contract for the {@code hashCode} method, which states * that equal objects must have equal hash codes. @@ -183,7 +196,8 @@ public class Object { * primitive fields or references to immutable objects, then it is usually * the case that no fields in the object returned by {@code super.clone} * need to be modified. - *
<p>
    + * + * @implSpec * The method {@code clone} for class {@code Object} performs a * specific cloning operation. First, if the class of this object does * not implement the interface {@code Cloneable}, then a @@ -214,13 +228,17 @@ public class Object { protected native Object clone() throws CloneNotSupportedException; /** - * Returns a string representation of the object. In general, the + * Returns a string representation of the object. + * @apiNote + * In general, the * {@code toString} method returns a string that * "textually represents" this object. The result should * be a concise but informative representation that is easy for a * person to read. * It is recommended that all subclasses override this method. - *
<p>
    + * The string output is not necessarily stable over time or across + * JVM invocations. + * @implSpec * The {@code toString} method for class {@code Object} * returns a string consisting of the name of the class of which the * object is an instance, the at-sign character `{@code @}', and diff --git a/src/java.base/share/classes/java/lang/String.java b/src/java.base/share/classes/java/lang/String.java index 5d5316f5f062f0df179c52197f51743eaf13cbdd..f757f2bd64d3911db285f34d61a0cafc8ebbd832 100644 --- a/src/java.base/share/classes/java/lang/String.java +++ b/src/java.base/share/classes/java/lang/String.java @@ -1008,6 +1008,27 @@ public final class String //////////////////////////////// utf8 //////////////////////////////////// + /** + * Decodes ASCII from the source byte array into the destination + * char array. Used via JavaLangAccess from UTF_8 and other charset + * decoders. + * + * @return the number of bytes successfully decoded, at most len + */ + /* package-private */ + static int decodeASCII(byte[] sa, int sp, char[] da, int dp, int len) { + if (!StringCoding.hasNegatives(sa, sp, len)) { + StringLatin1.inflate(sa, sp, da, dp, len); + return len; + } else { + int start = sp; + int end = sp + len; + while (sp < end && sa[sp] >= 0) { + da[dp++] = (char) sa[sp++]; + } + return sp - start; + } + } private static boolean isNotContinuation(int b) { return (b & 0xc0) != 0x80; diff --git a/src/java.base/share/classes/java/lang/System.java b/src/java.base/share/classes/java/lang/System.java index db1b20b3fd703a649b222a7e10e6375b4d95d9e0..706077f803ad71655d5dda3171b410f095418c81 100644 --- a/src/java.base/share/classes/java/lang/System.java +++ b/src/java.base/share/classes/java/lang/System.java @@ -1981,6 +1981,11 @@ public final class System { * Initialize the system class. Called after thread initialization. */ private static void initPhase1() { + + // register the shared secrets - do this first, since SystemProps.initProperties + // might initialize CharsetDecoders that rely on it + setJavaLangAccess(); + // VM might invoke JNU_NewStringPlatform() to set those encoding // sensitive properties (user.home, user.name, boot.class.path, etc.) // during "props" initialization. @@ -2026,8 +2031,6 @@ public final class System { Thread current = Thread.currentThread(); current.getThreadGroup().add(current); - // register shared secrets - setJavaLangAccess(); // Subsystems that are invoked during initialization can invoke // VM.isBooted() in order to avoid doing things that should @@ -2277,6 +2280,14 @@ public final class System { return String.getBytesUTF8NoRepl(s); } + public void inflateBytesToChars(byte[] src, int srcOff, char[] dst, int dstOff, int len) { + StringLatin1.inflate(src, srcOff, dst, dstOff, len); + } + + public int decodeASCII(byte[] src, int srcOff, char[] dst, int dstOff, int len) { + return String.decodeASCII(src, srcOff, dst, dstOff, len); + } + public void setCause(Throwable t, Throwable cause) { t.setCause(cause); } diff --git a/src/java.base/share/classes/java/lang/invoke/MethodHandles.java b/src/java.base/share/classes/java/lang/invoke/MethodHandles.java index fea8dc568010ab94ecfb8c8df6a484eb250dc860..9cf6d1f96930bc1bfd75ef0dfb11b27dcd58ff49 100644 --- a/src/java.base/share/classes/java/lang/invoke/MethodHandles.java +++ b/src/java.base/share/classes/java/lang/invoke/MethodHandles.java @@ -1681,7 +1681,7 @@ public class MethodHandles { * (used during {@link #findClass} invocations) * are determined by the lookup class' loader, * which may change due to this operation. 
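Editor's aside, stepping back to the String.decodeASCII helper added above: its contract is to decode the longest ASCII-clean prefix and report how far it got. A standalone sketch of the same logic, with assumptions noted: the real method is package-private and leans on intrinsified helpers such as StringCoding.hasNegatives, so this plain loop only mirrors its observable behavior:

    class AsciiPrefixDemo {
        static int decodeAsciiPrefix(byte[] src, char[] dst, int len) {
            int i = 0;
            while (i < len && src[i] >= 0) { // a negative byte is non-ASCII
                dst[i] = (char) src[i];      // widen byte to char
                i++;
            }
            return i;                        // bytes decoded, at most len
        }

        public static void main(String[] args) {
            byte[] in = {'h', 'i', (byte) 0xC3, (byte) 0xA9}; // "hi" + UTF-8 e-acute
            char[] out = new char[4];
            int n = decodeAsciiPrefix(in, out, in.length);
            System.out.println(n);                     // 2
            System.out.println(new String(out, 0, n)); // hi
        }
    }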
- * <p>
    + * * @param requestedLookupClass the desired lookup class for the new lookup object * @return a lookup object which reports the desired lookup class, or the same object * if there is no change diff --git a/src/java.base/share/classes/java/lang/reflect/Constructor.java b/src/java.base/share/classes/java/lang/reflect/Constructor.java index a3070717f4828065c7166d681b53cc11f441d6f1..ca01b7f423da380579e632a38183a4e8914b3530 100644 --- a/src/java.base/share/classes/java/lang/reflect/Constructor.java +++ b/src/java.base/share/classes/java/lang/reflect/Constructor.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -511,6 +511,7 @@ public final class Constructor extends Executable { /** * {@inheritDoc} * @jls 13.1 The Form of a Binary + * @jvms 4.6 Methods * @since 1.5 */ @Override diff --git a/src/java.base/share/classes/java/lang/reflect/Executable.java b/src/java.base/share/classes/java/lang/reflect/Executable.java index 9375aa18fe73432f42dbfea58905eb3221ff4239..c7daae19653e1725035c0e925f60aa5f04bfdc4b 100644 --- a/src/java.base/share/classes/java/lang/reflect/Executable.java +++ b/src/java.base/share/classes/java/lang/reflect/Executable.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -518,6 +518,7 @@ public abstract class Executable extends AccessibleObject * construct as defined by * The Java Language Specification. * @jls 13.1 The Form of a Binary + * @jvms 4.6 Methods */ public boolean isSynthetic() { return Modifier.isSynthetic(getModifiers()); diff --git a/src/java.base/share/classes/java/lang/reflect/Method.java b/src/java.base/share/classes/java/lang/reflect/Method.java index 4e28846dc5d8e3b4ba90c9c9e6e294bf2d26883b..f163964724cea5a0b9c92839c3a84418c10fe0c5 100644 --- a/src/java.base/share/classes/java/lang/reflect/Method.java +++ b/src/java.base/share/classes/java/lang/reflect/Method.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -567,12 +567,44 @@ public final class Method extends Executable { } /** - * Returns {@code true} if this method is a bridge - * method; returns {@code false} otherwise. + * {@return {@code true} if this method is a bridge + * method; returns {@code false} otherwise} * - * @return true if and only if this method is a bridge - * method as defined by the Java Language Specification. + * @apiNote + * A bridge method is a {@linkplain isSynthetic synthetic} method + * created by a Java compiler alongside a method originating from + * the source code. Bridge methods are used by Java compilers in + * various circumstances to span differences in Java programming + * language semantics and JVM semantics. + * + *
<p>
    One example use of bridge methods is as a technique for a + * Java compiler to support covariant overrides, where a + * subclass overrides a method and gives the new method a more + * specific return type than the method in the superclass. While + * the Java language specification forbids a class declaring two + * methods with the same parameter types but a different return + * type, the virtual machine does not. A common case where + * covariant overrides are used is for a {@link + * java.lang.Cloneable Cloneable} class where the {@link + * Object#clone() clone} method inherited from {@code + * java.lang.Object} is overridden and declared to return the type + * of the class. For example, {@code Object} declares + *

    {@code protected Object clone() throws CloneNotSupportedException {...}}
    + * and {@code EnumSet} declares its language-level {@linkplain + * java.util.EnumSet#clone() covariant override} + *
    {@code public EnumSet clone() {...}}
+ * If this technique were being used, the resulting class file for + * {@code EnumSet} would have two {@code clone} methods, one + * returning {@code EnumSet} and the second a bridge method + * returning {@code Object}. The bridge method is a JVM-level + * override of {@code Object.clone()}. The body of the {@code + * clone} bridge method calls its non-bridge counterpart and + * returns its result. * @since 1.5 + * + * @jls 8.4.8.3 Requirements in Overriding and Hiding + * @jls 15.12.4.5 Create Frame, Synchronize, Transfer Control + * @jvms 4.6 Methods */ public boolean isBridge() { return (getModifiers() & Modifier.BRIDGE) != 0; @@ -590,6 +622,7 @@ public final class Method extends Executable { /** * {@inheritDoc} * @jls 13.1 The Form of a Binary + * @jvms 4.6 Methods * @since 1.5 */ @Override diff --git a/src/java.base/share/classes/java/math/BigDecimal.java b/src/java.base/share/classes/java/math/BigDecimal.java index bb698a555b6760a67ccdd7a1e514fcab399b969e..f9f184b00bf7da175b229fc8a47ad2f5d0f0c02a 100644 --- a/src/java.base/share/classes/java/math/BigDecimal.java +++ b/src/java.base/share/classes/java/math/BigDecimal.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,18 +30,20 @@ package java.math; import static java.math.BigInteger.LONG_MASK; +import java.io.IOException; import java.util.Arrays; import java.util.Objects; /** - * Immutable, arbitrary-precision signed decimal numbers. A - * {@code BigDecimal} consists of an arbitrary precision integer - * unscaled value and a 32-bit integer scale. If zero - * or positive, the scale is the number of digits to the right of the - * decimal point. If negative, the unscaled value of the number is - * multiplied by ten to the power of the negation of the scale. The - * value of the number represented by the {@code BigDecimal} is - * therefore (unscaledValue × 10<sup>-scale</sup>). + * Immutable, arbitrary-precision signed decimal numbers. A {@code + * BigDecimal} consists of an arbitrary precision integer + * {@linkplain unscaledValue() unscaled value} and a 32-bit + * integer {@linkplain scale() scale}. If zero or positive, + * the scale is the number of digits to the right of the decimal + * point. If negative, the unscaled value of the number is multiplied + * by ten to the power of the negation of the scale. The value of the + * number represented by the {@code BigDecimal} is therefore + * (unscaledValue × 10<sup>-scale</sup>). * *
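A compact way to observe the bridge method described in the isBridge() note above is through core reflection. A sketch, not part of the patch; the class names are invented, but the two clone methods it prints are exactly the compiler behavior the javadoc describes.

import java.lang.reflect.Method;

// Sketch only, not from the patch: a covariant override of Object.clone()
// makes javac emit a synthetic bridge method alongside the source method.
class BridgeDemo {
    static class Box implements Cloneable {
        @Override
        public Box clone() {   // covariant override: returns Box, not Object
            try {
                return (Box) super.clone();
            } catch (CloneNotSupportedException e) {
                throw new AssertionError(e);
            }
        }
    }

    public static void main(String[] args) {
        for (Method m : Box.class.getDeclaredMethods()) {
            // Prints one clone() returning Box (bridge = false) and one
            // returning Object (bridge = true, synthetic = true).
            System.out.println(m.getReturnType().getSimpleName()
                    + " clone(): bridge = " + m.isBridge()
                    + ", synthetic = " + m.isSynthetic());
        }
    }
}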

    The {@code BigDecimal} class provides operations for * arithmetic, scale manipulation, rounding, comparison, hashing, and @@ -63,33 +65,38 @@ import java.util.Objects; *

    When a {@code MathContext} object is supplied with a precision * setting of 0 (for example, {@link MathContext#UNLIMITED}), * arithmetic operations are exact, as are the arithmetic methods - * which take no {@code MathContext} object. (This is the only - * behavior that was supported in releases prior to 5.) As a - * corollary of computing the exact result, the rounding mode setting - * of a {@code MathContext} object with a precision setting of 0 is - * not used and thus irrelevant. In the case of divide, the exact - * quotient could have an infinitely long decimal expansion; for - * example, 1 divided by 3. If the quotient has a nonterminating - * decimal expansion and the operation is specified to return an exact - * result, an {@code ArithmeticException} is thrown. Otherwise, the - * exact result of the division is returned, as done for other - * operations. + * which take no {@code MathContext} object. As a corollary of + * computing the exact result, the rounding mode setting of a {@code + * MathContext} object with a precision setting of 0 is not used and + * thus irrelevant. In the case of divide, the exact quotient could + * have an infinitely long decimal expansion; for example, 1 divided + * by 3. If the quotient has a nonterminating decimal expansion and + * the operation is specified to return an exact result, an {@code + * ArithmeticException} is thrown. Otherwise, the exact result of the + * division is returned, as done for other operations. * - *
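A minimal sketch (not from the patch) of the exact-arithmetic rule just described: with no MathContext the quotient must be exact, so a non-terminating expansion throws, while a nonzero precision rounds instead.

import java.math.BigDecimal;
import java.math.MathContext;

class ExactDivisionDemo {   // class name invented for this sketch
    public static void main(String[] args) {
        BigDecimal one = BigDecimal.ONE;
        BigDecimal three = BigDecimal.valueOf(3);
        // one.divide(three);  // would throw ArithmeticException:
        //                     // 1/3 has a non-terminating decimal expansion
        System.out.println(one.divide(three, MathContext.DECIMAL32)); // 0.3333333
    }
}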

    When the precision setting is not 0, the rules of - * {@code BigDecimal} arithmetic are broadly compatible with selected - * modes of operation of the arithmetic defined in ANSI X3.274-1996 - * and ANSI X3.274-1996/AM 1-2000 (section 7.4). Unlike those - * standards, {@code BigDecimal} includes many rounding modes, which - * were mandatory for division in {@code BigDecimal} releases prior - * to 5. Any conflicts between these ANSI standards and the - * {@code BigDecimal} specification are resolved in favor of - * {@code BigDecimal}. + *

    When the precision setting is not 0, the rules of {@code + * BigDecimal} arithmetic are broadly compatible with selected modes + * of operation of the arithmetic defined in ANSI X3.274-1996 and ANSI + * X3.274-1996/AM 1-2000 (section 7.4). Unlike those standards, + * {@code BigDecimal} includes many rounding modes. Any conflicts + * between these ANSI standards and the {@code BigDecimal} + * specification are resolved in favor of {@code BigDecimal}. * *

    Since the same numerical value can have different * representations (with different scales), the rules of arithmetic * and rounding must specify both the numerical result and the scale * used in the result's representation. * + * The different representations of the same numerical value are + * called members of the same cohort. The {@linkplain + * compareTo(BigDecimal) natural order} of {@code BigDecimal} + * considers members of the same cohort to be equal to each other. In + * contrast, the {@link equals equals} method requires both the + * numerical value and representation to be the same for equality to + * hold. The results of methods like {@link scale} and {@link + * unscaledValue} will differ for numerically equal values with + * different representations. * *
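The cohort distinction can be seen directly; a sketch, not from the patch:

import java.math.BigDecimal;

class CohortDemo {   // class name invented for this sketch
    public static void main(String[] args) {
        BigDecimal a = new BigDecimal("2.0");
        BigDecimal b = new BigDecimal("2.00");
        System.out.println(a.compareTo(b)); // 0: same cohort, numerically equal
        System.out.println(a.equals(b));    // false: representations differ
        System.out.println(a.scale() + ", " + b.scale());                 // 1, 2
        System.out.println(a.unscaledValue() + ", " + b.unscaledValue()); // 20, 200
    }
}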

    In general the rounding modes and precision setting determine * how operations return results with a limited number of digits when @@ -206,13 +213,71 @@ import java.util.Objects; * {@code NullPointerException} when passed a {@code null} object * reference for any input parameter. * - * @apiNote Care should be exercised if {@code BigDecimal} objects - * are used as keys in a {@link java.util.SortedMap SortedMap} or - * elements in a {@link java.util.SortedSet SortedSet} since - * {@code BigDecimal}'s natural ordering is inconsistent - * with equals. See {@link Comparable}, {@link - * java.util.SortedMap} or {@link java.util.SortedSet} for more - * information. + * @apiNote Care should be exercised if {@code BigDecimal} objects are + * used as keys in a {@link java.util.SortedMap SortedMap} or elements + * in a {@link java.util.SortedSet SortedSet} since {@code + * BigDecimal}'s {@linkplain compareTo(BigDecimal) natural + * ordering} is inconsistent with equals. See {@link + * Comparable}, {@link java.util.SortedMap} or {@link + * java.util.SortedSet} for more information. + * + *
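The practical consequence of an ordering that is inconsistent with equals, sketched with the standard collections named in the note above (not part of the patch):

import java.math.BigDecimal;
import java.util.HashSet;
import java.util.Set;
import java.util.TreeSet;

class SortedSetCaveatDemo {   // class name invented for this sketch
    public static void main(String[] args) {
        Set<BigDecimal> sorted = new TreeSet<>(); // ordered by compareTo
        Set<BigDecimal> hashed = new HashSet<>(); // keyed by equals/hashCode
        for (String s : new String[] { "2.0", "2.00" }) {
            sorted.add(new BigDecimal(s));
            hashed.add(new BigDecimal(s));
        }
        System.out.println(sorted.size()); // 1: 2.0 and 2.00 collapse into one element
        System.out.println(hashed.size()); // 2: different scale, different element
    }
}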

    Relation to IEEE 754 Decimal Arithmetic

    + * + * Starting with its 2008 revision, the IEEE 754 Standard for + * Floating-point Arithmetic has covered decimal formats and + * operations. While there are broad similarities in the decimal + * arithmetic defined by IEEE 754 and by this class, there are notable + * differences as well. The fundamental similarity shared by {@code + * BigDecimal} and IEEE 754 decimal arithmetic is the conceptual + * operation of computing the mathematical infinitely precise real + * number value of an operation and then mapping that real number to a + * representable decimal floating-point value under a rounding + * policy. The rounding policy is called a {@linkplain + * RoundingMode rounding mode} for {@code BigDecimal} and called a + * rounding-direction attribute in IEEE 754-2019. When the exact value + * is not representable, the rounding policy determines which of the + * two representable decimal values bracketing the exact value is + * selected as the computed result. The notion of a preferred + * scale/preferred exponent is also shared by both systems. + * + *
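A sketch of that shared conceptual operation (not from the patch): the infinitely precise real result is mapped to a representable value under the rounding policy of the supplied context.

import java.math.BigDecimal;
import java.math.MathContext;

class RoundingPolicyDemo {   // class name invented for this sketch
    public static void main(String[] args) {
        // sqrt(2) is irrational; the exact result is rounded to the
        // 16-digit value selected by the HALF_EVEN policy of DECIMAL64.
        System.out.println(BigDecimal.valueOf(2).sqrt(MathContext.DECIMAL64));
        // 1.414213562373095
    }
}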

    For differences, IEEE 754 includes several kinds of values not + * modeled by {@code BigDecimal} including negative zero, signed + * infinities, and NaN (not-a-number). IEEE 754 defines formats, which + * are parameterized by base (binary or decimal), number of digits of + * precision, and exponent range. A format determines the set of + * representable values. Most operations accept as input one or more + * values of a given format and produce a result in the same format. + * A {@code BigDecimal}'s {@linkplain scale() scale} is equivalent to + * negating an IEEE 754 value's exponent. {@code BigDecimal} values do + * not have a format in the same sense; all values have the same + * possible range of scale/exponent and the {@linkplain + * unscaledValue() unscaled value} has arbitrary precision. Instead, + * for the {@code BigDecimal} operations taking a {@code MathContext} + * parameter, if the {@code MathContext} has a nonzero precision, the + * set of possible representable values for the result is determined + * by the precision of the {@code MathContext} argument. For example + * in {@code BigDecimal}, if a nonzero three-digit number and a + * nonzero four-digit number are multiplied together in the context of + * a {@code MathContext} object having a precision of three, the + * result will have three digits (assuming no overflow or underflow, + * etc.). + * + *
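The three-digit-result example in the paragraph above, worked concretely (sketch, not from the patch):

import java.math.BigDecimal;
import java.math.MathContext;

class PrecisionDemo {   // class name invented for this sketch
    public static void main(String[] args) {
        MathContext mc = new MathContext(3); // 3 digits, HALF_UP by default
        BigDecimal product = new BigDecimal("123")     // three digits
                .multiply(new BigDecimal("4567"), mc); // four digits
        System.out.println(product); // 5.62E+5 (the exact product is 561741)
    }
}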

The rounding policies implemented by {@code BigDecimal} + * operations, indicated by {@linkplain RoundingMode rounding modes}, + * are a proper superset of the IEEE 754 rounding-direction + * attributes. + * + *
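One mode outside that correspondence, shown against a mode inside it (sketch, not from the patch):

import java.math.BigDecimal;
import java.math.RoundingMode;

class ExtraModesDemo {   // class name invented for this sketch
    public static void main(String[] args) {
        BigDecimal x = new BigDecimal("2.5");
        // HALF_UP corresponds to the IEEE 754-2019 roundTiesToAway attribute:
        System.out.println(x.setScale(0, RoundingMode.HALF_UP));   // 3
        // HALF_DOWN has no IEEE 754 rounding-direction counterpart:
        System.out.println(x.setScale(0, RoundingMode.HALF_DOWN)); // 2
    }
}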

{@code BigDecimal} arithmetic will most resemble IEEE 754 + * decimal arithmetic if a {@code MathContext} corresponding to an + * IEEE 754 decimal format, such as {@linkplain MathContext#DECIMAL64 + * decimal64} or {@linkplain MathContext#DECIMAL128 decimal128}, is + * used to round all starting values and intermediate operations. The + * numerical values computed can differ if the exponent range of the + * IEEE 754 format being approximated is exceeded since a {@code + * MathContext} does not constrain the scale of {@code BigDecimal} + * results. Operations that would generate a NaN or exact infinity, + * such as dividing by zero, throw an {@code ArithmeticException} in + * {@code BigDecimal} arithmetic. * * @see BigInteger * @see MathContext @@ -1675,7 +1740,7 @@ public class BigDecimal extends Number implements Comparable { * * @param divisor value by which this {@code BigDecimal} is to be divided. * @throws ArithmeticException if the exact quotient does not have a - * terminating decimal expansion + * terminating decimal expansion, including dividing by zero * @return {@code this / divisor} * @since 1.5 * @author Joseph D. Darcy @@ -1739,7 +1804,7 @@ public class BigDecimal extends Number implements Comparable { * @throws ArithmeticException if the result is inexact but the * rounding mode is {@code UNNECESSARY} or * {@code mc.precision == 0} and the quotient has a - * non-terminating decimal expansion. + * non-terminating decimal expansion, including dividing by zero * @since 1.5 */ public BigDecimal divide(BigDecimal divisor, MathContext mc) { @@ -3040,16 +3105,21 @@ public class BigDecimal extends Number implements Comparable { // Comparison Operations /** - * Compares this {@code BigDecimal} with the specified + * Compares this {@code BigDecimal} numerically with the specified * {@code BigDecimal}. Two {@code BigDecimal} objects that are * equal in value but have a different scale (like 2.0 and 2.00) - * are considered equal by this method. This method is provided - * in preference to individual methods for each of the six boolean - * comparison operators ({@literal <}, ==, - * {@literal >}, {@literal >=}, !=, {@literal <=}). The - * suggested idiom for performing these comparisons is: - * {@code (x.compareTo(y)} <op> {@code 0)}, where + * are considered equal by this method. Such values are in the + * same cohort. + * + * This method is provided in preference to individual methods for + * each of the six boolean comparison operators ({@literal <}, ==, + * {@literal >}, {@literal >=}, !=, {@literal <=}). The suggested + * idiom for performing these comparisons is: {@code + * (x.compareTo(y)} <op> {@code 0)}, where * <op> is one of the six comparison operators. + * + * @apiNote + * Note: this class has a natural ordering that is inconsistent with equals. * * @param val {@code BigDecimal} to which this {@code BigDecimal} is * to be compared. @@ -3125,12 +3195,23 @@ public class BigDecimal extends Number implements Comparable { } /** - * Compares this {@code BigDecimal} with the specified - * {@code Object} for equality. Unlike {@link - * #compareTo(BigDecimal) compareTo}, this method considers two - * {@code BigDecimal} objects equal only if they are equal in - * value and scale (thus 2.0 is not equal to 2.00 when compared by - * this method). + * Compares this {@code BigDecimal} with the specified {@code + * Object} for equality. 
Unlike {@link #compareTo(BigDecimal) + * compareTo}, this method considers two {@code BigDecimal} + * objects equal only if they are equal in value and + * scale. Therefore 2.0 is not equal to 2.00 when compared by this + * method since the former has [{@code BigInteger}, {@code scale}] + * components equal to [20, 1] while the latter has components + * equal to [200, 2]. + * + * @apiNote + * One example that shows how 2.0 and 2.00 are not + * substitutable for each other under some arithmetic operations + * is the pair of expressions:
    + * {@code new BigDecimal("2.0" ).divide(BigDecimal.valueOf(3), + * HALF_UP)} which evaluates to 0.7 and
    + * {@code new BigDecimal("2.00").divide(BigDecimal.valueOf(3), + * HALF_UP)} which evaluates to 0.67. * * @param x {@code Object} to which this {@code BigDecimal} is * to be compared. @@ -3142,9 +3223,8 @@ public class BigDecimal extends Number implements Comparable { */ @Override public boolean equals(Object x) { - if (!(x instanceof BigDecimal)) + if (!(x instanceof BigDecimal xDec)) return false; - BigDecimal xDec = (BigDecimal) x; if (x == this) return true; if (scale != xDec.scale) @@ -3193,8 +3273,13 @@ public class BigDecimal extends Number implements Comparable { // Hash Function /** - * Returns the hash code for this {@code BigDecimal}. Note that - * two {@code BigDecimal} objects that are numerically equal but + * Returns the hash code for this {@code BigDecimal}. + * The hash code is computed as a function of the {@linkplain + * unscaledValue() unscaled value} and the {@linkplain scale() + * scale} of this {@code BigDecimal}. + * + * @apiNote + * Two {@code BigDecimal} objects that are numerically equal but * differ in scale (like 2.0 and 2.00) will generally not * have the same hash code. * @@ -4214,7 +4299,7 @@ public class BigDecimal extends Number implements Comparable { */ @java.io.Serial private void readObject(java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { + throws IOException, ClassNotFoundException { // Read in all fields s.defaultReadObject(); // validate possibly bad fields @@ -4234,7 +4319,7 @@ public class BigDecimal extends Number implements Comparable { */ @java.io.Serial private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException { + throws IOException { // Must inflate to maintain compatible serial form. if (this.intVal == null) UnsafeHolder.setIntValVolatile(this, BigInteger.valueOf(this.intCompact)); diff --git a/src/java.base/share/classes/java/math/BigInteger.java b/src/java.base/share/classes/java/math/BigInteger.java index 79369e316be6243c9cf8b2c651d8e479b7dd048b..f2ee2c1d27a1b307b94e108bb4766ecb0ebe4cda 100644 --- a/src/java.base/share/classes/java/math/BigInteger.java +++ b/src/java.base/share/classes/java/math/BigInteger.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -3868,10 +3868,9 @@ public class BigInteger extends Number implements Comparable { if (x == this) return true; - if (!(x instanceof BigInteger)) + if (!(x instanceof BigInteger xInt)) return false; - BigInteger xInt = (BigInteger) x; if (xInt.signum != signum) return false; diff --git a/src/java.base/share/classes/java/math/MathContext.java b/src/java.base/share/classes/java/math/MathContext.java index 028ede34de6db9558840848ff4c129eb413e36d7..e53db0f05bd9b44e1bea08b58810032e0519a032 100644 --- a/src/java.base/share/classes/java/math/MathContext.java +++ b/src/java.base/share/classes/java/math/MathContext.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -69,39 +69,39 @@ public final class MathContext implements Serializable { /* ----- Public Properties ----- */ /** - * A {@code MathContext} object whose settings have the values - * required for unlimited precision arithmetic. - * The values of the settings are: - * - * precision=0 roundingMode=HALF_UP - * + * A {@code MathContext} object whose settings have the values + * required for unlimited precision arithmetic. + * The values of the settings are: {@code precision=0 roundingMode=HALF_UP} */ public static final MathContext UNLIMITED = new MathContext(0, RoundingMode.HALF_UP); /** - * A {@code MathContext} object with a precision setting - * matching the IEEE 754R Decimal32 format, 7 digits, and a - * rounding mode of {@link RoundingMode#HALF_EVEN HALF_EVEN}, the - * IEEE 754R default. + * A {@code MathContext} object with a precision setting + * matching the precision of the IEEE 754-2019 decimal32 format, 7 digits, and a + * rounding mode of {@link RoundingMode#HALF_EVEN HALF_EVEN}. + * Note the exponent range of decimal32 is not used for + * rounding. */ public static final MathContext DECIMAL32 = new MathContext(7, RoundingMode.HALF_EVEN); /** - * A {@code MathContext} object with a precision setting - * matching the IEEE 754R Decimal64 format, 16 digits, and a - * rounding mode of {@link RoundingMode#HALF_EVEN HALF_EVEN}, the - * IEEE 754R default. + * A {@code MathContext} object with a precision setting + * matching the precision of the IEEE 754-2019 decimal64 format, 16 digits, and a + * rounding mode of {@link RoundingMode#HALF_EVEN HALF_EVEN}. + * Note the exponent range of decimal64 is not used for + * rounding. */ public static final MathContext DECIMAL64 = new MathContext(16, RoundingMode.HALF_EVEN); /** - * A {@code MathContext} object with a precision setting - * matching the IEEE 754R Decimal128 format, 34 digits, and a - * rounding mode of {@link RoundingMode#HALF_EVEN HALF_EVEN}, the - * IEEE 754R default. + * A {@code MathContext} object with a precision setting + * matching the precision of the IEEE 754-2019 decimal128 format, 34 digits, and a + * rounding mode of {@link RoundingMode#HALF_EVEN HALF_EVEN}. + * Note the exponent range of decimal128 is not used for + * rounding. */ public static final MathContext DECIMAL128 = new MathContext(34, RoundingMode.HALF_EVEN); @@ -248,10 +248,8 @@ public final class MathContext implements Serializable { * settings as this object */ public boolean equals(Object x){ - MathContext mc; - if (!(x instanceof MathContext)) + if (!(x instanceof MathContext mc)) return false; - mc = (MathContext) x; return mc.precision == this.precision && mc.roundingMode == this.roundingMode; // no need for .equals() } diff --git a/src/java.base/share/classes/java/math/RoundingMode.java b/src/java.base/share/classes/java/math/RoundingMode.java index de7c33e5392f39193d20481544b80663359965c9..630fc03eca9ff2f54fdf4d37a907bf8bd73ca7f3 100644 --- a/src/java.base/share/classes/java/math/RoundingMode.java +++ b/src/java.base/share/classes/java/math/RoundingMode.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,12 +29,12 @@ package java.math; /** - * Specifies a rounding behavior for numerical operations - * capable of discarding precision. Each rounding mode indicates how - * the least significant returned digit of a rounded result is to be - * calculated. If fewer digits are returned than the digits needed to - * represent the exact numerical result, the discarded digits will be - * referred to as the discarded fraction regardless the digits' + * Specifies a rounding policy for numerical operations capable + * of discarding precision. Each rounding mode indicates how the least + * significant returned digit of a rounded result is to be calculated. + * If fewer digits are returned than the digits needed to represent + * the exact numerical result, the discarded digits will be referred + * to as the discarded fraction regardless the digits' * contribution to the value of the number. In other words, * considered as a numerical value, the discarded fraction could have * an absolute value greater than one. @@ -89,7 +89,7 @@ package java.math; * * @apiNote * Five of the rounding modes declared in this class correspond to - * rounding direction attributes defined in the IEEE Standard + * rounding-direction attributes defined in the IEEE Standard * for Floating-Point Arithmetic, IEEE 754-2019. Where present, * this correspondence will be noted in the documentation of the * particular constant. @@ -137,7 +137,7 @@ public enum RoundingMode { * Rounding mode to round towards zero. Never increments the digit * prior to a discarded fraction (i.e., truncates). Note that this * rounding mode never increases the magnitude of the calculated value. - * This mode corresponds to the IEEE 754-2019 rounding + * This mode corresponds to the IEEE 754-2019 rounding-direction * attribute roundTowardZero. * *

    Example: @@ -168,7 +168,7 @@ public enum RoundingMode { * result is positive, behaves as for {@code RoundingMode.UP}; * if negative, behaves as for {@code RoundingMode.DOWN}. Note * that this rounding mode never decreases the calculated value. - * This mode corresponds to the IEEE 754-2019 rounding + * This mode corresponds to the IEEE 754-2019 rounding-direction * attribute roundTowardPositive. * *
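A runnable sketch of the CEILING and FLOOR behavior just described (not from the patch; it complements the example table that follows):

import java.math.BigDecimal;
import java.math.RoundingMode;

class DirectedRoundingDemo {   // class name invented for this sketch
    public static void main(String[] args) {
        BigDecimal pos = new BigDecimal("1.1");
        BigDecimal neg = new BigDecimal("-1.1");
        // CEILING (roundTowardPositive) never decreases the value:
        System.out.println(pos.setScale(0, RoundingMode.CEILING)); //  2
        System.out.println(neg.setScale(0, RoundingMode.CEILING)); // -1
        // FLOOR (roundTowardNegative) never increases it:
        System.out.println(pos.setScale(0, RoundingMode.FLOOR));   //  1
        System.out.println(neg.setScale(0, RoundingMode.FLOOR));   // -2
    }
}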

    Example: @@ -199,7 +199,7 @@ public enum RoundingMode { * result is positive, behave as for {@code RoundingMode.DOWN}; * if negative, behave as for {@code RoundingMode.UP}. Note that * this rounding mode never increases the calculated value. - * This mode corresponds to the IEEE 754-2019 rounding + * This mode corresponds to the IEEE 754-2019 rounding-direction * attribute roundTowardNegative. * *

    Example: @@ -232,7 +232,7 @@ public enum RoundingMode { * fraction is ≥ 0.5; otherwise, behaves as for * {@code RoundingMode.DOWN}. Note that this is the rounding * mode commonly taught at school. - * This mode corresponds to the IEEE 754-2019 rounding + * This mode corresponds to the IEEE 754-2019 rounding-direction * attribute roundTiesToAway. * *
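The school rounding described above, with the tie going away from zero in both signs (sketch, not from the patch):

import java.math.BigDecimal;
import java.math.RoundingMode;

class HalfUpDemo {   // class name invented for this sketch
    public static void main(String[] args) {
        // Ties round away from zero, matching IEEE 754-2019 roundTiesToAway:
        System.out.println(new BigDecimal("2.5").setScale(0, RoundingMode.HALF_UP));  //  3
        System.out.println(new BigDecimal("-2.5").setScale(0, RoundingMode.HALF_UP)); // -3
    }
}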

    Example: @@ -301,7 +301,7 @@ public enum RoundingMode { * chiefly used in the USA. This rounding mode is analogous to * the rounding policy used for {@code float} and {@code double} * arithmetic in Java. - * This mode corresponds to the IEEE 754-2019 rounding + * This mode corresponds to the IEEE 754-2019 rounding-direction * attribute roundTiesToEven. * *
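Banker's rounding sends ties to the even neighbor; a sketch (not from the patch):

import java.math.BigDecimal;
import java.math.RoundingMode;

class HalfEvenDemo {   // class name invented for this sketch
    public static void main(String[] args) {
        // Matches the IEEE 754-2019 roundTiesToEven attribute:
        System.out.println(new BigDecimal("2.5").setScale(0, RoundingMode.HALF_EVEN)); // 2
        System.out.println(new BigDecimal("3.5").setScale(0, RoundingMode.HALF_EVEN)); // 4
    }
}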

    Example: diff --git a/src/java.base/share/classes/java/net/DatagramSocket.java b/src/java.base/share/classes/java/net/DatagramSocket.java index 195894a57d745ede223cae608b286b1a5306c08f..ffd113ecf215857f8fd97713e7f5a11468a2a2a4 100644 --- a/src/java.base/share/classes/java/net/DatagramSocket.java +++ b/src/java.base/share/classes/java/net/DatagramSocket.java @@ -1132,7 +1132,18 @@ public class DatagramSocket implements java.io.Closeable { * @see java.net.DatagramSocketImplFactory#createDatagramSocketImpl() * @see SecurityManager#checkSetFactory * @since 1.3 + * + * @deprecated Use {@link DatagramChannel}, or subclass {@code DatagramSocket} + * directly. + *
    This method provided a way in early JDK releases to replace the + * system wide implementation of {@code DatagramSocket}. It has been mostly + * obsolete since Java 1.4. If required, a {@code DatagramSocket} can be + * created to use a custom implementation by extending {@code DatagramSocket} + * and using the {@linkplain #DatagramSocket(DatagramSocketImpl) protected + * constructor} that takes an {@linkplain DatagramSocketImpl implementation} + * as a parameter. */ + @Deprecated(since = "17") public static synchronized void setDatagramSocketImplFactory(DatagramSocketImplFactory fac) throws IOException diff --git a/src/java.base/share/classes/java/net/HttpConnectSocketImpl.java b/src/java.base/share/classes/java/net/HttpConnectSocketImpl.java index b06254699d59797f472a4c6f5bd3f61978641e0b..0f5ca35115c3610a7bb9a8691bfb67d096538b8d 100644 --- a/src/java.base/share/classes/java/net/HttpConnectSocketImpl.java +++ b/src/java.base/share/classes/java/net/HttpConnectSocketImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,10 +80,9 @@ import java.util.Set; super(delegate); this.socket = socket; SocketAddress a = proxy.address(); - if ( !(a instanceof InetSocketAddress) ) + if ( !(a instanceof InetSocketAddress ad) ) throw new IllegalArgumentException("Unsupported address type"); - InetSocketAddress ad = (InetSocketAddress) a; server = ad.getHostString(); port = ad.getPort(); } diff --git a/src/java.base/share/classes/java/net/HttpCookie.java b/src/java.base/share/classes/java/net/HttpCookie.java index eff638e2e021c00e49abdf8363e46f24b7d0ef67..74c1c6dc693373a6957c0782864617f14af07f1a 100644 --- a/src/java.base/share/classes/java/net/HttpCookie.java +++ b/src/java.base/share/classes/java/net/HttpCookie.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -715,9 +715,8 @@ public final class HttpCookie implements Cloneable { public boolean equals(Object obj) { if (obj == this) return true; - if (!(obj instanceof HttpCookie)) + if (!(obj instanceof HttpCookie other)) return false; - HttpCookie other = (HttpCookie)obj; // One http cookie is equal to another cookie (RFC 2965 sec. 3.3.3) if: // 1. they come from same domain (case-insensitive), diff --git a/src/java.base/share/classes/java/net/HttpRetryException.java b/src/java.base/share/classes/java/net/HttpRetryException.java index a36f9b1340af167b01e5e1ee68951edb4ea0f0bf..310a9e7b5fad3a286b7063406ac51b6b091a87f4 100644 --- a/src/java.base/share/classes/java/net/HttpRetryException.java +++ b/src/java.base/share/classes/java/net/HttpRetryException.java @@ -39,7 +39,14 @@ public class HttpRetryException extends IOException { @java.io.Serial private static final long serialVersionUID = -9186022286469111381L; + /** + * The response code. + */ private int responseCode; + + /** + * The URL to be redirected to. 
+ */ private String location; /** diff --git a/src/java.base/share/classes/java/net/Inet6Address.java b/src/java.base/share/classes/java/net/Inet6Address.java index be34823d49fa6c36ddbe124220a06207d85ba97e..493696888410b372205f9d849baf11c4be21c94f 100644 --- a/src/java.base/share/classes/java/net/Inet6Address.java +++ b/src/java.base/share/classes/java/net/Inet6Address.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -262,10 +262,9 @@ class Inet6Address extends InetAddress { } public boolean equals(Object o) { - if (! (o instanceof Inet6AddressHolder)) { + if (!(o instanceof Inet6AddressHolder that)) { return false; } - Inet6AddressHolder that = (Inet6AddressHolder)o; return Arrays.equals(this.ipaddress, that.ipaddress); } @@ -525,10 +524,9 @@ class Inet6Address extends InetAddress { Enumeration addresses = ifc.getInetAddresses(); while (addresses.hasMoreElements()) { InetAddress addr = addresses.nextElement(); - if (!(addr instanceof Inet6Address)) { + if (!(addr instanceof Inet6Address ia6_addr)) { continue; } - Inet6Address ia6_addr = (Inet6Address)addr; /* check if site or link local prefixes match */ if (!isDifferentLocalAddressType(thisAddr, ia6_addr.getAddress())){ /* type not the same, so carry on searching */ @@ -557,11 +555,15 @@ class Inet6Address extends InetAddress { } /** - * @serialField ipaddress byte[] - * @serialField scope_id int - * @serialField scope_id_set boolean - * @serialField scope_ifname_set boolean - * @serialField ifname String + * @serialField ipaddress byte[] holds a 128-bit (16 bytes) IPv6 address + * @serialField scope_id int the address scope id. {@code 0} if undefined + * @serialField scope_id_set boolean {@code true} when the scope_id field + * contains a valid integer scope_id + * @serialField scope_ifname_set boolean {@code true} if the object is + * constructed with a scoped interface instead of a numeric + * scope id + * @serialField ifname String the name of the scoped network interface. + * {@code null} if undefined */ @java.io.Serial private static final ObjectStreamField[] serialPersistentFields = { @@ -578,9 +580,13 @@ class Inet6Address extends InetAddress { Inet6Address.class, "holder6"); /** - * restore the state of this object from stream - * including the scope information, only if the - * scoped interface name is valid on this system + * Restores the state of this object from the stream. + * This includes the scope information, but only if the + * scoped interface name is valid on this system. + * + * @param s the {@code ObjectInputStream} from which data is read + * @throws IOException if an I/O error occurs + * @throws ClassNotFoundException if a serialized class cannot be loaded */ @java.io.Serial private void readObject(ObjectInputStream s) @@ -642,9 +648,12 @@ class Inet6Address extends InetAddress { } /** - * default behavior is overridden in order to write the - * scope_ifname field as a String, rather than a NetworkInterface - * which is not serializable + * The default behavior of this method is overridden in order to + * write the scope_ifname field as a {@code String}, rather than a + * {@code NetworkInterface} which is not serializable. 
+ * + * @param s the {@code ObjectOutputStream} to which data is written + * @throws IOException if an I/O error occurs */ @java.io.Serial private synchronized void writeObject(ObjectOutputStream s) diff --git a/src/java.base/share/classes/java/net/InetAddress.java b/src/java.base/share/classes/java/net/InetAddress.java index 3a18ee31531684c1b67aeb7f6bff5f8fe8083886..db75848606a0f38890bc0345b9bcd2866ea8c1c9 100644 --- a/src/java.base/share/classes/java/net/InetAddress.java +++ b/src/java.base/share/classes/java/net/InetAddress.java @@ -1712,6 +1712,9 @@ public class InetAddress implements java.io.Serializable { return (InetAddressImpl) impl; } + /** + * Initializes an empty InetAddress. + */ @java.io.Serial private void readObjectNoData () { if (getClass().getClassLoader() != null) { @@ -1724,6 +1727,13 @@ public class InetAddress implements java.io.Serializable { private static final long FIELDS_OFFSET = UNSAFE.objectFieldOffset(InetAddress.class, "holder"); + /** + * Restores the state of this object from the stream. + * + * @param s the {@code ObjectInputStream} from which data is read + * @throws IOException if an I/O error occurs + * @throws ClassNotFoundException if a serialized class cannot be loaded + */ @java.io.Serial private void readObject (ObjectInputStream s) throws IOException, ClassNotFoundException { @@ -1744,9 +1754,10 @@ public class InetAddress implements java.io.Serializable { /* needed because the serializable fields no longer exist */ /** - * @serialField hostName String - * @serialField address int - * @serialField family int + * @serialField hostName String the hostname for this address + * @serialField address int holds a 32-bit IPv4 address. + * @serialField family int specifies the address family type, for instance, + * {@code '1'} for IPv4 addresses, and {@code '2'} for IPv6 addresses. */ @java.io.Serial private static final ObjectStreamField[] serialPersistentFields = { @@ -1755,6 +1766,12 @@ public class InetAddress implements java.io.Serializable { new ObjectStreamField("family", int.class), }; + /** + * Writes the state of this object to the stream. + * + * @param s the {@code ObjectOutputStream} to which data is written + * @throws IOException if an I/O error occurs + */ @java.io.Serial private void writeObject (ObjectOutputStream s) throws IOException { diff --git a/src/java.base/share/classes/java/net/InetSocketAddress.java b/src/java.base/share/classes/java/net/InetSocketAddress.java index 56635cbe929cb10f1adc6feef05ad5139355f77f..3191d097b6ab7da0733e5502e817e77dad000618 100644 --- a/src/java.base/share/classes/java/net/InetSocketAddress.java +++ b/src/java.base/share/classes/java/net/InetSocketAddress.java @@ -264,9 +264,9 @@ public class InetSocketAddress } /** - * @serialField hostname String - * @serialField addr InetAddress - * @serialField port int + * @serialField hostname String the hostname of the Socket Address + * @serialField addr InetAddress the IP address of the Socket Address + * @serialField port int the port number of the Socket Address */ @java.io.Serial private static final ObjectStreamField[] serialPersistentFields = { @@ -274,6 +274,12 @@ public class InetSocketAddress new ObjectStreamField("addr", InetAddress.class), new ObjectStreamField("port", int.class)}; + /** + * Writes the state of this object to the stream. 
+ * + * @param out the {@code ObjectOutputStream} to which data is written + * @throws IOException if an I/O error occurs + */ @java.io.Serial private void writeObject(ObjectOutputStream out) throws IOException @@ -286,6 +292,13 @@ public class InetSocketAddress out.writeFields(); } + /** + * Restores the state of this object from the stream. + * + * @param in the {@code ObjectInputStream} from which data is read + * @throws IOException if an I/O error occurs + * @throws ClassNotFoundException if a serialized class cannot be loaded + */ @java.io.Serial private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException @@ -308,6 +321,10 @@ public class InetSocketAddress UNSAFE.putReference(this, FIELDS_OFFSET, h); } + /** + * Throws {@code InvalidObjectException}, always. + * @throws ObjectStreamException always + */ @java.io.Serial private void readObjectNoData() throws ObjectStreamException diff --git a/src/java.base/share/classes/java/net/InterfaceAddress.java b/src/java.base/share/classes/java/net/InterfaceAddress.java index cc22970882a8436fa21628f0d834777992de2d17..f5b76ec9f90ce7d49ec61f4e162b2ec0127809a1 100644 --- a/src/java.base/share/classes/java/net/InterfaceAddress.java +++ b/src/java.base/share/classes/java/net/InterfaceAddress.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -101,17 +101,10 @@ public class InterfaceAddress { * @see java.net.InterfaceAddress#hashCode() */ public boolean equals(Object obj) { - if (obj instanceof InterfaceAddress) { - InterfaceAddress cmp = (InterfaceAddress) obj; - - if (Objects.equals(address, cmp.address) && + return obj instanceof InterfaceAddress cmp && + Objects.equals(address, cmp.address) && Objects.equals(broadcast, cmp.broadcast) && - maskLength == cmp.maskLength) - { - return true; - } - } - return false; + maskLength == cmp.maskLength; } /** diff --git a/src/java.base/share/classes/java/net/NetMulticastSocket.java b/src/java.base/share/classes/java/net/NetMulticastSocket.java index 86969a698daecbe5c9187d0eabdf5003baa52b05..79a902735d04eed4e07fb9005baa65beb12eb8cb 100644 --- a/src/java.base/share/classes/java/net/NetMulticastSocket.java +++ b/src/java.base/share/classes/java/net/NetMulticastSocket.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -216,9 +216,8 @@ final class NetMulticastSocket extends MulticastSocket { throw new SocketException("already bound"); if (addr == null) addr = new InetSocketAddress(0); - if (!(addr instanceof InetSocketAddress)) + if (!(addr instanceof InetSocketAddress epoint)) throw new IllegalArgumentException("Unsupported address type!"); - InetSocketAddress epoint = (InetSocketAddress) addr; if (epoint.isUnresolved()) throw new SocketException("Unresolved address"); InetAddress iaddr = epoint.getAddress(); @@ -259,9 +258,8 @@ final class NetMulticastSocket extends MulticastSocket { public void connect(SocketAddress addr) throws SocketException { if (addr == null) throw new IllegalArgumentException("Address can't be null"); - if (!(addr instanceof InetSocketAddress)) + if (!(addr instanceof InetSocketAddress epoint)) throw new IllegalArgumentException("Unsupported address type"); - InetSocketAddress epoint = (InetSocketAddress) addr; if (epoint.isUnresolved()) throw new SocketException("Unresolved address"); connectInternal(epoint.getAddress(), epoint.getPort()); diff --git a/src/java.base/share/classes/java/net/NetworkInterface.java b/src/java.base/share/classes/java/net/NetworkInterface.java index 1668ddbf347e40a1c7bb5da48c5ae93b757dd171..e463f35b50848143939706ffcac348c6a29a37d9 100644 --- a/src/java.base/share/classes/java/net/NetworkInterface.java +++ b/src/java.base/share/classes/java/net/NetworkInterface.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -590,10 +590,9 @@ public final class NetworkInterface { * @see java.net.InetAddress#getAddress() */ public boolean equals(Object obj) { - if (!(obj instanceof NetworkInterface)) { + if (!(obj instanceof NetworkInterface that)) { return false; } - NetworkInterface that = (NetworkInterface)obj; if (this.name != null ) { if (!this.name.equals(that.name)) { return false; diff --git a/src/java.base/share/classes/java/net/ServerSocket.java b/src/java.base/share/classes/java/net/ServerSocket.java index 249c09b967415ecbdcb72b48432c4c48e578890a..b7e6d07495bfa605bb0c13ee0f10e16504d718ed 100644 --- a/src/java.base/share/classes/java/net/ServerSocket.java +++ b/src/java.base/share/classes/java/net/ServerSocket.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,10 +41,7 @@ import sun.net.PlatformSocketImpl; * based on that request, and then possibly returns a result to the requester. *
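The mechanical refactoring running through these java.net files replaces an instanceof test followed by a cast with Java 16 pattern matching for instanceof. A before/after sketch (method and class names invented for illustration):

import java.net.InetSocketAddress;

class PatternMatchSketch {
    // Before: test, then a separate cast.
    static String hostOld(Object a) {
        if (!(a instanceof InetSocketAddress))
            throw new IllegalArgumentException("Unsupported address type");
        InetSocketAddress ad = (InetSocketAddress) a;
        return ad.getHostString();
    }

    // After: the pattern variable 'ad' is definitely assigned wherever
    // the test is known to have succeeded, so the cast disappears.
    static String hostNew(Object a) {
        if (!(a instanceof InetSocketAddress ad))
            throw new IllegalArgumentException("Unsupported address type");
        return ad.getHostString();
    }

    public static void main(String[] args) {
        Object a = InetSocketAddress.createUnresolved("example.com", 80);
        System.out.println(hostOld(a) + " " + hostNew(a));
    }
}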

    * The actual work of the server socket is performed by an instance - * of the {@code SocketImpl} class. An application can - * change the socket factory that creates the socket - * implementation to configure itself to create sockets - * appropriate to the local firewall. + * of the {@code SocketImpl} class. * *

    The {@code ServerSocket} class defines convenience * methods to set and get several socket options. This class also @@ -76,7 +73,6 @@ import sun.net.PlatformSocketImpl; * Additional (implementation specific) options may also be supported. * * @see java.net.SocketImpl - * @see java.net.ServerSocket#setSocketFactory(java.net.SocketImplFactory) * @see java.nio.channels.ServerSocketChannel * @since 1.0 */ @@ -164,8 +160,6 @@ public class ServerSocket implements java.io.Closeable { * 0 and 65535, inclusive. * * @see java.net.SocketImpl - * @see java.net.SocketImplFactory#createSocketImpl() - * @see java.net.ServerSocket#setSocketFactory(java.net.SocketImplFactory) * @see SecurityManager#checkListen */ public ServerSocket(int port) throws IOException { @@ -217,8 +211,6 @@ public class ServerSocket implements java.io.Closeable { * 0 and 65535, inclusive. * * @see java.net.SocketImpl - * @see java.net.SocketImplFactory#createSocketImpl() - * @see java.net.ServerSocket#setSocketFactory(java.net.SocketImplFactory) * @see SecurityManager#checkListen */ public ServerSocket(int port, int backlog) throws IOException { @@ -381,9 +373,8 @@ public class ServerSocket implements java.io.Closeable { throw new SocketException("Already bound"); if (endpoint == null) endpoint = new InetSocketAddress(0); - if (!(endpoint instanceof InetSocketAddress)) + if (!(endpoint instanceof InetSocketAddress epoint)) throw new IllegalArgumentException("Unsupported address type"); - InetSocketAddress epoint = (InetSocketAddress) endpoint; if (epoint.isUnresolved()) throw new SocketException("Unresolved address"); if (backlog < 1) @@ -929,7 +920,17 @@ public class ServerSocket implements java.io.Closeable { * {@code checkSetFactory} method doesn't allow the operation. * @see java.net.SocketImplFactory#createSocketImpl() * @see SecurityManager#checkSetFactory + * @deprecated Use a {@link javax.net.ServerSocketFactory} and subclass {@code ServerSocket} + * directly. + *
    This method provided a way in early JDK releases to replace the + * system wide implementation of {@code ServerSocket}. It has been mostly + * obsolete since Java 1.4. If required, a {@code ServerSocket} can be + * created to use a custom implementation by extending {@code ServerSocket} + * and using the {@linkplain #ServerSocket(SocketImpl) protected + * constructor} that takes an {@linkplain SocketImpl implementation} + * as a parameter. */ + @Deprecated(since = "17") public static synchronized void setSocketFactory(SocketImplFactory fac) throws IOException { if (factory != null) { throw new SocketException("factory already defined"); diff --git a/src/java.base/share/classes/java/net/Socket.java b/src/java.base/share/classes/java/net/Socket.java index 3357dbfe77738aab09283ab3b36d6bc43f466205..7ee552c20636b47785666a5049320c6baf5d31e1 100644 --- a/src/java.base/share/classes/java/net/Socket.java +++ b/src/java.base/share/classes/java/net/Socket.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,8 +33,6 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; import java.lang.invoke.VarHandle; import java.nio.channels.SocketChannel; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.util.Objects; import java.util.Set; import java.util.Collections; @@ -45,10 +43,7 @@ import java.util.Collections; * between two machines. *

    * The actual work of the socket is performed by an instance of the - * {@code SocketImpl} class. An application, by changing - * the socket factory that creates the socket implementation, - * can configure itself to create sockets appropriate to the local - * firewall. + * {@code SocketImpl} class. * *
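The deprecations in this patch all point to the same replacement shape: pass a custom implementation to the protected constructor instead of installing a process-wide factory. A compile-only sketch (class name invented; a real SocketImpl subclass must implement many abstract methods, elided here):

import java.net.Socket;
import java.net.SocketException;
import java.net.SocketImpl;

class CustomSocket extends Socket {
    // Sketch only: 'impl' would be an instance of your SocketImpl subclass.
    CustomSocket(SocketImpl impl) throws SocketException {
        super(impl); // the protected Socket(SocketImpl) constructor
    }
}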

    The {@code Socket} class defines convenience * methods to set and get several socket options. This class also @@ -96,7 +91,6 @@ import java.util.Collections; * * Additional (implementation specific) options may also be supported. * - * @see java.net.Socket#setSocketImplFactory(java.net.SocketImplFactory) * @see java.net.SocketImpl * @see java.nio.channels.SocketChannel * @since 1.0 @@ -282,9 +276,7 @@ public class Socket implements java.io.Closeable { * @throws IllegalArgumentException if the port parameter is outside * the specified range of valid port values, which is between * 0 and 65535, inclusive. - * @see java.net.Socket#setSocketImplFactory(java.net.SocketImplFactory) * @see java.net.SocketImpl - * @see java.net.SocketImplFactory#createSocketImpl() * @see SecurityManager#checkConnect */ public Socket(String host, int port) @@ -318,9 +310,7 @@ public class Socket implements java.io.Closeable { * the specified range of valid port values, which is between * 0 and 65535, inclusive. * @throws NullPointerException if {@code address} is null. - * @see java.net.Socket#setSocketImplFactory(java.net.SocketImplFactory) * @see java.net.SocketImpl - * @see java.net.SocketImplFactory#createSocketImpl() * @see SecurityManager#checkConnect */ public Socket(InetAddress address, int port) throws IOException { @@ -448,9 +438,7 @@ public class Socket implements java.io.Closeable { * @throws IllegalArgumentException if the port parameter is outside * the specified range of valid port values, which is between * 0 and 65535, inclusive. - * @see java.net.Socket#setSocketImplFactory(java.net.SocketImplFactory) * @see java.net.SocketImpl - * @see java.net.SocketImplFactory#createSocketImpl() * @see SecurityManager#checkConnect * @deprecated Use DatagramSocket instead for UDP transport. */ @@ -492,9 +480,7 @@ public class Socket implements java.io.Closeable { * the specified range of valid port values, which is between * 0 and 65535, inclusive. * @throws NullPointerException if {@code host} is null. - * @see java.net.Socket#setSocketImplFactory(java.net.SocketImplFactory) * @see java.net.SocketImpl - * @see java.net.SocketImplFactory#createSocketImpl() * @see SecurityManager#checkConnect * @deprecated Use DatagramSocket instead for UDP transport. */ @@ -625,10 +611,9 @@ public class Socket implements java.io.Closeable { if (isConnected()) throw new SocketException("already connected"); - if (!(endpoint instanceof InetSocketAddress)) + if (!(endpoint instanceof InetSocketAddress epoint)) throw new IllegalArgumentException("Unsupported address type"); - InetSocketAddress epoint = (InetSocketAddress) endpoint; InetAddress addr = epoint.getAddress (); int port = epoint.getPort(); checkAddress(addr, "connect"); @@ -1761,7 +1746,17 @@ public class Socket implements java.io.Closeable { * {@code checkSetFactory} method doesn't allow the operation. * @see java.net.SocketImplFactory#createSocketImpl() * @see SecurityManager#checkSetFactory - */ + * @deprecated Use a {@link javax.net.SocketFactory} and subclass {@code Socket} + * directly. + *
    This method provided a way in early JDK releases to replace the + * system wide implementation of {@code Socket}. It has been mostly + * obsolete since Java 1.4. If required, a {@code Socket} can be + * created to use a custom implementation by extending {@code Socket} + * and using the {@linkplain #Socket(SocketImpl) protected + * constructor} that takes an {@linkplain SocketImpl implementation} + * as a parameter. + */ + @Deprecated(since = "17") public static synchronized void setSocketImplFactory(SocketImplFactory fac) throws IOException { diff --git a/src/java.base/share/classes/java/net/SocketPermission.java b/src/java.base/share/classes/java/net/SocketPermission.java index 9080ce89a21fe7226ceda940635b14052e713e56..85166f601bb5663ec40fa3ba5bb760a5d59d3315 100644 --- a/src/java.base/share/classes/java/net/SocketPermission.java +++ b/src/java.base/share/classes/java/net/SocketPermission.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -873,14 +873,12 @@ public final class SocketPermission extends Permission public boolean implies(Permission p) { int i,j; - if (!(p instanceof SocketPermission)) + if (!(p instanceof SocketPermission that)) return false; if (p == this) return true; - SocketPermission that = (SocketPermission) p; - return ((this.mask & that.mask) == that.mask) && impliesIgnoreMask(that); } @@ -1047,11 +1045,9 @@ public final class SocketPermission extends Permission if (obj == this) return true; - if (! (obj instanceof SocketPermission)) + if (! (obj instanceof SocketPermission that)) return false; - SocketPermission that = (SocketPermission) obj; - //this is (overly?) complex!!! // check the mask first @@ -1196,9 +1192,12 @@ public final class SocketPermission extends Permission } /** - * WriteObject is called to save the state of the SocketPermission - * to a stream. The actions are serialized, and the superclass - * takes care of the name. + * {@code writeObject} is called to save the state of the + * {@code SocketPermission} to a stream. The actions are serialized, + * and the superclass takes care of the name. + * + * @param s the {@code ObjectOutputStream} to which data is written + * @throws IOException if an I/O error occurs */ @java.io.Serial private synchronized void writeObject(java.io.ObjectOutputStream s) @@ -1212,8 +1211,12 @@ public final class SocketPermission extends Permission } /** - * readObject is called to restore the state of the SocketPermission from - * a stream. + * {@code readObject} is called to restore the state of the + * {@code SocketPermission} from a stream. + * + * @param s the {@code ObjectInputStream} from which data is read + * @throws IOException if an I/O error occurs + * @throws ClassNotFoundException if a serialized class cannot be loaded */ @java.io.Serial private synchronized void readObject(java.io.ObjectInputStream s) @@ -1379,15 +1382,13 @@ final class SocketPermissionCollection extends PermissionCollection */ @Override public void add(Permission permission) { - if (! (permission instanceof SocketPermission)) + if (! 
(permission instanceof SocketPermission sp))
             throw new IllegalArgumentException("invalid permission: "+
                                                permission);
         if (isReadOnly())
             throw new SecurityException(
                 "attempt to add a Permission to a readonly PermissionCollection");
 
-        SocketPermission sp = (SocketPermission)permission;
-
         // Add permission to map if it is absent, or replace with new
         // permission if applicable. NOTE: cannot use lambda for
         // remappingFunction parameter until JDK-8076596 is fixed.
@@ -1426,11 +1427,9 @@ final class SocketPermissionCollection extends PermissionCollection
     @Override
     public boolean implies(Permission permission)
     {
-        if (! (permission instanceof SocketPermission))
+        if (! (permission instanceof SocketPermission np))
             return false;
 
-        SocketPermission np = (SocketPermission) permission;
-
         int desired = np.getMask();
         int effective = 0;
         int needed = desired;
@@ -1495,7 +1494,11 @@ final class SocketPermissionCollection extends PermissionCollection
     };
 
     /**
+     * Writes the state of this object to the stream.
      * @serialData "permissions" field (a Vector containing the SocketPermissions).
+     *
+     * @param out the {@code ObjectOutputStream} to which data is written
+     * @throws IOException if an I/O error occurs
      */
     /*
      * Writes the contents of the perms field out as a Vector for
@@ -1513,8 +1516,13 @@ final class SocketPermissionCollection extends PermissionCollection
         out.writeFields();
     }
 
-    /*
-     * Reads in a Vector of SocketPermissions and saves them in the perms field.
+    /**
+     * Reads in a {@code Vector} of {@code SocketPermission} and saves
+     * them in the perms field.
+     *
+     * @param in the {@code ObjectInputStream} from which data is read
+     * @throws IOException if an I/O error occurs
+     * @throws ClassNotFoundException if a serialized class cannot be loaded
      */
     @java.io.Serial
     private void readObject(ObjectInputStream in)
diff --git a/src/java.base/share/classes/java/net/SocksSocketImpl.java b/src/java.base/share/classes/java/net/SocksSocketImpl.java
index 88e6f0f612909808a6d0e7e57b6e0f5ecd4e993c..8337eeb619ffd11e61015835639558e33b5dad9c 100644
--- a/src/java.base/share/classes/java/net/SocksSocketImpl.java
+++ b/src/java.base/share/classes/java/net/SocksSocketImpl.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,8 +57,7 @@ class SocksSocketImpl extends DelegatingSocketImpl implements SocksConsts {
     SocksSocketImpl(Proxy proxy, SocketImpl delegate) {
         super(delegate);
         SocketAddress a = proxy.address();
-        if (a instanceof InetSocketAddress) {
-            InetSocketAddress ad = (InetSocketAddress) a;
+        if (a instanceof InetSocketAddress ad) {
             // Use getHostString() to avoid reverse lookups
             server = ad.getHostString();
             serverPort = ad.getPort();
diff --git a/src/java.base/share/classes/java/net/URI.java b/src/java.base/share/classes/java/net/URI.java
index 98d660590ebcad13aa21aa87476f8f4866ae0d73..7248c8f0e154e75795dddf6512d51b73340e0ada 100644
--- a/src/java.base/share/classes/java/net/URI.java
+++ b/src/java.base/share/classes/java/net/URI.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1501,9 +1501,8 @@ public final class URI
     public boolean equals(Object ob) {
         if (ob == this)
             return true;
-        if (!(ob instanceof URI))
+        if (!(ob instanceof URI that))
             return false;
-        URI that = (URI)ob;
         if (this.isOpaque() != that.isOpaque()) return false;
         if (!equalIgnoringCase(this.scheme, that.scheme)) return false;
         if (!equal(this.fragment, that.fragment)) return false;
@@ -1777,6 +1776,9 @@ public final class URI
      *
      * @param  os  The object-output stream to which this object
      *             is to be written
+     *
+     * @throws IOException
+     *         If an I/O error occurs
      */
     @java.io.Serial
     private void writeObject(ObjectOutputStream os)
@@ -1795,6 +1797,12 @@ public final class URI
      *
      * @param  is  The object-input stream from which this object
      *             is being read
+     *
+     * @throws IOException
+     *         If an I/O error occurs
+     *
+     * @throws ClassNotFoundException
+     *         If a serialized class cannot be loaded
      */
     @java.io.Serial
     private void readObject(ObjectInputStream is)
diff --git a/src/java.base/share/classes/java/net/URISyntaxException.java b/src/java.base/share/classes/java/net/URISyntaxException.java
index ea587fd3c0976a76540a216f6e2157a014919699..ba077a995fd594d13810dc1873994252b0eaf46c 100644
--- a/src/java.base/share/classes/java/net/URISyntaxException.java
+++ b/src/java.base/share/classes/java/net/URISyntaxException.java
@@ -41,7 +41,15 @@ public class URISyntaxException
     @java.io.Serial
     private static final long serialVersionUID = 2137979680897488891L;
 
+    /**
+     * The input string.
+     */
     private String input;
+
+    /**
+     * The index at which the parse error occurred,
+     * or {@code -1} if the index is not known.
+     */
     private int index;
 
     /**
diff --git a/src/java.base/share/classes/java/net/URL.java b/src/java.base/share/classes/java/net/URL.java
index 1ed20fc6027c9b6b2c53961ec6e3332153288ad0..b6fd85e8646312c572dd02b0c5e709e0f79cb953 100644
--- a/src/java.base/share/classes/java/net/URL.java
+++ b/src/java.base/share/classes/java/net/URL.java
@@ -972,9 +972,8 @@ public final class URL implements java.io.Serializable {
      * {@code false} otherwise.
      */
     public boolean equals(Object obj) {
-        if (!(obj instanceof URL))
+        if (!(obj instanceof URL u2))
             return false;
-        URL u2 = (URL)obj;
 
         return handler.equals(this, u2);
     }
@@ -1481,19 +1480,20 @@ public final class URL implements java.io.Serializable {
     }
 
     /**
-     * @serialField    protocol String
+     * @serialField    protocol String the protocol to use (ftp, http, nntp, ... etc.)
      *
-     * @serialField    host String
+     * @serialField    host String the host name to connect to
      *
-     * @serialField    port int
+     * @serialField    port int the protocol port to connect to
      *
-     * @serialField    authority String
+     * @serialField    authority String the authority part of this URL
      *
-     * @serialField    file String
+     * @serialField    file String the specified file name on that host. {@code file} is
+     *                 defined as {@code path[?query]}
      *
-     * @serialField    ref String
+     * @serialField    ref String the fragment part of this URL
      *
-     * @serialField    hashCode int
+     * @serialField    hashCode int the hashCode of this URL
      *
      */
     @java.io.Serial
@@ -1515,6 +1515,9 @@ public final class URL implements java.io.Serializable {
      * the reader must ensure that calling getURLStreamHandler with
      * the protocol variable returns a valid URLStreamHandler and
      * throw an IOException if it does not.
+     *
+     * @param s the {@code ObjectOutputStream} to which data is written
+     * @throws IOException if an I/O error occurs
      */
     @java.io.Serial
     private synchronized void writeObject(java.io.ObjectOutputStream s)
@@ -1527,6 +1530,10 @@ public final class URL implements java.io.Serializable {
      * readObject is called to restore the state of the URL from the
      * stream. It reads the components of the URL and finds the local
      * stream handler.
+     *
+     * @param s the {@code ObjectInputStream} from which data is read
+     * @throws IOException if an I/O error occurs
+     * @throws ClassNotFoundException if a serialized class cannot be loaded
      */
     @java.io.Serial
     private synchronized void readObject(java.io.ObjectInputStream s)
diff --git a/src/java.base/share/classes/java/net/URLClassLoader.java b/src/java.base/share/classes/java/net/URLClassLoader.java
index e15669b43b8c127698726e22b68025cdaab64af0..37691f666ed81e56810898ceb3137fd1b12433ae 100644
--- a/src/java.base/share/classes/java/net/URLClassLoader.java
+++ b/src/java.base/share/classes/java/net/URLClassLoader.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -288,8 +288,7 @@ public class URLClassLoader extends SecureClassLoader implements Closeable {
             }
             URLConnection urlc = url.openConnection();
             InputStream is = urlc.getInputStream();
-            if (urlc instanceof JarURLConnection) {
-                JarURLConnection juc = (JarURLConnection)urlc;
+            if (urlc instanceof JarURLConnection juc) {
                 JarFile jar = juc.getJarFile();
                 synchronized (closeables) {
                     if (!closeables.containsKey(jar)) {
diff --git a/src/java.base/share/classes/java/net/URLPermission.java b/src/java.base/share/classes/java/net/URLPermission.java
index 98e87e3820fa5f5f6dc50811ea5985274c1b0851..0cd818ef64b83a5b81991d8828a814a9fbbc4a70 100644
--- a/src/java.base/share/classes/java/net/URLPermission.java
+++ b/src/java.base/share/classes/java/net/URLPermission.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -162,6 +162,9 @@ public final class URLPermission extends Permission {
 
     private transient Authority authority; // serialized field
 
+    /**
+     * The actions string
+     */
     private String actions;
 
     /**
@@ -295,12 +298,10 @@ public final class URLPermission extends Permission {
      *
      */
     public boolean implies(Permission p) {
-        if (! (p instanceof URLPermission)) {
+        if (! (p instanceof URLPermission that)) {
             return false;
         }
 
-        URLPermission that = (URLPermission)p;
-
         if (this.methods.isEmpty() && !that.methods.isEmpty()) {
             return false;
         }
@@ -371,10 +372,9 @@ public final class URLPermission extends Permission {
      * and p's url equals this's url. Returns false otherwise.
      */
     public boolean equals(Object p) {
-        if (!(p instanceof URLPermission)) {
+        if (!(p instanceof URLPermission that)) {
             return false;
         }
-        URLPermission that = (URLPermission)p;
         if (!this.scheme.equals(that.scheme)) {
             return false;
         }
@@ -504,7 +504,11 @@ public final class URLPermission extends Permission {
     }
 
     /**
-     * restore the state of this object from stream
+     * Restores the state of this object from stream.
+     *
+     * @param s the {@code ObjectInputStream} from which data is read
+     * @throws IOException if an I/O error occurs
+     * @throws ClassNotFoundException if a serialized class cannot be loaded
      */
     @java.io.Serial
     private void readObject(ObjectInputStream s)
diff --git a/src/java.base/share/classes/java/net/UnixDomainSocketAddress.java b/src/java.base/share/classes/java/net/UnixDomainSocketAddress.java
index d1d718fe1a1689b2e0b280922650b90b63db60be..f3ead246be11830230ee5481307d2adcd5f5b183 100644
--- a/src/java.base/share/classes/java/net/UnixDomainSocketAddress.java
+++ b/src/java.base/share/classes/java/net/UnixDomainSocketAddress.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,8 +93,9 @@ public final class UnixDomainSocketAddress extends SocketAddress {
      *
      * Ser containing the path name of this instance.
      *
-     * @return a {@link Ser}
-     *         representing the path name of this instance
+     * @return a {@link Ser} representing the path name of this instance
+     *
+     * @throws ObjectStreamException if an error occurs
      */
     @java.io.Serial
     private Object writeReplace() throws ObjectStreamException {
@@ -193,9 +194,8 @@ public final class UnixDomainSocketAddress extends SocketAddress {
      */
     @Override
     public boolean equals(Object o) {
-        if (!(o instanceof UnixDomainSocketAddress))
+        if (!(o instanceof UnixDomainSocketAddress that))
             return false;
-        UnixDomainSocketAddress that = (UnixDomainSocketAddress)o;
         return this.path.equals(that.path);
     }
 
diff --git a/src/java.base/share/classes/java/net/doc-files/net-properties.html b/src/java.base/share/classes/java/net/doc-files/net-properties.html
index 8bd0fe7316d103b1864d1334a78856db72dcfba9..ea0311b71614627141a1e77f81e95097d7314f61 100644
--- a/src/java.base/share/classes/java/net/doc-files/net-properties.html
+++ b/src/java.base/share/classes/java/net/doc-files/net-properties.html
@@ -255,7 +255,6 @@ bytes (depending on the platform), it is important to ensure that the temporary
 together with the filename used for the socket (currently a name similar to
 {@code socket_1679697142}) does not exceed this limit. The following
 properties can be used to control the selection of this directory:
-