* update firefox-esr to 115.13.0-1

This commit is contained in:
Alexander Baldeck 2024-09-25 14:22:47 +02:00
parent ecc4a01541
commit 330e037c93
9 changed files with 0 additions and 30302 deletions

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,488 +0,0 @@
# HG changeset patch
# User Cameron Kaiser <spectre@floodgap.com>
# Date 1694573058 25200
# Tue Sep 12 19:44:18 2023 -0700
# Node ID e3eda281a1dc739c862eb38c795833595724cefc
# Parent 671b771fd1de061e02f382e0cb20237d0e3a84a8
builds links
diff -r 671b771fd1de -r e3eda281a1dc config/check_macroassembler_style.py
--- a/config/check_macroassembler_style.py Tue Sep 12 10:27:52 2023 -0700
+++ b/config/check_macroassembler_style.py Tue Sep 12 19:44:18 2023 -0700
@@ -21,22 +21,22 @@
# ----------------------------------------------------------------------------
import difflib
import os
import re
import sys
architecture_independent = set(["generic"])
-all_unsupported_architectures_names = set(["mips32", "mips64", "mips_shared"])
+all_unsupported_architectures_names = set(["mips32", "mips64", "mips_shared", "ppc64"])
all_architecture_names = set(
- ["x86", "x64", "arm", "arm64", "loong64", "ppc64", "riscv64", "wasm32"]
+ ["x86", "x64", "arm", "arm64", "loong64", "riscv64", "wasm32"]
)
all_shared_architecture_names = set(
- ["x86_shared", "arm", "arm64", "loong64", "ppc64", "riscv64", "wasm32"]
+ ["x86_shared", "arm", "arm64", "loong64", "riscv64", "wasm32"]
)
reBeforeArg = "(?<=[(,\s])"
reArgType = "(?P<type>[\w\s:*&<>]+)"
reArgName = "(?P<name>\s\w+)"
reArgDefault = "(?P<default>(?:\s=(?:(?:\s[\w:]+\(\))|[^,)]+))?)"
reAfterArg = "(?=[,)])"
reMatchArg = re.compile(reBeforeArg + reArgType + reArgName + reArgDefault + reAfterArg)
diff -r 671b771fd1de -r e3eda281a1dc js/moz.configure
--- a/js/moz.configure Tue Sep 12 10:27:52 2023 -0700
+++ b/js/moz.configure Tue Sep 12 19:44:18 2023 -0700
@@ -258,16 +258,18 @@
if target.cpu == "aarch64":
return namespace(arm64=True)
elif target.cpu == "x86_64":
return namespace(x64=True)
elif target.cpu == "loongarch64":
return namespace(loong64=True)
elif target.cpu == "riscv64":
return namespace(riscv64=True)
+ elif target.cpu == "ppc64":
+ return namespace(ppc64=True)
return namespace(**{str(target.cpu): True})
set_config("JS_CODEGEN_NONE", jit_codegen.none)
set_config("JS_CODEGEN_ARM", jit_codegen.arm)
set_config("JS_CODEGEN_ARM64", jit_codegen.arm64)
set_config("JS_CODEGEN_MIPS32", jit_codegen.mips32)
@@ -281,17 +283,17 @@
set_define("JS_CODEGEN_NONE", jit_codegen.none)
set_define("JS_CODEGEN_ARM", jit_codegen.arm)
set_define("JS_CODEGEN_ARM64", jit_codegen.arm64)
set_define("JS_CODEGEN_MIPS32", jit_codegen.mips32)
set_define("JS_CODEGEN_MIPS64", jit_codegen.mips64)
set_define("JS_CODEGEN_LOONG64", jit_codegen.loong64)
set_define("JS_CODEGEN_RISCV64", jit_codegen.riscv64)
-set_config("JS_CODEGEN_PPC64", jit_codegen.ppc64)
+set_define("JS_CODEGEN_PPC64", jit_codegen.ppc64)
set_define("JS_CODEGEN_X86", jit_codegen.x86)
set_define("JS_CODEGEN_X64", jit_codegen.x64)
set_define("JS_CODEGEN_WASM32", jit_codegen.wasm32)
# Profiling
# =======================================================
option(
diff -r 671b771fd1de -r e3eda281a1dc js/src/jit/CodeGenerator.cpp
--- a/js/src/jit/CodeGenerator.cpp Tue Sep 12 10:27:52 2023 -0700
+++ b/js/src/jit/CodeGenerator.cpp Tue Sep 12 19:44:18 2023 -0700
@@ -12513,17 +12513,18 @@
// We're out-of-bounds. We only handle the index == initlength case.
// If index > initializedLength, bail out. Note that this relies on the
// condition flags sticking from the incoming branch.
// Also note: this branch does not need Spectre mitigations, doing that for
// the capacity check below is sufficient.
Label allocElement, addNewElement;
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
- defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64) || \
+ defined(JS_CODEGEN_PPC64)
// Had to reimplement for MIPS because there are no flags.
bailoutCmp32(Assembler::NotEqual, initLength, index, ins->snapshot());
#else
bailoutIf(Assembler::NotEqual, ins->snapshot());
#endif
// If index < capacity, we can add a dense element inline. If not, we need
// to allocate more elements first.
diff -r 671b771fd1de -r e3eda281a1dc js/src/jit/MacroAssembler.cpp
--- a/js/src/jit/MacroAssembler.cpp Tue Sep 12 10:27:52 2023 -0700
+++ b/js/src/jit/MacroAssembler.cpp Tue Sep 12 19:44:18 2023 -0700
@@ -5183,17 +5183,17 @@
ma_sll(temp1, temp1, temp3);
#elif JS_CODEGEN_MIPS64
ma_dsll(temp1, temp1, temp3);
#elif JS_CODEGEN_LOONG64
as_sll_d(temp1, temp1, temp3);
#elif JS_CODEGEN_RISCV64
sll(temp1, temp1, temp3);
#elif JS_CODEGEN_PPC64
- as_sld(temp1, temp1, temp3)
+ as_sld(temp1, temp1, temp3);
#elif JS_CODEGEN_WASM32
MOZ_CRASH();
#elif JS_CODEGEN_NONE
MOZ_CRASH();
#else
# error "Unknown architecture"
#endif
diff -r 671b771fd1de -r e3eda281a1dc js/src/jit/ppc64/Assembler-ppc64.h
--- a/js/src/jit/ppc64/Assembler-ppc64.h Tue Sep 12 10:27:52 2023 -0700
+++ b/js/src/jit/ppc64/Assembler-ppc64.h Tue Sep 12 19:44:18 2023 -0700
@@ -175,25 +175,30 @@
static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::f1,
FloatRegisters::Single};
static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::f1,
FloatRegisters::Double};
static constexpr FloatRegister ABINonArgDoubleReg = {FloatRegisters::f14,
FloatRegisters::Double};
static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
-// Registers used in RegExpMatcher instruction (do not use JSReturnOperand).
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
static constexpr Register RegExpMatcherStringReg = CallTempReg1;
static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
-// Registers used in RegExpTester instruction (do not use ReturnReg).
-static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
-static constexpr Register RegExpTesterStringReg = CallTempReg1;
-static constexpr Register RegExpTesterLastIndexReg = CallTempReg2;
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg1;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg1;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg2;
// TLS pointer argument register for WebAssembly functions. This must not alias
// any other register used for passing function arguments or return values.
// Preserved by WebAssembly functions.
static constexpr Register InstanceReg = r18;
// Registers used for wasm table calls. These registers must be disjoint
// from the ABI argument registers, WasmTlsReg and each other.
diff -r 671b771fd1de -r e3eda281a1dc js/src/jit/ppc64/CodeGenerator-ppc64.cpp
--- a/js/src/jit/ppc64/CodeGenerator-ppc64.cpp Tue Sep 12 10:27:52 2023 -0700
+++ b/js/src/jit/ppc64/CodeGenerator-ppc64.cpp Tue Sep 12 19:44:18 2023 -0700
@@ -1873,17 +1873,17 @@
CodeGeneratorPPC64::toMoveOperand(LAllocation a) const
{
if (a.isGeneralReg())
return MoveOperand(ToRegister(a));
if (a.isFloatReg()) {
return MoveOperand(ToFloatRegister(a));
}
MoveOperand::Kind kind =
- a.isStackArea() ? MoveOperand::EFFECTIVE_ADDRESS : MoveOperand::MEMORY;
+ a.isStackArea() ? MoveOperand::Kind::EffectiveAddress : MoveOperand::Kind::Memory;
Address address = ToAddress(a);
MOZ_ASSERT((address.offset & 3) == 0);
return MoveOperand(address, kind);
}
void
CodeGenerator::visitMathD(LMathD* math)
{
diff -r 671b771fd1de -r e3eda281a1dc js/src/jit/ppc64/MacroAssembler-ppc64-inl.h
--- a/js/src/jit/ppc64/MacroAssembler-ppc64-inl.h Tue Sep 12 10:27:52 2023 -0700
+++ b/js/src/jit/ppc64/MacroAssembler-ppc64-inl.h Tue Sep 12 19:44:18 2023 -0700
@@ -369,16 +369,23 @@
MacroAssembler::mulBy3(Register src, Register dest)
{
// I guess this *is* better than mulli.
MOZ_ASSERT(src != ScratchRegister);
as_add(ScratchRegister, src, src);
as_add(dest, ScratchRegister, src);
}
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ MOZ_ASSERT(src != ScratchRegister);
+ move32(imm, ScratchRegister);
+ as_mulhw(dest, ScratchRegister, src);
+ x_sldi(dest, dest, 32);
+}
+
void
MacroAssembler::inc64(AbsoluteAddress dest)
{
ma_li(SecondScratchReg, ImmWord(uintptr_t(dest.addr)));
as_ld(ThirdScratchReg, SecondScratchReg, 0);
as_addi(ScratchRegister, ThirdScratchReg, 1);
as_std(ScratchRegister, SecondScratchReg, 0);
}
diff -r 671b771fd1de -r e3eda281a1dc js/src/jit/ppc64/MacroAssembler-ppc64.cpp
--- a/js/src/jit/ppc64/MacroAssembler-ppc64.cpp Tue Sep 12 10:27:52 2023 -0700
+++ b/js/src/jit/ppc64/MacroAssembler-ppc64.cpp Tue Sep 12 19:44:18 2023 -0700
@@ -5,27 +5,28 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/ppc64/MacroAssembler-ppc64.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"
-#include <cmath>
-
+#include "jsmath.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/MacroAssembler.h"
#include "jit/MoveEmitter.h"
#include "jit/SharedICRegisters.h"
+#include "util/Memory.h"
#include "vm/JitActivation.h"
+#include "vm/JSContext.h"
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace jit;
using mozilla::Abs;
using mozilla::CheckedInt;
@@ -1225,18 +1226,17 @@
}
void
MacroAssemblerPPC64Compat::movePtr(wasm::SymbolicAddress imm, Register dest)
{
append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
ma_liPatchable(dest, ImmWord(-1));
}
-CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest)
-{
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
return movWithPatch(ImmPtr(nullptr), dest);
}
void
MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
CodeLocationLabel target)
{
PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
@@ -2579,35 +2579,23 @@
Label* label)
{
ADBlock();
MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
Label done;
branchTestGCThing(Assembler::NotEqual, value,
cond == Assembler::Equal ? &done : label);
- if (temp != InvalidReg) {
- unboxGCThingForGCBarrier(value, temp);
- orPtr(Imm32(gc::ChunkMask), temp);
- loadPtr(Address(temp, gc::ChunkStoreBufferOffsetFromLastByte), temp);
- branchPtr(InvertCondition(cond), temp, ImmWord(0), label);
- } else {
- // Honey, Ion stole the temp register again. Get out the baseball
- // bat, would you?
- //
- // Both constants are too large to be immediates.
- unboxGCThingForGCBarrier(value, ScratchRegister);
- ma_li(SecondScratchReg, gc::ChunkMask);
- as_or(SecondScratchReg, ScratchRegister, SecondScratchReg);
- ma_li(ScratchRegister, gc::ChunkStoreBufferOffsetFromLastByte);
- as_add(SecondScratchReg, SecondScratchReg, ScratchRegister);
- as_ld(ScratchRegister, SecondScratchReg, 0);
- as_cmpdi(ScratchRegister, 0);
- ma_bc(InvertCondition(cond), label);
- }
+ // getGCThingValueChunk uses r0 and may use r12.
+ ScratchRegisterScope scratch2(*this);
+
+ getGCThingValueChunk(value, scratch2);
+ loadPtr(Address(scratch2, gc::ChunkStoreBufferOffset), scratch2);
+ branchPtr(InvertCondition(cond), scratch2, ImmWord(0), label);
+
bind(&done);
}
void
MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
const Value& rhs, Label* label)
{
ADBlock();
@@ -4772,52 +4760,34 @@
addCodeLabel(cl);
return retAddr;
}
void
MacroAssembler::loadStoreBuffer(Register ptr, Register buffer)
{
- if (ptr != buffer)
- movePtr(ptr, buffer);
- orPtr(Imm32(gc::ChunkMask), buffer);
- loadPtr(Address(buffer, gc::ChunkStoreBufferOffsetFromLastByte), buffer);
+ ma_and(buffer, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
}
void
MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
Label* label)
{
ADBlock();
- MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
- MOZ_ASSERT(ptr != temp);
- MOZ_ASSERT(ptr != SecondScratchReg);
-
- if (temp != InvalidReg) {
- movePtr(ptr, temp);
- orPtr(Imm32(gc::ChunkMask), temp);
- branchPtr(InvertCondition(cond),
- Address(temp, gc::ChunkStoreBufferOffsetFromLastByte),
- ImmWord(0), label);
- } else {
- // Why, those cheapskates. We have to provide our own temp too?
- // Did the bean counters cut our temp register budget this year?
- // (Ion hits this.)
- MOZ_ASSERT(ptr != ScratchRegister);
-
- // Both offsets are too big to be immediate displacements.
- ma_li(ScratchRegister, gc::ChunkMask);
- as_or(SecondScratchReg, ptr, ScratchRegister);
- ma_li(ScratchRegister, gc::ChunkStoreBufferOffsetFromLastByte);
- as_add(SecondScratchReg, SecondScratchReg, ScratchRegister);
- as_ld(ScratchRegister, SecondScratchReg, 0);
- as_cmpdi(ScratchRegister, 0);
- ma_bc(InvertCondition(cond), label);
- }
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != ScratchRegister); // Both may be used internally.
+ MOZ_ASSERT(temp != ScratchRegister);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ma_and(temp, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ branchPtr(InvertCondition(cond), Address(temp, gc::ChunkStoreBufferOffset),
+ ImmWord(0), label);
}
void
MacroAssembler::comment(const char* msg)
{
Assembler::comment(msg);
}
diff -r 671b771fd1de -r e3eda281a1dc js/src/jit/ppc64/MacroAssembler-ppc64.h
--- a/js/src/jit/ppc64/MacroAssembler-ppc64.h Tue Sep 12 10:27:52 2023 -0700
+++ b/js/src/jit/ppc64/MacroAssembler-ppc64.h Tue Sep 12 19:44:18 2023 -0700
@@ -647,16 +647,31 @@
void unboxGCThingForGCBarrier(const Address& src, Register dest) {
loadPtr(src, dest);
as_rldicl(dest, dest, 0, 64-JSVAL_TAG_SHIFT); // "clrldi"
}
void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
as_rldicl(dest, src.valueReg(), 0, 64-JSVAL_TAG_SHIFT); // "clrldi"
}
+ // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
+ void getGCThingValueChunk(const Address& src, Register dest) {
+// ScratchRegisterScope scratch(asMasm());
+// MOZ_ASSERT(scratch != dest);
+ MOZ_ASSERT(dest != ScratchRegister);
+ loadPtr(src, dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), ScratchRegister);
+ as_and(dest, dest, ScratchRegister);
+ }
+ void getGCThingValueChunk(const ValueOperand& src, Register dest) {
+ MOZ_ASSERT(src.valueReg() != dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest);
+ as_and(dest, dest, src.valueReg());
+ }
+
void unboxInt32(const ValueOperand& operand, Register dest);
void unboxInt32(Register src, Register dest);
void unboxInt32(const Address& src, Register dest);
void unboxInt32(const BaseIndex& src, Register dest);
void unboxBoolean(const ValueOperand& operand, Register dest);
void unboxBoolean(Register src, Register dest);
void unboxBoolean(const Address& src, Register dest);
void unboxBoolean(const BaseIndex& src, Register dest);
@@ -835,16 +850,20 @@
}
void pushValue(JSValueType type, Register reg) {
// Use SecondScratchReg as the temp since boxValue uses ScratchRegister
// for the tag.
boxValue(type, reg, SecondScratchReg);
push(SecondScratchReg);
}
void pushValue(const Address& addr);
+ void pushValue(const BaseIndex& addr, Register scratch) {
+ loadValue(addr, ValueOperand(scratch));
+ pushValue(ValueOperand(scratch));
+ }
void handleFailureWithHandlerTail(Label* profilerExitTail, Label* bailoutTail);
/////////////////////////////////////////////////////////////////
// Common interface.
/////////////////////////////////////////////////////////////////
public:
// The following functions are exposed for use in platform-shared code.
diff -r 671b771fd1de -r e3eda281a1dc js/src/jit/ppc64/Trampoline-ppc64.cpp
--- a/js/src/jit/ppc64/Trampoline-ppc64.cpp Tue Sep 12 10:27:52 2023 -0700
+++ b/js/src/jit/ppc64/Trampoline-ppc64.cpp Tue Sep 12 19:44:18 2023 -0700
@@ -1,28 +1,28 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "mozilla/DebugOnly.h"
-
#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
#include "jit/JitFrames.h"
-#include "jit/JitRealm.h"
-#include "jit/JitSpewer.h"
-#include "jit/Linker.h"
-#include "jit/PerfSpewer.h"
-#include "jit/ppc64/SharedICHelpers-ppc64.h"
+#include "jit/JitRuntime.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/ppc64/SharedICRegisters-ppc64.h"
#include "jit/VMFunctions.h"
-#include "vm/Realm.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
#include "jit/MacroAssembler-inl.h"
-#include "jit/SharedICHelpers-inl.h"
#if DEBUG
/* Useful class to print visual guard blocks. */
class TrampolineAutoDeBlock
{
private:
const char *blockname;
@@ -891,17 +891,17 @@
case VMFunctionData::WordByValue:
if (f.argPassedInFloatReg(explicitArg))
masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
else
masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
argDisp += sizeof(void*);
break;
case VMFunctionData::WordByRef:
- masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
MoveOp::GENERAL);
argDisp += sizeof(void*);
break;
case VMFunctionData::DoubleByValue:
case VMFunctionData::DoubleByRef:
MOZ_CRASH("NYI: PPC64 callVM no support for 128-bit values");
break;
}

View File

@ -1,146 +0,0 @@
# HG changeset patch
# User Cameron Kaiser <spectre@floodgap.com>
# Date 1694753553 25200
# Thu Sep 14 21:52:33 2023 -0700
# Node ID ba4e29926385c655d979fe4c3726f1bedbcc42b7
# Parent 4311f1e4d21272333a719950b15b91a486687ee7
passes blinterp and baseline except for wasm-containing tests
diff -r 4311f1e4d212 -r ba4e29926385 js/src/jit/ppc64/MacroAssembler-ppc64-inl.h
--- a/js/src/jit/ppc64/MacroAssembler-ppc64-inl.h Thu Sep 14 20:18:54 2023 -0700
+++ b/js/src/jit/ppc64/MacroAssembler-ppc64-inl.h Thu Sep 14 21:52:33 2023 -0700
@@ -1910,30 +1910,30 @@
Label* label)
{
ma_bc(cond, lhs, rhs, label);
}
void
MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
{
- MOZ_CRASH();
+ truncDoubleToInt32(src, dest, fail);
}
void
MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
Label* label)
{
ma_bc(cond, lhs, rhs, label);
}
void
MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
{
- MOZ_CRASH();
+ truncDoubleToInt32(src, dest, fail);
}
void
MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest, Label *overflow)
{
as_mulldo_rc(dest, src, dest);
ma_bc(cond, overflow);
}
diff -r 4311f1e4d212 -r ba4e29926385 js/src/jit/ppc64/MacroAssembler-ppc64.cpp
--- a/js/src/jit/ppc64/MacroAssembler-ppc64.cpp Thu Sep 14 20:18:54 2023 -0700
+++ b/js/src/jit/ppc64/MacroAssembler-ppc64.cpp Thu Sep 14 21:52:33 2023 -0700
@@ -2579,22 +2579,19 @@
Label* label)
{
ADBlock();
MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
Label done;
branchTestGCThing(Assembler::NotEqual, value,
cond == Assembler::Equal ? &done : label);
- // getGCThingValueChunk uses r0 and may use r12.
- ScratchRegisterScope scratch2(*this);
-
- getGCThingValueChunk(value, scratch2);
- loadPtr(Address(scratch2, gc::ChunkStoreBufferOffset), scratch2);
- branchPtr(InvertCondition(cond), scratch2, ImmWord(0), label);
+ getGCThingValueChunk(value, SecondScratchReg);
+ loadPtr(Address(SecondScratchReg, gc::ChunkStoreBufferOffset), ScratchRegister);
+ branchPtr(InvertCondition(cond), ScratchRegister, ImmWord(0), label);
bind(&done);
}
void
MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
const Value& rhs, Label* label)
{
diff -r 4311f1e4d212 -r ba4e29926385 js/src/jit/ppc64/Trampoline-ppc64.cpp
--- a/js/src/jit/ppc64/Trampoline-ppc64.cpp Thu Sep 14 20:18:54 2023 -0700
+++ b/js/src/jit/ppc64/Trampoline-ppc64.cpp Thu Sep 14 21:52:33 2023 -0700
@@ -297,22 +297,25 @@
CodeLabel returnLabel;
Label oomReturnLabel;
{
// Handle Interpreter -> Baseline OSR.
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
MOZ_ASSERT(!regs.has(FramePointer));
regs.take(OsrFrameReg);
regs.take(reg_code);
+ MOZ_ASSERT(reg_code == ReturnReg); // regs.take(ReturnReg);
+ MOZ_ASSERT(!regs.has(ReturnReg), "ReturnReg matches reg_code");
+#if(0)
// On Power reg_code and the ReturnReg are always aliased because of
// ABI requirements. The first argument passed, the code pointer,
// comes in r3, and the ABI requires that r3 be the return register.
// Therefore, we don't implement the changes in bug 1770922.
- MOZ_ASSERT(reg_code == ReturnReg); // regs.take(ReturnReg);
regs.take(JSReturnOperand); // ???
+#endif
Label notOsr;
masm.ma_bc(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
Register numStackValues = reg_values;
regs.take(numStackValues);
Register scratch = regs.takeAny();
@@ -765,22 +768,22 @@
}
static void
GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail)
{
PushBailoutFrame(masm, r3);
// Put pointer to BailoutInfo.
- static const uint32_t sizeOfBailoutInfo = sizeof(uintptr_t) * 2;
+ static const uint32_t sizeOfBailoutInfo = sizeof(uintptr_t); // * 2;
masm.subPtr(Imm32(sizeOfBailoutInfo), StackPointer);
masm.movePtr(StackPointer, r4);
using Fn = bool (*)(BailoutStack * sp, BaselineBailoutInfo * *info);
- masm.setupAlignedABICall();
+ masm.setupUnalignedABICall(r5);
masm.passABIArg(r3);
masm.passABIArg(r4);
masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
CheckUnsafeCallWithABI::DontCheckOther);
// Get BailoutInfo pointer.
masm.loadPtr(Address(StackPointer, 0), r5);
@@ -986,16 +989,17 @@
ADBlock("generatePreBarrier");
uint32_t offset = startTrampolineCode(masm);
MOZ_ASSERT(PreBarrierReg == r4);
Register temp1 = r3;
Register temp2 = r5;
Register temp3 = r6;
+ // TODO: could be more efficient with multipush/pop
masm.push(temp1);
masm.push(temp2);
masm.push(temp3);
Label noBarrier;
masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3, &noBarrier);
// Call into C++ to mark this GC thing.

View File

@ -1,195 +0,0 @@
# HG changeset patch
# User Cameron Kaiser <spectre@floodgap.com>
# Date 1694807083 25200
# Fri Sep 15 12:44:43 2023 -0700
# Node ID 3ac07c6a65bceaeb75d59aafa7728388c31ea11d
# Parent ba4e29926385c655d979fe4c3726f1bedbcc42b7
PGO build stuff, hardcode ion off in test build
diff -r ba4e29926385 -r 3ac07c6a65bc build/moz.configure/lto-pgo.configure
--- a/build/moz.configure/lto-pgo.configure Thu Sep 14 21:52:33 2023 -0700
+++ b/build/moz.configure/lto-pgo.configure Fri Sep 15 12:44:43 2023 -0700
@@ -81,17 +81,17 @@
@depends(c_compiler, pgo_profile_path, target_is_windows)
@imports("multiprocessing")
def pgo_flags(compiler, profdata, target_is_windows):
if compiler.type == "gcc":
return namespace(
gen_cflags=["-fprofile-generate"],
gen_ldflags=["-fprofile-generate"],
- use_cflags=["-fprofile-use", "-fprofile-correction", "-Wcoverage-mismatch"],
+ use_cflags=["-fprofile-use", "-fprofile-correction", "-Wno-coverage-mismatch"],
use_ldflags=["-fprofile-use"],
)
if compiler.type in ("clang-cl", "clang"):
prefix = ""
if compiler.type == "clang-cl":
prefix = "/clang:"
gen_ldflags = None
diff -r ba4e29926385 -r 3ac07c6a65bc build/pgo/profileserver.py
--- a/build/pgo/profileserver.py Thu Sep 14 21:52:33 2023 -0700
+++ b/build/pgo/profileserver.py Fri Sep 15 12:44:43 2023 -0700
@@ -82,19 +82,32 @@
docroot=os.path.join(build.topsrcdir, "build", "pgo"),
path_mappings=path_mappings,
)
httpd.start(block=False)
locations = ServerLocations()
locations.add_host(host="127.0.0.1", port=PORT, options="primary,privileged")
- old_profraw_files = glob.glob("*.profraw")
- for f in old_profraw_files:
- os.remove(f)
+ using_gcc = False
+ try:
+ if build.config_environment.substs.get('CC_TYPE') == 'gcc':
+ using_gcc = True
+ except BuildEnvironmentNotFoundException:
+ pass
+
+ if using_gcc:
+ for dirpath, _, filenames in os.walk('.'):
+ for f in filenames:
+ if f.endswith('.gcda'):
+ os.remove(os.path.join(dirpath, f))
+ else:
+ old_profraw_files = glob.glob('*.profraw')
+ for f in old_profraw_files:
+ os.remove(f)
with TemporaryDirectory() as profilePath:
# TODO: refactor this into mozprofile
profile_data_dir = os.path.join(build.topsrcdir, "testing", "profiles")
with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
base_profiles = json.load(fh)["profileserver"]
prefpaths = [
@@ -208,16 +221,20 @@
# Try to move the crash reports to the artifacts even if Firefox appears
# to exit successfully, in case there's a crash that doesn't set the
# return code to non-zero for some reason.
if get_crashreports(profilePath, name="Firefox exited successfully?") != 0:
print("Firefox exited successfully, but produced a crashreport")
sys.exit(1)
+ print('Copying profile data....')
+ os.system('pwd');
+ os.system('tar cf profdata.tar.gz `find . -name "*.gcda"`; cd ..; tar xf instrumented/profdata.tar.gz;');
+
llvm_profdata = env.get("LLVM_PROFDATA")
if llvm_profdata:
profraw_files = glob.glob("*.profraw")
if not profraw_files:
print(
"Could not find profraw files in the current directory: %s"
% os.getcwd()
)
diff -r ba4e29926385 -r 3ac07c6a65bc js/xpconnect/src/XPCJSContext.cpp
--- a/js/xpconnect/src/XPCJSContext.cpp Thu Sep 14 21:52:33 2023 -0700
+++ b/js/xpconnect/src/XPCJSContext.cpp Fri Sep 15 12:44:43 2023 -0700
@@ -895,18 +895,18 @@
false);
JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_JIT_HINTS_ENABLE, false);
sSelfHostedUseSharedMemory = false;
} else {
JS_SetGlobalJitCompilerOption(
cx, JSJITCOMPILER_BASELINE_ENABLE,
StaticPrefs::javascript_options_baselinejit_DoNotUseDirectly());
JS_SetGlobalJitCompilerOption(
- cx, JSJITCOMPILER_ION_ENABLE,
- StaticPrefs::javascript_options_ion_DoNotUseDirectly());
+ cx, JSJITCOMPILER_ION_ENABLE, false); // XXX
+ //StaticPrefs::javascript_options_ion_DoNotUseDirectly());
JS_SetGlobalJitCompilerOption(cx,
JSJITCOMPILER_JIT_TRUSTEDPRINCIPALS_ENABLE,
useJitForTrustedPrincipals);
JS_SetGlobalJitCompilerOption(
cx, JSJITCOMPILER_NATIVE_REGEXP_ENABLE,
StaticPrefs::javascript_options_native_regexp_DoNotUseDirectly());
// Only enable the jit hints cache for the content process to avoid
// any possible jank or delays on the parent process.
diff -r ba4e29926385 -r 3ac07c6a65bc modules/libpref/init/all.js
--- a/modules/libpref/init/all.js Thu Sep 14 21:52:33 2023 -0700
+++ b/modules/libpref/init/all.js Fri Sep 15 12:44:43 2023 -0700
@@ -951,21 +951,21 @@
// that are associated with other domains which have
// user interaction (even if they don't have user
// interaction directly).
pref("privacy.purge_trackers.consider_entity_list", false);
pref("dom.event.contextmenu.enabled", true);
pref("javascript.enabled", true);
-pref("javascript.options.asmjs", true);
-pref("javascript.options.wasm", true);
-pref("javascript.options.wasm_trustedprincipals", true);
+pref("javascript.options.asmjs", false);
+pref("javascript.options.wasm", false);
+pref("javascript.options.wasm_trustedprincipals", false);
pref("javascript.options.wasm_verbose", false);
-pref("javascript.options.wasm_baselinejit", true);
+pref("javascript.options.wasm_baselinejit", false);
pref("javascript.options.parallel_parsing", true);
pref("javascript.options.source_pragmas", true);
pref("javascript.options.asyncstack", true);
// Broadly capturing async stack data adds overhead that is only advisable for
// developers, so we only enable it when the devtools are open, by default.
pref("javascript.options.asyncstack_capture_debuggee_only", true);
diff -r ba4e29926385 -r 3ac07c6a65bc third_party/libwebrtc/moz.build
--- a/third_party/libwebrtc/moz.build Thu Sep 14 21:52:33 2023 -0700
+++ b/third_party/libwebrtc/moz.build Fri Sep 15 12:44:43 2023 -0700
@@ -675,17 +675,22 @@
"/third_party/libwebrtc/modules/audio_processing/agc2/rnn_vad/vector_math_avx2_gn",
"/third_party/libwebrtc/modules/desktop_capture/desktop_capture_differ_sse2_gn"
]
if CONFIG["CPU_ARCH"] == "ppc64" and CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
DIRS += [
"/third_party/libwebrtc/modules/desktop_capture/desktop_capture_gn",
- "/third_party/libwebrtc/modules/desktop_capture/primitives_gn"
+ "/third_party/libwebrtc/modules/desktop_capture/primitives_gn",
+ "/third_party/libwebrtc/modules/portal/portal_gn",
+ "/third_party/libwebrtc/third_party/drm/drm_gn",
+ "/third_party/libwebrtc/third_party/gbm/gbm_gn",
+ "/third_party/libwebrtc/third_party/libepoxy/libepoxy_gn",
+ "/third_party/libwebrtc/third_party/pipewire/pipewire_gn"
]
if CONFIG["CPU_ARCH"] == "riscv64" and CONFIG["MOZ_X11"] == "1" and CONFIG["OS_TARGET"] == "Linux":
DIRS += [
"/third_party/libwebrtc/modules/desktop_capture/desktop_capture_gn",
"/third_party/libwebrtc/modules/desktop_capture/primitives_gn"
]
diff -r ba4e29926385 -r 3ac07c6a65bc toolkit/components/terminator/nsTerminator.cpp
--- a/toolkit/components/terminator/nsTerminator.cpp Thu Sep 14 21:52:33 2023 -0700
+++ b/toolkit/components/terminator/nsTerminator.cpp Fri Sep 15 12:44:43 2023 -0700
@@ -455,16 +455,21 @@
// Defend against overflow
crashAfterMS = INT32_MAX;
} else {
crashAfterMS *= scaleUp;
}
}
#endif
+ // Disable watchdog for PGO train builds - writing profile information at
+ // exit may take time and it is better to make the build hang rather than
+ // silently produce a poorly performing binary.
+ crashAfterMS = INT32_MAX;
+
UniquePtr<Options> options(new Options());
// crashAfterTicks is guaranteed to be > 0 as
// crashAfterMS >= ADDITIONAL_WAIT_BEFORE_CRASH_MS >> HEARTBEAT_INTERVAL_MS
options->crashAfterTicks = crashAfterMS / HEARTBEAT_INTERVAL_MS;
DebugOnly<PRThread*> watchdogThread =
CreateSystemThread(RunWatchdog, options.release());
MOZ_ASSERT(watchdogThread);

View File

@ -1,285 +0,0 @@
# HG changeset patch
# User Cameron Kaiser <spectre@floodgap.com>
# Date 1695240514 25200
# Wed Sep 20 13:08:34 2023 -0700
# Node ID bd8eea54a76bd887fd7741eb252ee8bc09bf79f2
# Parent 3ac07c6a65bceaeb75d59aafa7728388c31ea11d
Ion fixes
diff -r 3ac07c6a65bc -r bd8eea54a76b js/src/jit/JitFrames.cpp
--- a/js/src/jit/JitFrames.cpp Fri Sep 15 12:44:43 2023 -0700
+++ b/js/src/jit/JitFrames.cpp Wed Sep 20 13:08:34 2023 -0700
@@ -1701,17 +1701,25 @@
case RValueAllocation::CST_NULL:
return NullValue();
case RValueAllocation::DOUBLE_REG:
return DoubleValue(fromRegister<double>(alloc.fpuReg()));
case RValueAllocation::ANY_FLOAT_REG:
+#if defined(JS_CODEGEN_PPC64)
+ // There is no (simple) way from the ISA to determine if an arbitrary
+ // FPR contains a float or a double since the ISA treats them largely
+ // synonymously, so the MachineState will always contain a double even
+ // if it's encoding a float.
+ return Float32Value((float)fromRegister<double>(alloc.fpuReg()));
+#else
return Float32Value(fromRegister<float>(alloc.fpuReg()));
+#endif
case RValueAllocation::ANY_FLOAT_STACK:
return Float32Value(ReadFrameFloat32Slot(fp_, alloc.stackOffset()));
case RValueAllocation::TYPED_REG:
return FromTypedPayload(alloc.knownType(), fromRegister(alloc.reg2()));
case RValueAllocation::TYPED_STACK: {
@@ -2316,20 +2324,21 @@
uintptr_t* addr = state_.as<SafepointState>().addressOfRegister(reg);
return *addr;
}
MOZ_CRASH("Invalid state");
}
template <typename T>
T MachineState::read(FloatRegister reg) const {
-#if !defined(JS_CODEGEN_RISCV64)
+#if !defined(JS_CODEGEN_RISCV64) && !defined(JS_CODEGEN_PPC64)
MOZ_ASSERT(reg.size() == sizeof(T));
#else
// RISCV64 always store FloatRegister as 64bit.
+ // So does Power ISA (see SnapshotIterator::allocationValue).
MOZ_ASSERT(reg.size() == sizeof(double));
#endif
#if !defined(JS_CODEGEN_NONE) && !defined(JS_CODEGEN_WASM32)
if (state_.is<BailoutState>()) {
uint32_t offset = reg.getRegisterDumpOffsetInBytes();
MOZ_ASSERT((offset % sizeof(T)) == 0);
MOZ_ASSERT((offset + sizeof(T)) <= sizeof(RegisterDump::FPUArray));
diff -r 3ac07c6a65bc -r bd8eea54a76b js/src/jit/LIR.h
--- a/js/src/jit/LIR.h Fri Sep 15 12:44:43 2023 -0700
+++ b/js/src/jit/LIR.h Wed Sep 20 13:08:34 2023 -0700
@@ -547,17 +547,17 @@
static LDefinition BogusTemp() { return LDefinition(); }
Policy policy() const {
return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK);
}
Type type() const { return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); }
static bool isFloatRegCompatible(Type type, FloatRegister reg) {
-#ifdef JS_CODEGEN_RISCV64
+#if defined(JS_CODEGEN_RISCV64) || defined(JS_CODEGEN_PPC64)
if (type == FLOAT32 || type == DOUBLE) {
return reg.isSingle() || reg.isDouble();
}
#else
if (type == FLOAT32) {
return reg.isSingle();
}
if (type == DOUBLE) {
diff -r 3ac07c6a65bc -r bd8eea54a76b js/src/jit/ppc64/CodeGenerator-ppc64.cpp
--- a/js/src/jit/ppc64/CodeGenerator-ppc64.cpp Fri Sep 15 12:44:43 2023 -0700
+++ b/js/src/jit/ppc64/CodeGenerator-ppc64.cpp Wed Sep 20 13:08:34 2023 -0700
@@ -1364,18 +1364,17 @@
MOZ_ASSERT(shift == 1);
masm.x_srwi(tmp, lhs, 31);
masm.add32(lhs, tmp);
}
// Do the shift.
masm.as_srawi(dest, tmp, shift);
} else {
- if (lhs != dest)
- masm.move32(lhs, dest);
+ masm.move32(lhs, dest);
}
}
void
CodeGenerator::visitModI(LModI* ins)
{
ADBlock();
@@ -1627,45 +1626,35 @@
Register dest = ToRegister(ins->output());
if (rhs->isConstant()) {
int32_t shift = ToInt32(rhs) & 0x1F;
switch (ins->bitop()) {
case JSOp::Lsh:
if (shift)
masm.x_slwi(dest, lhs, shift);
- else if (dest != lhs)
+ else
masm.move32(lhs, dest);
break;
case JSOp::Rsh:
if (shift)
masm.as_srawi(dest, lhs, shift);
- else if (dest != lhs)
+ else
masm.move32(lhs, dest);
break;
case JSOp::Ursh:
if (shift) {
masm.x_srwi(dest, lhs, shift);
-#if(0)
- } else if (ins->mir()->toUrsh()->fallible()) {
+ } else {
// x >>> 0 can overflow.
- masm.as_extsw(ScratchRegister, lhs);
- bailoutCmp32(Assembler::LessThan, ScratchRegister, Imm32(0), ins->snapshot());
- } else {
+ if (ins->mir()->toUrsh()->fallible()) {
+ bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
+ }
masm.move32(lhs, dest);
}
-#else
- } else {
- // x >>> 0 can overflow.
- if (ins->mir()->toUrsh()->fallible())
- bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
- if (dest != lhs)
- masm.move32(lhs, dest);
- }
-#endif
break;
default:
MOZ_CRASH("Unexpected shift op");
}
} else {
// The shift amounts should be AND'ed into the 0-31 range.
masm.as_andi_rc(dest, ToRegister(rhs), 0x1f);
@@ -1675,22 +1664,17 @@
break;
case JSOp::Rsh:
masm.as_sraw(dest, lhs, dest);
break;
case JSOp::Ursh:
masm.as_srw(dest, lhs, dest);
if (ins->mir()->toUrsh()->fallible()) {
// x >>> 0 can overflow.
-#if(0)
- masm.as_extsw(ScratchRegister, lhs);
- bailoutCmp32(Assembler::LessThan, ScratchRegister, Imm32(0), ins->snapshot());
-#else
bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
-#endif
}
break;
default:
MOZ_CRASH("Unexpected shift op");
}
}
}
diff -r 3ac07c6a65bc -r bd8eea54a76b js/src/jit/ppc64/MacroAssembler-ppc64-inl.h
--- a/js/src/jit/ppc64/MacroAssembler-ppc64-inl.h Fri Sep 15 12:44:43 2023 -0700
+++ b/js/src/jit/ppc64/MacroAssembler-ppc64-inl.h Wed Sep 20 13:08:34 2023 -0700
@@ -369,21 +369,29 @@
MacroAssembler::mulBy3(Register src, Register dest)
{
// I guess this *is* better than mulli.
MOZ_ASSERT(src != ScratchRegister);
as_add(ScratchRegister, src, src);
as_add(dest, ScratchRegister, src);
}
+// This is used in MacroAssembler::loadInt32ToStringWithBase. Instead of
+// letting us use our superior arithmetic instructions, the JIT has reduced
+// us to faffing around with magic constants because that's what x86* does.
+// This leads to sign extension hazards.
void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
MOZ_ASSERT(src != ScratchRegister);
+ // Compensate for (likely) sign extension by always clearing upper bits.
move32(imm, ScratchRegister);
- as_mulhw(dest, ScratchRegister, src);
- x_sldi(dest, dest, 32);
+ as_rldicl(ScratchRegister, ScratchRegister, 0, 32); // "clrldi"
+ // loadInt32ToStringWithBase expects what is effectively unsigned multiply.
+ as_mulhwu(dest, ScratchRegister, src);
+ // Clear upper bits again, as they are undefined by the spec.
+ as_rldicl(dest, dest, 0, 32); // "clrldi"
}
void
MacroAssembler::inc64(AbsoluteAddress dest)
{
ma_li(SecondScratchReg, ImmWord(uintptr_t(dest.addr)));
as_ld(ThirdScratchReg, SecondScratchReg, 0);
as_addi(ScratchRegister, ThirdScratchReg, 1);
diff -r 3ac07c6a65bc -r bd8eea54a76b js/src/jit/ppc64/MacroAssembler-ppc64.cpp
--- a/js/src/jit/ppc64/MacroAssembler-ppc64.cpp Fri Sep 15 12:44:43 2023 -0700
+++ b/js/src/jit/ppc64/MacroAssembler-ppc64.cpp Wed Sep 20 13:08:34 2023 -0700
@@ -4769,21 +4769,23 @@
void
MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
Label* label)
{
ADBlock();
MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
MOZ_ASSERT(ptr != temp);
MOZ_ASSERT(ptr != ScratchRegister); // Both may be used internally.
- MOZ_ASSERT(temp != ScratchRegister);
- MOZ_ASSERT(temp != InvalidReg);
-
- ma_and(temp, ptr, Imm32(int32_t(~gc::ChunkMask)));
- branchPtr(InvertCondition(cond), Address(temp, gc::ChunkStoreBufferOffset),
+ MOZ_ASSERT(ptr != SecondScratchReg);
+ MOZ_ASSERT(temp != ScratchRegister); // probably unpossible
+ MOZ_ASSERT(temp != SecondScratchReg);
+ MOZ_ASSERT(gc::ChunkStoreBufferOffset < 32767);
+
+ ma_and(SecondScratchReg, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ branchPtr(InvertCondition(cond), Address(SecondScratchReg, gc::ChunkStoreBufferOffset),
ImmWord(0), label);
}
void
MacroAssembler::comment(const char* msg)
{
Assembler::comment(msg);
}
diff -r 3ac07c6a65bc -r bd8eea54a76b js/src/wasm/WasmFrameIter.cpp
--- a/js/src/wasm/WasmFrameIter.cpp Fri Sep 15 12:44:43 2023 -0700
+++ b/js/src/wasm/WasmFrameIter.cpp Wed Sep 20 13:08:34 2023 -0700
@@ -1286,17 +1286,33 @@
#elif defined(JS_CODEGEN_ARM)
if (offsetFromEntry == BeforePushRetAddr || codeRange->isThunk()) {
// The return address is still in lr and fp holds the caller's fp.
fixedPC = (uint8_t*)registers.lr;
fixedFP = fp;
AssertMatchesCallSite(fixedPC, fixedFP);
} else
#elif defined(JS_CODEGEN_PPC64)
- MOZ_ASSERT(0);
+ if (codeRange->isThunk()) {
+ // The FarJumpIsland sequence temporarily scrambles LR.
+ // Don't unwind to the caller.
+ fixedPC = pc;
+ fixedFP = fp;
+ *unwoundCaller = false;
+ AssertMatchesCallSite(
+ Frame::fromUntaggedWasmExitFP(fp)->returnAddress(),
+ Frame::fromUntaggedWasmExitFP(fp)->rawCaller());
+ } else if (offsetFromEntry < PushedFP) {
+ // On ppc64 we rely on register state instead of state saved on
+ // stack until the wasm::Frame is completely built.
+ // On entry the return address is in LR and fp holds the caller's fp.
+ fixedPC = (uint8_t*)registers.lr;
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else
#endif
if (offsetFromEntry == PushedRetAddr || codeRange->isThunk()) {
// The return address has been pushed on the stack but fp still
// points to the caller's fp.
fixedPC = sp[0];
fixedFP = fp;
AssertMatchesCallSite(fixedPC, fixedFP);
} else if (offsetFromEntry == PushedFP) {

View File

@ -1,236 +0,0 @@
# HG changeset patch
# User Cameron Kaiser <spectre@floodgap.com>
# Date 1695318886 25200
# Thu Sep 21 10:54:46 2023 -0700
# Node ID 23890c8cfb6523602d62886442866799431e490d
# Parent bd8eea54a76bd887fd7741eb252ee8bc09bf79f2
clean up more fails and unfunk wasm
diff -r bd8eea54a76b -r 23890c8cfb65 js/src/builtin/TestingFunctions.cpp
--- a/js/src/builtin/TestingFunctions.cpp Wed Sep 20 13:08:34 2023 -0700
+++ b/js/src/builtin/TestingFunctions.cpp Thu Sep 21 10:54:46 2023 -0700
@@ -455,16 +455,25 @@
value = BooleanValue(true);
#else
value = BooleanValue(false);
#endif
if (!JS_SetProperty(cx, info, "riscv64", value)) {
return false;
}
+#ifdef JS_CODEGEN_PPC64
+ value = BooleanValue(true);
+#else
+ value = BooleanValue(false);
+#endif
+ if (!JS_SetProperty(cx, info, "ppc64", value)) {
+ return false;
+ }
+
#ifdef JS_SIMULATOR_RISCV64
value = BooleanValue(true);
#else
value = BooleanValue(false);
#endif
if (!JS_SetProperty(cx, info, "riscv64-simulator", value)) {
return false;
}
diff -r bd8eea54a76b -r 23890c8cfb65 js/src/jit-test/tests/gc/gcparam.js
--- a/js/src/jit-test/tests/gc/gcparam.js Wed Sep 20 13:08:34 2023 -0700
+++ b/js/src/jit-test/tests/gc/gcparam.js Thu Sep 21 10:54:46 2023 -0700
@@ -25,17 +25,19 @@
testGetParam("totalChunks");
testGetParam("nurseryBytes");
testGetParam("majorGCNumber");
testGetParam("minorGCNumber");
testGetParam("chunkBytes");
testGetParam("helperThreadCount");
testChangeParam("maxBytes");
-testChangeParam("minNurseryBytes", 16 * 1024);
+// This cannot be lower than 64K due to 64K page systems, like some ppc64le
+// machines in Linux.
+testChangeParam("minNurseryBytes", 64 * 1024);
testChangeParam("maxNurseryBytes", 1024 * 1024);
testChangeParam("incrementalGCEnabled");
testChangeParam("perZoneGCEnabled");
testChangeParam("sliceTimeBudgetMS");
testChangeParam("highFrequencyTimeLimit");
testChangeParam("smallHeapSizeMax");
testChangeParam("largeHeapSizeMin");
testChangeParam("highFrequencySmallHeapGrowth");
diff -r bd8eea54a76b -r 23890c8cfb65 js/src/jit-test/tests/gc/oomInRegExp2.js
--- a/js/src/jit-test/tests/gc/oomInRegExp2.js Wed Sep 20 13:08:34 2023 -0700
+++ b/js/src/jit-test/tests/gc/oomInRegExp2.js Thu Sep 21 10:54:46 2023 -0700
@@ -1,5 +1,6 @@
-// |jit-test| skip-if: !('oomTest' in this)
+// |jit-test| skip-if: !('oomTest' in this) || getBuildConfiguration().ppc64
+// On ppc64, this will never exhaust memory before timing out.
oomTest(() => assertEq("foobar\xff5baz\u1200".search(/bar\u0178\d/i), 3), {keepFailing: true});
oomTest(() => assertEq((/(?!(?!(?!6)[\Wc]))/i).test(), false), {keepFailing: true});
oomTest(() => assertEq((/bar\u0178\d/i).exec("foobar\xff5baz\u1200") != null, true), {keepFailing: true});
diff -r bd8eea54a76b -r 23890c8cfb65 js/src/jit-test/tests/modules/bug1670236.js
--- a/js/src/jit-test/tests/modules/bug1670236.js Wed Sep 20 13:08:34 2023 -0700
+++ b/js/src/jit-test/tests/modules/bug1670236.js Thu Sep 21 10:54:46 2023 -0700
@@ -1,6 +1,8 @@
-// |jit-test| skip-if: !('oomTest' in this)
+// |jit-test| skip-if: !('oomTest' in this) || getBuildConfiguration().ppc64
+// On ppc64, this will never exhaust memory before timing out.
+
o0=r=/x/;
this.toString=(function() {
evaluate("",({ element:o0 }));
})
oomTest(String.prototype.charCodeAt,{ keepFailing:true })
diff -r bd8eea54a76b -r 23890c8cfb65 js/src/jit-test/tests/promise/unhandled-rejections-oom.js
--- a/js/src/jit-test/tests/promise/unhandled-rejections-oom.js Wed Sep 20 13:08:34 2023 -0700
+++ b/js/src/jit-test/tests/promise/unhandled-rejections-oom.js Thu Sep 21 10:54:46 2023 -0700
@@ -1,3 +1,4 @@
-// |jit-test| allow-oom; skip-if: !('oomTest' in this)
+// |jit-test| allow-oom; skip-if: !('oomTest' in this) || getBuildConfiguration().ppc64
+// On ppc64, this will never exhaust memory before timing out.
oomTest(async function() {}, { keepFailing: true });
diff -r bd8eea54a76b -r 23890c8cfb65 js/src/jit/ppc64/Architecture-ppc64.h
--- a/js/src/jit/ppc64/Architecture-ppc64.h Wed Sep 20 13:08:34 2023 -0700
+++ b/js/src/jit/ppc64/Architecture-ppc64.h Thu Sep 21 10:54:46 2023 -0700
@@ -12,24 +12,38 @@
#include "jit/shared/Architecture-shared.h"
#include "js/Utility.h"
namespace js {
namespace jit {
-// Used to protect the stack from linkage area clobbers. Minimum size
-// is 4 doublewords for SP, LR, CR and TOC.
-static const uint32_t ShadowStackSpace = 32;
+// Despite my hopes, this does not help protect Wasm Frames from ABI callouts
+// unknowingly stomping on them expecting a regular linkage area; the "shadow
+// stack space" that this allocates is actually allocated at *higher* addresses
+// than the Frame. The Frame demands to be on top of the stack, but that's
+// exactly where the linkage area is supposed to go, and everything assumes
+// that the Frame will be exactly two pointers in size which defeats my earlier
+// attempt to just add the linkage area to the Frame. (On top of that, Wasm GC
+// won't let you nab more than 32 bytes anyway, the bare minimum space required
+// for simply LR, TOC, CR and SP, and includes no parameter area.) Instead, for
+// now we have to tediously pull down dummy frames on demand when calling out
+// to heavy functions that are ABI-compliant. This also does nothing for the
+// regular JIT, where periodically we need to do the same thing.
+//
+// See also MacroAssembler::call(wasm::SymbolicAddress) in
+// MacroAssembler-ppc64.cpp.
+static const uint32_t ShadowStackSpace = 0;
+
// The return address is in LR, not in memory/stack.
static const uint32_t SizeOfReturnAddressAfterCall = 0u;
// Size of each bailout table entry.
-// For PowerPC this is a single bl.
+// For Power ISA this is a single bl.
static const uint32_t BAILOUT_TABLE_ENTRY_SIZE = sizeof(void *);
// Range of an immediate jump (26 bit jumps). Take a fudge out in case.
static constexpr uint32_t JumpImmediateRange = (32 * 1024 * 1024) - 32;
// GPRs.
class Registers
{
diff -r bd8eea54a76b -r 23890c8cfb65 js/src/jit/ppc64/MacroAssembler-ppc64.cpp
--- a/js/src/jit/ppc64/MacroAssembler-ppc64.cpp Wed Sep 20 13:08:34 2023 -0700
+++ b/js/src/jit/ppc64/MacroAssembler-ppc64.cpp Thu Sep 21 10:54:46 2023 -0700
@@ -4621,18 +4621,36 @@
Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)offset);
FlushICache(inst, sizeof(uint32_t) * 5);
}
CodeOffset
MacroAssembler::call(wasm::SymbolicAddress target)
{
+ ADBlock();
+
+ // This call is very likely to ABI compliant code. Since this is coming
+ // from Wasm and Wasm Frames sit on the top of the stack where the linkage
+ // area goes, we need to pull down a dummy ABI stack frame to prevent the
+ // callee from unwittingly stomping on the Wasm Frame. ShadowStackSpace
+ // does not fix this; see Architecture-ppc64le.h for a more intemperate
+ // explanation. We can get away with this in the general case because the
+ // argument registers have already been calculated relative to the prior
+ // (unsafe) value of the stack pointer. If it's not to ABI compliant code,
+ // then we just bloat the stack temporarily and life goes on.
+ //
+ // 512 bytes ought to be enough for anybody ...
+ as_addi(StackPointer, StackPointer, -512);
movePtr(target, CallReg);
- return call(CallReg);
+ // XXX: No current consumer seems to care about the return value.
+ // Should it be after the call, or after the stack pointer adjustment?
+ CodeOffset c = call(CallReg);
+ as_addi(StackPointer, StackPointer, 512);
+ return c;
}
void
MacroAssembler::call(const Address& addr)
{
loadPtr(addr, CallReg);
call(CallReg);
}
diff -r bd8eea54a76b -r 23890c8cfb65 js/src/wasm/WasmStubs.cpp
--- a/js/src/wasm/WasmStubs.cpp Wed Sep 20 13:08:34 2023 -0700
+++ b/js/src/wasm/WasmStubs.cpp Thu Sep 21 10:54:46 2023 -0700
@@ -2134,26 +2134,17 @@
masm.storePtr(scratch,
Address(masm.getStackPointer(), i->offsetFromArgBase()));
}
i++;
MOZ_ASSERT(i.done());
// Make the call, test whether it succeeded, and extract the return value.
AssertStackAlignment(masm, ABIStackAlignment);
-#ifdef JS_CODEGEN_PPC64
- // Because this is calling an ABI-compliant function, we have to pull down
- // a dummy linkage area or the values on the stack will be stomped on. The
- // minimum size is sufficient.
- masm.as_addi(masm.getStackPointer(), masm.getStackPointer(), -32);
-#endif
masm.call(SymbolicAddress::CallImport_General);
-#ifdef JS_CODEGEN_PPC64
- masm.as_addi(masm.getStackPointer(), masm.getStackPointer(), 32);
-#endif
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
ResultType resultType = ResultType::Vector(funcType.results());
ValType registerResultType;
for (ABIResultIter iter(resultType); !iter.done(); iter.next()) {
if (iter.cur().inRegister()) {
MOZ_ASSERT(!registerResultType.isValid());
registerResultType = iter.cur().type();
@@ -2680,19 +2671,24 @@
// PushRegsInMask strips out the high lanes of the XMM registers in this case,
// while the singles will be stripped as they are aliased by the larger doubles.
static const LiveRegisterSet RegsToPreserve(
GeneralRegisterSet(Registers::AllMask &
~(Registers::SetType(1) << Registers::StackPointer)),
FloatRegisterSet(FloatRegisters::AllMask));
#elif defined(JS_CODEGEN_PPC64)
// Note that this includes no SPRs, since the JIT is unaware of them.
+// Since we ass-U-me that traps don't occur while LR (an SPR, not a GPR) is
+// live, then we can clobber it and don't have to push it anyway.
static const LiveRegisterSet RegsToPreserve(
GeneralRegisterSet(Registers::AllMask),
FloatRegisterSet(FloatRegisters::AllMask));
+# ifdef ENABLE_WASM_SIMD
+# error "high lanes of SIMD registers need to be saved too."
+# endif
#else
static const LiveRegisterSet RegsToPreserve(
GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask));
# ifdef ENABLE_WASM_SIMD
# error "no SIMD support"
# endif
#endif

View File

@ -1,70 +0,0 @@
# HG changeset patch
# User Cameron Kaiser <spectre@floodgap.com>
# Date 1695355123 25200
# Thu Sep 21 20:58:43 2023 -0700
# Node ID 1771d1807f7bfd16be4631b7485f010cfb64031d
# Parent 23890c8cfb6523602d62886442866799431e490d
last wasm fails fixed, passes jit_test and jstests
diff -r 23890c8cfb65 -r 1771d1807f7b js/src/jit/ppc64/Assembler-ppc64.cpp
--- a/js/src/jit/ppc64/Assembler-ppc64.cpp Thu Sep 21 10:54:46 2023 -0700
+++ b/js/src/jit/ppc64/Assembler-ppc64.cpp Thu Sep 21 20:58:43 2023 -0700
@@ -38,32 +38,35 @@
{
switch (type) {
case MIRType::Int32:
case MIRType::Int64:
case MIRType::Pointer:
case MIRType::RefOrNull:
case MIRType::StackResults: {
if (usedGPRs_ > 7) {
- MOZ_ASSERT(IsCompilingWasm(), "no stack corruption from GPR overflow kthxbye");
+ // We only support spilling arguments to the stack with Wasm calls,
+ // but we could be generating Wasm code from the interpreter, so
+ // we can't assume there is a JIT context available.
+ MOZ_ASSERT(!MaybeGetJitContext() || IsCompilingWasm(), "no stack corruption from GPR overflow kthxbye");
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uintptr_t);
break;
}
// Note: we could be passing a full 64-bit quantity as an argument to,
// say, uint32_t. We have to compensate for that in other ways when
// it makes a difference (see notes in wasm).
current_ = ABIArg(Register::FromCode((Register::Code)(usedGPRs_ + 3)));
usedGPRs_++;
break;
}
case MIRType::Float32:
case MIRType::Double: {
if (usedFPRs_ == 12) {
- MOZ_ASSERT(IsCompilingWasm(), "no stack corruption from FPR overflow kthxbye");
+ MOZ_ASSERT(!MaybeGetJitContext() || IsCompilingWasm(), "no stack corruption from FPR overflow kthxbye");
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(double); // keep stack aligned to double
break;
}
current_ = ABIArg(FloatRegister(FloatRegisters::Encoding(usedFPRs_ + 1),
type == MIRType::Double ? FloatRegisters::Double : FloatRegisters::Single));
usedGPRs_++;
usedFPRs_++;
diff -r 23890c8cfb65 -r 1771d1807f7b js/src/jit/ppc64/MacroAssembler-ppc64.cpp
--- a/js/src/jit/ppc64/MacroAssembler-ppc64.cpp Thu Sep 21 10:54:46 2023 -0700
+++ b/js/src/jit/ppc64/MacroAssembler-ppc64.cpp Thu Sep 21 20:58:43 2023 -0700
@@ -2235,16 +2235,17 @@
jump(bailoutTail);
// If we are throwing and the innermost frame was a wasm frame, reset SP and
// FP; SP is pointing to the unwound return address to the wasm entry, so
// we can just ret().
bind(&wasm);
loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()), FramePointer);
loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()), StackPointer);
+ ma_li(InstanceReg, ImmWord(wasm::FailInstanceReg));
ret();
// Found a wasm catch handler, restore state and jump to it.
bind(&wasmCatch);
loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), r12);
xs_mtctr(r12);
loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
FramePointer);

View File

@ -1,61 +0,0 @@
# HG changeset patch
# User Cameron Kaiser <spectre@floodgap.com>
# Date 1695355376 25200
# Thu Sep 21 21:02:56 2023 -0700
# Node ID 4404797bd39a18f98b2f1a2c65ffe079404c2ee6
# Parent 1771d1807f7bfd16be4631b7485f010cfb64031d
ion and wasm back on in browser build
diff -r 1771d1807f7b -r 4404797bd39a js/xpconnect/src/XPCJSContext.cpp
--- a/js/xpconnect/src/XPCJSContext.cpp Thu Sep 21 20:58:43 2023 -0700
+++ b/js/xpconnect/src/XPCJSContext.cpp Thu Sep 21 21:02:56 2023 -0700
@@ -895,18 +895,18 @@
false);
JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_JIT_HINTS_ENABLE, false);
sSelfHostedUseSharedMemory = false;
} else {
JS_SetGlobalJitCompilerOption(
cx, JSJITCOMPILER_BASELINE_ENABLE,
StaticPrefs::javascript_options_baselinejit_DoNotUseDirectly());
JS_SetGlobalJitCompilerOption(
- cx, JSJITCOMPILER_ION_ENABLE, false); // XXX
- //StaticPrefs::javascript_options_ion_DoNotUseDirectly());
+ cx, JSJITCOMPILER_ION_ENABLE,
+ StaticPrefs::javascript_options_ion_DoNotUseDirectly());
JS_SetGlobalJitCompilerOption(cx,
JSJITCOMPILER_JIT_TRUSTEDPRINCIPALS_ENABLE,
useJitForTrustedPrincipals);
JS_SetGlobalJitCompilerOption(
cx, JSJITCOMPILER_NATIVE_REGEXP_ENABLE,
StaticPrefs::javascript_options_native_regexp_DoNotUseDirectly());
// Only enable the jit hints cache for the content process to avoid
// any possible jank or delays on the parent process.
diff -r 1771d1807f7b -r 4404797bd39a modules/libpref/init/all.js
--- a/modules/libpref/init/all.js Thu Sep 21 20:58:43 2023 -0700
+++ b/modules/libpref/init/all.js Thu Sep 21 21:02:56 2023 -0700
@@ -951,21 +951,21 @@
// that are associated with other domains which have
// user interaction (even if they don't have user
// interaction directly).
pref("privacy.purge_trackers.consider_entity_list", false);
pref("dom.event.contextmenu.enabled", true);
pref("javascript.enabled", true);
-pref("javascript.options.asmjs", false);
-pref("javascript.options.wasm", false);
-pref("javascript.options.wasm_trustedprincipals", false);
+pref("javascript.options.asmjs", true);
+pref("javascript.options.wasm", true);
+pref("javascript.options.wasm_trustedprincipals", true);
pref("javascript.options.wasm_verbose", false);
-pref("javascript.options.wasm_baselinejit", false);
+pref("javascript.options.wasm_baselinejit", true);
pref("javascript.options.parallel_parsing", true);
pref("javascript.options.source_pragmas", true);
pref("javascript.options.asyncstack", true);
// Broadly capturing async stack data adds overhead that is only advisable for
// developers, so we only enable it when the devtools are open, by default.
pref("javascript.options.asyncstack_capture_debuggee_only", true);