[PATCH] D95052: [WebAssembly][lld] R_WASM_TABLE_NUMBER_LEB is variable-width
Andy Wingo via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 20 08:32:16 PST 2021
wingo created this revision.
Herald added subscribers: ecnelises, sunfish, jgravelle-google, sbc100, dschuff.
wingo requested review of this revision.
Herald added subscribers: llvm-commits, aheejin.
Herald added a project: LLVM.
WebAssembly 1.0 only supports a table-number immediate operand of 0x00.
As we add support for multiple tables with a corresponding relocation,
continue to allow generation of one-byte table number references, so
that the "normal" case of just one table with the number 0 can be
written as 0x00. This allows the compiler to output relocations for
call_indirect while targeting WebAssembly 1.0, simplifying the
configuration space.
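To make the width difference concrete, here is a minimal standalone sketch (not part of this patch), assuming only llvm/Support/LEB128.h: table number 0 occupies five bytes when padded for blind relocation patching, but a single 0x00 byte in the minimal encoding that WebAssembly 1.0 expects.

// Standalone illustration only; encodeULEB128's PadTo parameter is what
// distinguishes the two forms.
#include "llvm/Support/LEB128.h"
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t buf[5];
  // Padded form used for blindly-patchable LEB operands: 80 80 80 80 00.
  unsigned padded = llvm::encodeULEB128(/*Value=*/0, buf, /*PadTo=*/5);
  // Minimal form a compiler may emit for table number 0: a single 00 byte.
  unsigned minimal = llvm::encodeULEB128(/*Value=*/0, buf);
  printf("padded=%u bytes, minimal=%u byte(s)\n", padded, minimal); // 5 and 1
  return 0;
}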
Repository:
rG LLVM Github Monorepo
https://reviews.llvm.org/D95052
Files:
lld/wasm/InputChunks.cpp
Index: lld/wasm/InputChunks.cpp
===================================================================
--- lld/wasm/InputChunks.cpp
+++ lld/wasm/InputChunks.cpp
@@ -105,8 +105,13 @@
       llvm_unreachable("unknown relocation type");
     }
 
-    if (bytesRead && bytesRead != paddedLEBWidth)
-      warn("expected LEB at relocation site be 5/10-byte padded");
+    if (rel.Type == R_WASM_TABLE_NUMBER_LEB) {
+      if (bytesRead < 1 || bytesRead > 5)
+        warn("expected LEB at relocation site be 1-5 bytes long");
+    } else {
+      if (bytesRead && bytesRead != paddedLEBWidth)
+        warn("expected LEB at relocation site be 5/10-byte padded");
+    }
 
     if (rel.Type != R_WASM_GLOBAL_INDEX_LEB &&
         rel.Type != R_WASM_GLOBAL_INDEX_I32) {
@@ -153,9 +158,14 @@
     case R_WASM_GLOBAL_INDEX_LEB:
     case R_WASM_EVENT_INDEX_LEB:
     case R_WASM_MEMORY_ADDR_LEB:
-    case R_WASM_TABLE_NUMBER_LEB:
       encodeULEB128(value, loc, 5);
       break;
+    case R_WASM_TABLE_NUMBER_LEB: {
+      uint32_t bytesRead = 0;
+      decodeULEB128(loc, &bytesRead);
+      encodeULEB128(value, loc, bytesRead);
+      break;
+    }
     case R_WASM_MEMORY_ADDR_LEB64:
       encodeULEB128(value, loc, 10);
       break;
@@ -254,7 +264,6 @@
   case R_WASM_GLOBAL_INDEX_LEB:
   case R_WASM_EVENT_INDEX_LEB:
   case R_WASM_MEMORY_ADDR_LEB:
-  case R_WASM_TABLE_NUMBER_LEB:
   case R_WASM_TABLE_INDEX_SLEB:
   case R_WASM_MEMORY_ADDR_SLEB:
     return 5;
@@ -272,6 +281,21 @@
   return writeCompressedReloc(buf, rel, value);
 }
 
+static bool relocHasPaddedWidth(const WasmRelocation &rel) {
+  return rel.Type != R_WASM_TABLE_NUMBER_LEB;
+}
+
+static uint32_t getRelocInputWidth(const uint8_t *buf,
+                                   const WasmRelocation &rel) {
+  if (relocHasPaddedWidth(rel)) {
+    return getRelocWidthPadded(rel);
+  } else {
+    uint32_t relocSize = 0;
+    decodeULEB128(buf + rel.Offset, &relocSize);
+    return relocSize;
+  }
+}
+
 // Relocations of type LEB and SLEB in the code section are padded to 5 bytes
 // so that a fast linker can blindly overwrite them without needing to worry
 // about the number of bytes needed to encode the values.
@@ -303,7 +327,7 @@
     LLVM_DEBUG(dbgs() << " region: " << (rel.Offset - lastRelocEnd) << "\n");
     compressedFuncSize += rel.Offset - lastRelocEnd;
     compressedFuncSize += getRelocWidth(rel, file->calcNewValue(rel, tombstone));
-    lastRelocEnd = rel.Offset + getRelocWidthPadded(rel);
+    lastRelocEnd = rel.Offset + getRelocInputWidth(secStart, rel);
   }
   LLVM_DEBUG(dbgs() << " final region: " << (end - lastRelocEnd) << "\n");
   compressedFuncSize += end - lastRelocEnd;
@@ -344,7 +368,7 @@
     memcpy(buf, lastRelocEnd, chunkSize);
     buf += chunkSize;
     buf += writeCompressedReloc(buf, rel, file->calcNewValue(rel, tombstone));
-    lastRelocEnd = secStart + rel.Offset + getRelocWidthPadded(rel);
+    lastRelocEnd = secStart + rel.Offset + getRelocInputWidth(secStart, rel);
   }
 
   unsigned chunkSize = end - lastRelocEnd;
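For context when reading the R_WASM_TABLE_NUMBER_LEB case above: the rewrite first decodes the existing LEB to learn its width, then re-encodes the new value padded to exactly that width, so the code stream keeps its size. Below is a standalone sketch of that pattern; the helper name is hypothetical, not lld's.

#include "llvm/Support/LEB128.h"
#include <cstdint>

// Hypothetical helper, not lld code: rewrite a ULEB128 operand in place while
// preserving the width found in the object file. Assumes newValue fits in
// that width (true for the common case of table number 0 in one byte).
static void patchVariableWidthLEB(uint8_t *loc, uint64_t newValue) {
  unsigned bytesRead = 0;
  llvm::decodeULEB128(loc, &bytesRead);          // width of the existing operand
  llvm::encodeULEB128(newValue, loc, bytesRead); // re-encode, padded to that width
}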