Added character movements, added decorations, adjusted cooking recipes

N-Nachtigal 2025-05-14 16:36:42 +02:00
parent 95945c0306
commit a0c893ca0b
1124 changed files with 64294 additions and 763 deletions

@@ -0,0 +1,83 @@
import argparse
import math
import multiprocessing
import pathlib
import time

import progressbar

import pymtdb
from whitelist_helpers import create_whitelist, write_whitelist, create_filter, count_blocks, existing_file, get_cursor, \
    get_all_nodes


def process_chunk(args, offset, limit, completed, results):
    # worker process: scan one LIMIT/OFFSET slice of the blocks table and collect the node names found there
    cursor = get_cursor(args)
    cursor.execute(f'SELECT data FROM blocks LIMIT {limit} OFFSET {offset}')
    node_names = set()
    i = 0
    for i, row in enumerate(cursor, 1):
        node_names.update(pymtdb.MapBlockSimple.import_from_serialized(row[0]).node_names)
        if i % args.chunk_size == 0:
            completed.value = i
    completed.value = i
    results.put(node_names, False)


def main(args):
    num_blocks, count_blocks_elapsed = count_blocks(args)  # 345104538, 13*60
    work_size = math.ceil(num_blocks / args.workers)
    offsets = range(0, num_blocks, work_size)
    completeds = tuple(multiprocessing.Value('Q', 0, lock=False) for _ in range(args.workers))
    # because we want to terminate the processes before we remove the results from the queue, use a manager
    # see warnings in https://docs.python.org/3/library/multiprocessing.html#pipes-and-queues
    results = multiprocessing.Manager().Queue()
    processes = tuple(
        multiprocessing.Process(target=process_chunk, name=f'processor {i}',
                                args=(args, offsets[i], work_size, completeds[i], results))
        for i in range(args.workers)
    )
    for process in processes:
        process.start()

    print(f'NOTICE: not all jobs will start at the same time due to the nature of ranged queries. actual runtime will '
          f'be closer to 1/{min(args.workers, multiprocessing.cpu_count())}th the early estimate, plus '
          f'{count_blocks_elapsed}s.')
    # TODO: if we know how long it takes to count the blocks, and how many workers there are, we can estimate how long
    #       before a process starts producing results, and resize the jobs to maximize processor usage.
    #       proper estimation requires differential equations, ugh.
    with progressbar.ProgressBar(max_value=num_blocks) as bar:
        while True:
            time.sleep(1)
            total_completed = sum(completed.value for completed in completeds)
            bar.update(total_completed)
            if total_completed == num_blocks:
                break

    print('joining...')
    for process in processes:
        process.join()

    print('compiling results...')
    all_nodes = get_all_nodes(results)
    filter_ = create_filter(args.stairsplus_dump)
    whitelist = create_whitelist(filter_, all_nodes)
    write_whitelist(args, whitelist)


def parse_args(args=None, namespace=None):
    p = argparse.ArgumentParser()
    g = p.add_mutually_exclusive_group(required=True)
    g.add_argument('--pg_connection', '-c')
    g.add_argument('--sqlite_file', '-s', type=existing_file)
    p.add_argument('--chunk_size', type=int, default=64)
    p.add_argument('--workers', type=int, default=multiprocessing.cpu_count())
    p.add_argument('--output', '-o', type=pathlib.Path)
    p.add_argument('stairsplus_dump', type=existing_file)
    return p.parse_args(args=args, namespace=namespace)


if __name__ == "__main__":
    main(parse_args())
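# Example invocation (illustrative; the script name and paths are placeholders, and the
# dump file is the one produced by the dump_stairsplus_registered_nodes chat command):
#   python3 <this script> --sqlite_file /path/to/map.sqlite /path/to/world/stairsplus_dump.json -o stairsplus.whitelist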

@@ -0,0 +1,33 @@
local f = string.format

local S = stairsplus.S

minetest.register_chatcommand("dump_stairsplus_registered_nodes", {
	description = S("create a list of stairsplus nodes, including aliases, to use as a filter in creating a whitelist"),
	privs = { server = true },
	func = function()
		local shaped_nodes = {}
		for shaped_node, shape in pairs(stairsplus.api.shape_by_shaped_node) do
			if shape ~= "node" then
				shaped_nodes[shaped_node] = true
			end
		end

		local aliases = {}
		for original in pairs(minetest.registered_aliases) do
			local resolved = futil.resolve_item(original)
			if resolved and shaped_nodes[resolved] then
				aliases[original] = resolved
			end
		end

		local filename = futil.path_concat(minetest.get_worldpath(), "stairsplus_dump.json")
		local contents = minetest.write_json({
			aliases = aliases,
			shaped_nodes = shaped_nodes,
		}, true)

		if not futil.write_file(filename, contents) then
			return false, f("error writing file @ %s", filename)
		end

		return true, f("dump created @ %s.", filename)
	end,
})
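-- Usage sketch (in-game, requires the "server" privilege):
--   /dump_stairsplus_registered_nodes
-- On success this writes stairsplus_dump.json into the world directory; the whitelist
-- and schematic conversion scripts in this commit read that file.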

@@ -0,0 +1,239 @@
# https://github.com/minetest/minetest/blob/master/doc/world_format.txt#L301
# https://docs.python.org/3/library/struct.html
import collections

import pyzstd

from stream import StreamReader

MAP_BLOCKSIZE = 16

vector = collections.namedtuple('vector', ('x', 'y', 'z'))


def unpack_pos(packed_pos):
    # 16*(16*z + y) + x
    zy, x = divmod(packed_pos, 16)
    z, y = divmod(zy, 16)
    return vector(x, y, z)
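# Worked example: packed_pos = 16*(16*1 + 2) + 3 = 291,
# so unpack_pos(291) == vector(x=3, y=2, z=1).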
class Inventory:
    def __init__(self):
        pass

    @staticmethod
    def from_bytes(data: bytes):
        inv = Inventory()
        return inv


class MetaData:
    def __init__(self):
        self._meta = {}
        self._private = set()

    def __getitem__(self, key: bytes):
        return self._meta[key]

    def __setitem__(self, key: bytes, value: bytes):
        self._meta[key] = value

    def mark_as_private(self, key: bytes, private: bool):
        if private:
            self._private.add(key)
        else:
            self._private.discard(key)


class StaticObject:
    def __init__(self, type_, pos, data):
        self._type = type_
        self._pos = pos
        self._data = data


class Timer:
    def __init__(self, timeout: int, elapsed: int):
        self._timeout = timeout
        self._elapsed = elapsed


class MapBlock:
    def __init__(self):
        self._flags = 0
        self._lighting_complete = 0
        self._timestamp = 0
        self._nodes = tuple(
            tuple(
                ["ignore" for _ in range(MAP_BLOCKSIZE)]
                for _ in range(MAP_BLOCKSIZE)
            ) for _ in range(MAP_BLOCKSIZE)
        )
        self._param1 = tuple(
            tuple(
                [0 for _ in range(MAP_BLOCKSIZE)]
                for _ in range(MAP_BLOCKSIZE)
            ) for _ in range(MAP_BLOCKSIZE)
        )
        self._param2 = tuple(
            tuple(
                [0 for _ in range(MAP_BLOCKSIZE)]
                for _ in range(MAP_BLOCKSIZE)
            ) for _ in range(MAP_BLOCKSIZE)
        )
        self._metadata = tuple(
            tuple(
                [None for _ in range(MAP_BLOCKSIZE)]
                for _ in range(MAP_BLOCKSIZE)
            ) for _ in range(MAP_BLOCKSIZE)
        )
        self._inventory = tuple(
            tuple(
                [None for _ in range(MAP_BLOCKSIZE)]
                for _ in range(MAP_BLOCKSIZE)
            ) for _ in range(MAP_BLOCKSIZE)
        )
        self._timer = tuple(
            tuple(
                [None for _ in range(MAP_BLOCKSIZE)]
                for _ in range(MAP_BLOCKSIZE)
            ) for _ in range(MAP_BLOCKSIZE)
        )

    def iter_nodes(self):
        for plane in self._nodes:
            for row in plane:
                yield from row

    @staticmethod
    def import_from_serialized(serialized_data: bytes):
        mapblock = MapBlock()
        version = serialized_data[0]  # struct.unpack('>b', serialized_data)
        if version != 29:
            raise RuntimeError(f'can\'t parse version {version}')
        stream = StreamReader(pyzstd.decompress(serialized_data[1:]))
        mapblock._flags = stream.u8()
        mapblock._lighting_complete = stream.u16()
        mapblock._timestamp = stream.u32()
        name_id_mapping_version = stream.u8()
        num_name_id_mappings = stream.u16()
        if name_id_mapping_version != 0:
            raise RuntimeError(f'can\'t grok name_id_mapping_version {name_id_mapping_version}')
        name_by_id = {}
        for _ in range(num_name_id_mappings):
            id_ = stream.u16()
            name_len = stream.u16()
            name_by_id[id_] = stream.bytes(name_len)
        content_width = stream.u8()
        if content_width != 2:
            raise RuntimeError(f'invalid content_width {content_width}')
        params_width = stream.u8()
        if params_width != 2:
            raise RuntimeError(f'invalid params_width {params_width}')
        for z in range(MAP_BLOCKSIZE):
            for y in range(MAP_BLOCKSIZE):
                for x in range(MAP_BLOCKSIZE):
                    mapblock._nodes[z][y][x] = name_by_id[stream.u16()]
        for z in range(MAP_BLOCKSIZE):
            for y in range(MAP_BLOCKSIZE):
                for x in range(MAP_BLOCKSIZE):
                    mapblock._param1[z][y][x] = stream.u8()
        for z in range(MAP_BLOCKSIZE):
            for y in range(MAP_BLOCKSIZE):
                for x in range(MAP_BLOCKSIZE):
                    mapblock._param2[z][y][x] = stream.u8()
        ib = ''
        node_metadata_version = stream.u8()
        if node_metadata_version > 0:
            if node_metadata_version != 2:
                raise RuntimeError(f'unexpected node_metadata_version {node_metadata_version}')
            node_metadata_count = stream.u16()
            for _ in range(node_metadata_count):
                pos = unpack_pos(stream.u16())
                meta = MetaData()
                num_vars = stream.u32()
                for _ in range(num_vars):
                    key_len = stream.u16()
                    key = stream.bytes(key_len)
                    val_len = stream.u32()
                    meta[key] = stream.bytes(val_len)
                    meta.mark_as_private(key, stream.u8() == 1)
                mapblock._metadata[pos.z][pos.y][pos.x] = meta
                mapblock._inventory[pos.z][pos.y][pos.x] = Inventory.from_bytes(stream.inventory_bytes())
        static_object_version = stream.u8()
        if static_object_version != 0:
            raise RuntimeError(f'unexpected static_object_version {static_object_version} {ib} {stream._data}')
        static_object_count = stream.u16()
        static_objects = []
        for _ in range(static_object_count):
            type_ = stream.u8()
            pos_x_nodes = stream.s32() / 1e5
            pos_y_nodes = stream.s32() / 1e5
            pos_z_nodes = stream.s32() / 1e5
            data_size = stream.u16()
            data = stream.bytes(data_size)
            static_objects.append(StaticObject(type_, vector(pos_x_nodes, pos_y_nodes, pos_z_nodes), data))
        timers_length = stream.u8()
        if timers_length != 10:
            raise RuntimeError(f'unexpected timers_length {timers_length}')
        num_of_timers = stream.u16()
        for _ in range(num_of_timers):
            pos = unpack_pos(stream.u16())
            timeout = stream.s32()
            elapsed = stream.s32()
            mapblock._timer[pos.z][pos.y][pos.x] = Timer(timeout, elapsed)
        return mapblock


class MapBlockSimple:
    def __init__(self):
        self.node_names = []

    @staticmethod
    def import_from_serialized(serialized_data: bytes):
        mapblock = MapBlockSimple()
        version = serialized_data[0]
        if type(version) is bytes:
            version = ord(version)
        if version != 29:
            raise RuntimeError(f'can\'t parse version {version}')
        stream = StreamReader(pyzstd.decompress(serialized_data[1:]))
        stream.u8()  # flags
        stream.u16()  # lighting_complete
        stream.u32()  # timestamp
        name_id_mapping_version = stream.u8()
        num_name_id_mappings = stream.u16()
        if name_id_mapping_version != 0:
            raise RuntimeError(f'can\'t grok name_id_mapping_version {name_id_mapping_version}')
        for _ in range(num_name_id_mappings):
            stream.u16()  # id
            name_len = stream.u16()
            mapblock.node_names.append(stream.bytes(name_len))
        return mapblock
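# Minimal usage sketch (illustrative; assumes `row` is a record fetched from the
# `blocks` table of a Minetest map database, so row[0] is the raw serialized block):
#   names = MapBlockSimple.import_from_serialized(row[0]).node_names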

@@ -0,0 +1,3 @@
progressbar2
psycopg2
pyzstd

@@ -0,0 +1,72 @@
import struct


class StreamReader:
    def __init__(self, data: bytes):
        self._data = data
        self._start = 0

    def u8(self) -> int:
        sformat = '>B'
        ssize = struct.calcsize(sformat)
        rv = struct.unpack(sformat, self._data[self._start:self._start + ssize])
        self._start = self._start + ssize
        return rv[0]

    def u16(self) -> int:
        sformat = '>H'
        ssize = struct.calcsize(sformat)
        rv = struct.unpack(sformat, self._data[self._start:self._start + ssize])
        self._start = self._start + ssize
        return rv[0]

    def s32(self) -> int:
        sformat = '>i'
        ssize = struct.calcsize(sformat)
        rv = struct.unpack(sformat, self._data[self._start:self._start + ssize])
        self._start = self._start + ssize
        return rv[0]

    def u32(self) -> int:
        sformat = '>I'
        ssize = struct.calcsize(sformat)
        rv = struct.unpack(sformat, self._data[self._start:self._start + ssize])
        self._start = self._start + ssize
        return rv[0]

    def bytes(self, count: int) -> bytes:
        rv = self._data[self._start:self._start + count]
        self._start = self._start + count
        return rv

    def inventory_bytes(self) -> bytes:
        start_of_end = self._data.find(b'EndInventory\n', self._start)
        if start_of_end == -1:
            return
        actual_end = start_of_end + len(b'EndInventory\n')
        rv = self._data[self._start:actual_end]
        self._start = actual_end
        return rv

    def rest(self) -> bytes:
        return self._data[self._start:]


class StreamWriter:
    def __init__(self, fh):
        self._fh = fh

    def u8(self, value):
        sformat = '>B'
        self._fh.write(struct.pack(sformat, value))

    def u16(self, value):
        sformat = '>H'
        self._fh.write(struct.pack(sformat, value))

    def u32(self, value):
        sformat = '>I'
        self._fh.write(struct.pack(sformat, value))

    def bytes(self, value: bytes):
        self._fh.write(value)
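# Minimal round-trip sketch (illustrative):
#   import io
#   buf = io.BytesIO()
#   writer = StreamWriter(buf)
#   writer.u16(29)
#   reader = StreamReader(buf.getvalue())
#   assert reader.u16() == 29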

@@ -0,0 +1,162 @@
# https://gitlab.com/bztsrc/mtsedit/blob/master/docs/mts_format.md
import argparse
import json
import pathlib

import lupa

from stream import StreamReader, StreamWriter

lua = lupa.LuaRuntime(unpack_returned_tuples=True)


def is_schem(file: pathlib.Path):
    return file.suffix == '.mts'


def convert_schem(child, alias_map):
    print(f'processing {child}')
    with child.open('rb') as fh:
        contents = fh.read()

    reader = StreamReader(contents)
    magic = reader.bytes(4)
    if magic != b'MTSM':
        raise RuntimeError(f'invalid magic number {magic}')
    version = reader.u16()
    if version != 4:
        raise RuntimeError(f'unexpected version {version}')
    x = reader.u16()
    y = reader.u16()
    z = reader.u16()
    layer_probability_values = reader.bytes(y)
    name_id_length = reader.u16()
    names = []
    any_changed = False
    for _ in range(name_id_length):
        name = reader.bytes(reader.u16())
        alias = alias_map.get(name.decode())
        if alias:
            any_changed = True
            names.append(alias.encode())
        else:
            names.append(name)

    if any_changed:
        print('writing changes...')
        rest = reader.rest()
        with child.open('wb') as fh:
            writer = StreamWriter(fh)
            writer.bytes(b'MTSM')
            writer.u16(4)
            writer.u16(x)
            writer.u16(y)
            writer.u16(z)
            writer.bytes(layer_probability_values)
            writer.u16(name_id_length)
            for name in names:
                writer.u16(len(name))
                writer.bytes(name)
            writer.bytes(rest)


def is_we(file: pathlib.Path):
    return file.suffix == '.we'


def lua_dump(value):
    if type(value) is str:
        return repr(value)
    elif type(value) in {int, float}:
        return str(value)
    elif type(value) in {list, tuple}:
        return f'{{{", ".join(map(lua_dump, value))}}}'
    elif type(value) is dict:
        return '{' + ', '.join(f'[{lua_dump(k)}] = {lua_dump(v)}' for k, v in value.items()) + '}'
    elif value is None:
        return 'nil'
    elif value is True:
        return 'true'
    elif value is False:
        return 'false'
    elif lupa.lua_type(value) == 'table':
        return lua_dump(dict(value.items()))
    else:
        raise RuntimeError(f'value {value!r} w/ unexpected type {type(value)}')


def convert_we(child, alias_map):
    print(f'processing {child}')
    with child.open('r') as fh:
        contents = fh.read()

    assert(contents[:9] == '5:return ')
    table = lua.eval(contents[9:])
    data = tuple(map(dict, table.values()))
    any_changed = False
    for point in data:
        alias = alias_map.get(point['name'])
        if alias:
            point['name'] = alias
            any_changed = True

    if any_changed:
        print('writing changes...')
        output = f'5:return {lua_dump(data)}'
        with child.open('w') as fh:
            fh.write(output)


def create_alias_map(stairsplus_dump: pathlib.Path):
    print('reading aliases from dump')
    aliases = {}
    with stairsplus_dump.open() as fh:
        data = json.load(fh)
    for alias, shaped_node in data['aliases'].items():
        aliases[alias] = shaped_node
    return aliases


def main(args):
    alias_map = create_alias_map(args.stairsplus_dump)
    for child in args.schems.iterdir():
        if child.is_file():
            if is_schem(child):
                convert_schem(child, alias_map)
            elif is_we(child):
                convert_we(child, alias_map)
            else:
                print(f'unknown file type {child.suffix}')


def existing_file(path: str) -> pathlib.Path:
    file_path = pathlib.Path(path)
    if not file_path.exists():
        raise argparse.ArgumentTypeError(f'{path!r} does not exist.')
    if not file_path.is_file():
        raise argparse.ArgumentTypeError(f'{path!r} is not a file.')
    return file_path


def existing_directory(path: str) -> pathlib.Path:
    file_path = pathlib.Path(path)
    if not file_path.exists():
        raise argparse.ArgumentTypeError(f'{path!r} does not exist.')
    if not file_path.is_dir():
        raise argparse.ArgumentTypeError(f'{path!r} is not a directory.')
    return file_path


def parse_args(args=None, namespace=None):
    p = argparse.ArgumentParser()
    p.add_argument('stairsplus_dump', type=existing_file)
    p.add_argument('schems', type=existing_directory)
    return p.parse_args(args=args, namespace=namespace)


if __name__ == "__main__":
    main(parse_args())
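# Example invocation (illustrative; the script name and paths are placeholders, and the
# dump file is the one produced by the dump_stairsplus_registered_nodes chat command):
#   python3 <this script> /path/to/world/stairsplus_dump.json /path/to/schems/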

@@ -0,0 +1,87 @@
import argparse
import json
import multiprocessing
import pathlib
import queue
import time


def existing_file(path: str) -> pathlib.Path:
    file_path = pathlib.Path(path)
    if not file_path.exists():
        raise argparse.ArgumentTypeError(f'{path!r} does not exist.')
    if not file_path.is_file():
        raise argparse.ArgumentTypeError(f'{path!r} is not a file.')
    return file_path


def get_cursor(args):
    if args.pg_connection:
        import psycopg2
        conn = psycopg2.connect(args.pg_connection)
        cursor = conn.cursor(name='blocks')
        cursor.itersize = args.chunk_size
    else:
        import sqlite3
        conn = sqlite3.connect(args.sqlite_file)
        cursor = conn.cursor()
    return cursor


def create_filter(stairsplus_dump: pathlib.Path):
    print('creating filter from dump...')
    start = time.time()
    f = {}
    with stairsplus_dump.open() as fh:
        data = json.load(fh)
    for shaped_node in data['shaped_nodes'].keys():
        f[shaped_node.encode()] = shaped_node.encode()
    for alias, shaped_node in data['aliases'].items():
        f[alias.encode()] = shaped_node.encode()
    print(f'created in {time.time() - start}')
    return f


def count_blocks(args):
    cursor = get_cursor(args)
    # just shy of 12 minutes for postgres w/ a 150GiB map dump, an opteron 6376, and 4 encrypted raid6 5400 RPM disks
    print('counting mapblocks - this can take a while...')
    start = time.time()
    cursor.execute('SELECT COUNT(data) FROM blocks')
    num_blocks = cursor.fetchone()[0]
    elapsed = time.time() - start
    print(f'num_blocks: {num_blocks} (fetched in {elapsed}s)')
    return num_blocks, elapsed


def create_whitelist(filter_, all_nodes):
    print('creating whitelist')
    return set(
        shaped_node for shaped_node in map(filter_.get, all_nodes) if shaped_node
    )


def write_whitelist(args, whitelist):
    if args.output:
        output = args.output
    else:
        output = args.stairsplus_dump.parent / 'stairsplus.whitelist'
    with output.open('wb') as fh:
        print(f'writing whitelist to {output!r}')
        fh.write(b'\n'.join(sorted(whitelist)))


def get_all_nodes(results: multiprocessing.Queue):
    all_nodes = set()
    try:
        while True:
            all_nodes.update(results.get(False))
    except queue.Empty:
        return all_nodes