diff --git a/.efrocachemap b/.efrocachemap
index f3df2c6f..f3a60421 100644
--- a/.efrocachemap
+++ b/.efrocachemap
@@ -4068,50 +4068,50 @@
"build/assets/windows/Win32/ucrtbased.dll": "2def5335207d41b21b9823f6805997f1",
"build/assets/windows/Win32/vc_redist.x86.exe": "b08a55e2e77623fe657bea24f223a3ae",
"build/assets/windows/Win32/vcruntime140d.dll": "865b2af4d1e26a1a8073c89acb06e599",
- "build/prefab/full/linux_arm64_gui/debug/ballisticakit": "1f6d9028dd5007bdb1ea940f3b1f9ffb",
- "build/prefab/full/linux_arm64_gui/release/ballisticakit": "64da0286b58456e4e1dbac45e7ba77e7",
- "build/prefab/full/linux_arm64_server/debug/dist/ballisticakit_headless": "0e2e107d97f6256e971ef5f5391c7641",
- "build/prefab/full/linux_arm64_server/release/dist/ballisticakit_headless": "06c59df009b3f8ef7a6bc7f3dd02fa27",
- "build/prefab/full/linux_x86_64_gui/debug/ballisticakit": "12612fae133533d2155f5094b44b8876",
- "build/prefab/full/linux_x86_64_gui/release/ballisticakit": "1ff6dee3bcdeccea98c43cefe38dbd59",
- "build/prefab/full/linux_x86_64_server/debug/dist/ballisticakit_headless": "c761e7132d753091db6f7621a4d82507",
- "build/prefab/full/linux_x86_64_server/release/dist/ballisticakit_headless": "fa79d2d5fe20dde12d8631b681b8bc02",
- "build/prefab/full/mac_arm64_gui/debug/ballisticakit": "28981340fb5acd51c068ce649c22019f",
- "build/prefab/full/mac_arm64_gui/release/ballisticakit": "0c0b3c9951c97b85aa6ee4e0874b5131",
- "build/prefab/full/mac_arm64_server/debug/dist/ballisticakit_headless": "25e4319520528b66b8ce9b75cde4667b",
- "build/prefab/full/mac_arm64_server/release/dist/ballisticakit_headless": "522750cf436f7f7dd004af239d3e5c9c",
- "build/prefab/full/mac_x86_64_gui/debug/ballisticakit": "13b5e8621b3f681af55c8f6d809e8cdf",
- "build/prefab/full/mac_x86_64_gui/release/ballisticakit": "bd8f13259ea0de5831f94bfabd6662fc",
- "build/prefab/full/mac_x86_64_server/debug/dist/ballisticakit_headless": "39f7be43e2d4a9d992a69c09d070f7c0",
- "build/prefab/full/mac_x86_64_server/release/dist/ballisticakit_headless": "23e005d825c4899b04e1df8275e63366",
- "build/prefab/full/windows_x86_gui/debug/BallisticaKit.exe": "48f6a92f679aaf6a96c3dd86a930fec1",
- "build/prefab/full/windows_x86_gui/release/BallisticaKit.exe": "9aa5991d4222207b7d2ec057af6ac7d2",
- "build/prefab/full/windows_x86_server/debug/dist/BallisticaKitHeadless.exe": "27f080c17d30ad819005b36a3f529d9b",
- "build/prefab/full/windows_x86_server/release/dist/BallisticaKitHeadless.exe": "bec0ebf67c7eac9cf93a8ca50fc894e8",
- "build/prefab/lib/linux_arm64_gui/debug/libballisticaplus.a": "85ba4e81a1f7ae2cff4b1355eb49904f",
- "build/prefab/lib/linux_arm64_gui/release/libballisticaplus.a": "498921f7eb2afd327d4b900cb70e31f9",
- "build/prefab/lib/linux_arm64_server/debug/libballisticaplus.a": "85ba4e81a1f7ae2cff4b1355eb49904f",
- "build/prefab/lib/linux_arm64_server/release/libballisticaplus.a": "498921f7eb2afd327d4b900cb70e31f9",
- "build/prefab/lib/linux_x86_64_gui/debug/libballisticaplus.a": "ded5f785236bf64e644ee20041ac8342",
- "build/prefab/lib/linux_x86_64_gui/release/libballisticaplus.a": "c436a058b7204fa39f22eafc7ca7855f",
- "build/prefab/lib/linux_x86_64_server/debug/libballisticaplus.a": "ded5f785236bf64e644ee20041ac8342",
- "build/prefab/lib/linux_x86_64_server/release/libballisticaplus.a": "c436a058b7204fa39f22eafc7ca7855f",
- "build/prefab/lib/mac_arm64_gui/debug/libballisticaplus.a": "fe0ba4b21528a557c5a434b8f2eeda41",
- "build/prefab/lib/mac_arm64_gui/release/libballisticaplus.a": "7950a02c3d9a1088e9acd4c29bd3cb72",
- "build/prefab/lib/mac_arm64_server/debug/libballisticaplus.a": "fe0ba4b21528a557c5a434b8f2eeda41",
- "build/prefab/lib/mac_arm64_server/release/libballisticaplus.a": "7950a02c3d9a1088e9acd4c29bd3cb72",
- "build/prefab/lib/mac_x86_64_gui/debug/libballisticaplus.a": "870d11d339fd1b3acf66cc601ff29c83",
- "build/prefab/lib/mac_x86_64_gui/release/libballisticaplus.a": "0ab638b6602610bdaf432e3cc2464080",
- "build/prefab/lib/mac_x86_64_server/debug/libballisticaplus.a": "92394eb19387c363471ce134ac9e6a1b",
- "build/prefab/lib/mac_x86_64_server/release/libballisticaplus.a": "0ab638b6602610bdaf432e3cc2464080",
- "build/prefab/lib/windows/Debug_Win32/BallisticaKitGenericPlus.lib": "4c932459a387f75168a8a1eb32523300",
- "build/prefab/lib/windows/Debug_Win32/BallisticaKitGenericPlus.pdb": "41a00a3d9ea038fcde6bf43f7f88e6a2",
- "build/prefab/lib/windows/Debug_Win32/BallisticaKitHeadlessPlus.lib": "3fecaabf37fdbaef3d8e7a4de4582d9e",
- "build/prefab/lib/windows/Debug_Win32/BallisticaKitHeadlessPlus.pdb": "95d54d70c4b9ff1ab788fd46eb0b73c4",
- "build/prefab/lib/windows/Release_Win32/BallisticaKitGenericPlus.lib": "abbd93b2c28fa0bdffa2f72d3bf516f5",
- "build/prefab/lib/windows/Release_Win32/BallisticaKitGenericPlus.pdb": "bd9fe5e01ca4ee7c48d0f56158f2252d",
- "build/prefab/lib/windows/Release_Win32/BallisticaKitHeadlessPlus.lib": "e0150f022655778773c6f954e257b113",
- "build/prefab/lib/windows/Release_Win32/BallisticaKitHeadlessPlus.pdb": "3c58883c79cbf4d8c66ddbeb1de935a5",
+ "build/prefab/full/linux_arm64_gui/debug/ballisticakit": "c26b65a24311880c74d69c4983b22ece",
+ "build/prefab/full/linux_arm64_gui/release/ballisticakit": "b5f0703ed12ca1a25200b5d4114909df",
+ "build/prefab/full/linux_arm64_server/debug/dist/ballisticakit_headless": "02f6cf0e2fe78cc1ac9c8e3094f60079",
+ "build/prefab/full/linux_arm64_server/release/dist/ballisticakit_headless": "1468af4af839e714c2d622caee6b9181",
+ "build/prefab/full/linux_x86_64_gui/debug/ballisticakit": "1464adfba201f6fcf79d54068a915409",
+ "build/prefab/full/linux_x86_64_gui/release/ballisticakit": "60fdad12226023caa043da4685831c8a",
+ "build/prefab/full/linux_x86_64_server/debug/dist/ballisticakit_headless": "ac01bff1450ed6b66bb77d8b0af6e84f",
+ "build/prefab/full/linux_x86_64_server/release/dist/ballisticakit_headless": "58cf449ec84b211b0bb38d1d1358c974",
+ "build/prefab/full/mac_arm64_gui/debug/ballisticakit": "6da4ad354507711c5857c81e3bed4e33",
+ "build/prefab/full/mac_arm64_gui/release/ballisticakit": "d642aeeaeffdd5ebe07e968be2311da5",
+ "build/prefab/full/mac_arm64_server/debug/dist/ballisticakit_headless": "0800f2ca27c13408afbb75b5bdf76bae",
+ "build/prefab/full/mac_arm64_server/release/dist/ballisticakit_headless": "e008727c4b62b7ef09c775b505cee886",
+ "build/prefab/full/mac_x86_64_gui/debug/ballisticakit": "ef184ee79f268744612130743cf8369d",
+ "build/prefab/full/mac_x86_64_gui/release/ballisticakit": "c1ea1a2c7362b2a47b5f55f8ff112c61",
+ "build/prefab/full/mac_x86_64_server/debug/dist/ballisticakit_headless": "7997bb41bb8db4a2aa1105c498787c41",
+ "build/prefab/full/mac_x86_64_server/release/dist/ballisticakit_headless": "939d32d2010fbcd76398fb9a08ac9152",
+ "build/prefab/full/windows_x86_gui/debug/BallisticaKit.exe": "b35c2813cfa23a4d4c58f50b71617f69",
+ "build/prefab/full/windows_x86_gui/release/BallisticaKit.exe": "48eeea81dc9bba2fe9d8afae1c163b69",
+ "build/prefab/full/windows_x86_server/debug/dist/BallisticaKitHeadless.exe": "4ae9e07d5d7b61bb5c019badfbef37a5",
+ "build/prefab/full/windows_x86_server/release/dist/BallisticaKitHeadless.exe": "95bbece528dfa908838caf48a496dca6",
+ "build/prefab/lib/linux_arm64_gui/debug/libballisticaplus.a": "2c39f4296ba083f11168beaa56256909",
+ "build/prefab/lib/linux_arm64_gui/release/libballisticaplus.a": "02b17ff1ab03fb4a526ef85186baf9b3",
+ "build/prefab/lib/linux_arm64_server/debug/libballisticaplus.a": "2c39f4296ba083f11168beaa56256909",
+ "build/prefab/lib/linux_arm64_server/release/libballisticaplus.a": "02b17ff1ab03fb4a526ef85186baf9b3",
+ "build/prefab/lib/linux_x86_64_gui/debug/libballisticaplus.a": "9a78f6330fea20ba8343b09a339595f1",
+ "build/prefab/lib/linux_x86_64_gui/release/libballisticaplus.a": "5a3358818ebea17293a1090d295e1047",
+ "build/prefab/lib/linux_x86_64_server/debug/libballisticaplus.a": "9a78f6330fea20ba8343b09a339595f1",
+ "build/prefab/lib/linux_x86_64_server/release/libballisticaplus.a": "5a3358818ebea17293a1090d295e1047",
+ "build/prefab/lib/mac_arm64_gui/debug/libballisticaplus.a": "6cc12ac10a557a546b6a9c3fd0792af0",
+ "build/prefab/lib/mac_arm64_gui/release/libballisticaplus.a": "e1fbd7e130511cd8690e0da886910d1a",
+ "build/prefab/lib/mac_arm64_server/debug/libballisticaplus.a": "6cc12ac10a557a546b6a9c3fd0792af0",
+ "build/prefab/lib/mac_arm64_server/release/libballisticaplus.a": "e1fbd7e130511cd8690e0da886910d1a",
+ "build/prefab/lib/mac_x86_64_gui/debug/libballisticaplus.a": "758dea018f7a06c611b9cff20e7d064f",
+ "build/prefab/lib/mac_x86_64_gui/release/libballisticaplus.a": "9355211cad3fae2a29eb8016f7cc062c",
+ "build/prefab/lib/mac_x86_64_server/debug/libballisticaplus.a": "6d309fba1c355902662343b627b6aa8c",
+ "build/prefab/lib/mac_x86_64_server/release/libballisticaplus.a": "9355211cad3fae2a29eb8016f7cc062c",
+ "build/prefab/lib/windows/Debug_Win32/BallisticaKitGenericPlus.lib": "e3085c83263ccc1c13e1bb344f0a7c8e",
+ "build/prefab/lib/windows/Debug_Win32/BallisticaKitGenericPlus.pdb": "38a1826608e0829e25ceded2e5a8e50d",
+ "build/prefab/lib/windows/Debug_Win32/BallisticaKitHeadlessPlus.lib": "9b3612f4c807362baf25daed9bd8ab01",
+ "build/prefab/lib/windows/Debug_Win32/BallisticaKitHeadlessPlus.pdb": "671c648cb9d8f257033b6c203e33aab8",
+ "build/prefab/lib/windows/Release_Win32/BallisticaKitGenericPlus.lib": "6b926b48877a0ecef54107be894f5dc2",
+ "build/prefab/lib/windows/Release_Win32/BallisticaKitGenericPlus.pdb": "15d1aec51cf77095399b46b7a5da5880",
+ "build/prefab/lib/windows/Release_Win32/BallisticaKitHeadlessPlus.lib": "b5b4cf9234f0f4f8d657f2a98364aba9",
+ "build/prefab/lib/windows/Release_Win32/BallisticaKitHeadlessPlus.pdb": "8418ee35e7ae3d6564df2c011b8e5838",
"src/assets/ba_data/python/babase/_mgen/__init__.py": "f885fed7f2ed98ff2ba271f9dbe3391c",
"src/assets/ba_data/python/babase/_mgen/enums.py": "f8cd3af311ac63147882590123b78318",
"src/ballistica/base/mgen/pyembed/binding_base.inc": "eeddad968b176000e31c65be6206a2bc",
diff --git a/.idea/dictionaries/ericf.xml b/.idea/dictionaries/ericf.xml
index 535342e7..fbe3d896 100644
--- a/.idea/dictionaries/ericf.xml
+++ b/.idea/dictionaries/ericf.xml
@@ -202,6 +202,7 @@
autodetected
autogenerate
autonoassets
+ autopep
autopoint
autoremove
autoretain
@@ -229,6 +230,7 @@
bacoremeta
badguy
baenv
+ baenv's
bafoobar
bafoobarmeta
bafoundation
@@ -420,6 +422,7 @@
cancelbtn
capb
caplog
+ capturable
capturetheflag
carentity
casefix
@@ -471,6 +474,7 @@
charstr
chatmessage
chdir
+ chdir'ing
cheadersline
checkarg
checkarglist
@@ -875,6 +879,7 @@
efrotoolsinternal
eftools
efxjtp
+ eglot
eids
elapsedf
elementtree
@@ -1035,6 +1040,7 @@
filelist
filelock
filenames
+ fileno
filepath
fileselector
filesize
@@ -2121,7 +2127,10 @@
pcall
pchild
pcommand
+ pcommandbatch
+ pcommandbatchbin
pcommands
+ pcommandserver
pcstr
pdataclass
pdoc
@@ -2203,6 +2212,7 @@
popupscale
popupstr
popuptext
+ portfile
positionadjusted
posixpath
posixshmem
@@ -2381,6 +2391,7 @@
pylintscripts
pylintscriptsfast
pylintscriptsfull
+ pylsp
pymodulenames
pyobjc
pyoffs
@@ -2474,6 +2485,7 @@
responsetype
responsetypes
responsetypevar
+ resultcode
resultstr
retcode
retrysecs
@@ -2725,6 +2737,7 @@
spinoffs
spinofftest
spinup
+ spinups
splayer
splitlen
splitnumstr
@@ -2868,6 +2881,7 @@
syncitem
syncitems
synclist
+ sysargv
syscall
sysconfigdata
sysctl
@@ -3138,6 +3152,7 @@
unstrl
unsubscriptable
untracked
+ unwritable
upcase
updatecheck
updatethencheck
diff --git a/.idea/misc.xml b/.idea/misc.xml
index 3ea7c1cb..0143897a 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,5 +1,11 @@
+
+
+
+
+
+
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ad05d731..1f7bec7c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,10 +1,15 @@
### 1.7.26 (build 21212, api 8, 2023-08-03)
+- Various general improvements to the pcommand (project command) system.
+- Modules containing pcommand functions are now named with an 's' - so
+ `pcommands.py` instead of `pcommand.py`. `pcommand.py` in efrotools is now
+ solely related to the functioning of the pcommand system.
+
### 1.7.25 (build 21211, api 8, 2023-08-03)
- Fixed an issue where the main thread was holding the Python GIL by default in
monolithic builds with environment-managed event loops. This theoretically
- could have lead to stuttery performanace in the Android or Mac builds.
+ could have led to stuttery performance in the Android or Mac builds.
- Did a bit of cleanup on `baenv.py` in preparation for some additional setup it
will soon be doing to give users more control over logging.
- `getconfig` and `setconfig` in `efrotools` are now `getprojectconfig` and
diff --git a/Makefile b/Makefile
index f5ea4548..048fe580 100644
--- a/Makefile
+++ b/Makefile
@@ -35,10 +35,21 @@ ifeq ($(BA_ENABLE_COMPILE_COMMANDS_DB),1)
PREREQ_COMPILE_COMMANDS_DB = .cache/compile_commands_db/compile_commands.json
endif
+# Support for running pcommands in 'batch' mode in which a simple local server
+# handles command requests from a lightweight client binary. This largely
+# takes Python's startup time out of the equation, which can add up when
+# running lots of small pcommands in cases such as asset builds.
+PCOMMANDBATCHBIN := .cache/pcommandbatch/pcommandbatch
+ifeq ($(BA_PCOMMANDBATCH_DISABLE),1)
+ PCOMMANDBATCH = tools/pcommand
+else
+ PCOMMANDBATCH = $(PCOMMANDBATCHBIN)
+endif
+
# Prereq targets that should be safe to run anytime; even if project-files
# are out of date.
-PREREQS_SAFE = .cache/checkenv .dir-locals.el .mypy.ini .pyrightconfig.json \
- .pycheckers .pylintrc .style.yapf .clang-format \
+PREREQS_SAFE = .cache/checkenv $(PCOMMANDBATCH) .dir-locals.el .mypy.ini \
+ .pyrightconfig.json .pycheckers .pylintrc .style.yapf .clang-format \
ballisticakit-cmake/.clang-format .editorconfig
# Prereq targets that may break if the project needs updating should go here.
@@ -170,11 +181,15 @@ docs:
docs-pdoc:
@tools/pcommand gen_docs_pdoc
+pcommandbatch_speed_test: prereqs
+ @tools/pcommand pcommandbatch_speed_test $(PCOMMANDBATCH)
+
# Tell make which of these targets don't represent files.
-.PHONY: help prereqs prereqs-pre-update prereqs-clean assets assets-cmake \
- assets-cmake-scripts assets-windows assets-windows-Win32 assets-windows-x64 \
- assets-mac assets-ios assets-android assets-clean resources resources-clean \
- meta meta-clean clean clean-list dummymodules docs
+.PHONY: help prereqs prereqs-pre-update prereqs-clean assets assets-cmake \
+ assets-cmake-scripts assets-windows assets-windows-Win32 assets-windows-x64 \
+ assets-mac assets-ios assets-android assets-clean resources resources-clean \
+ meta meta-clean clean clean-list dummymodules docs docs-pdoc \
+ pcommandbatch_speed_test
################################################################################
@@ -1207,6 +1222,14 @@ SKIP_ENV_CHECKS ?= 0
tools/pcommand checkenv && mkdir -p .cache && touch .cache/checkenv; \
fi
+# NOTE: A leftover 'foof' debug target (which merely echoed its $@ and $^
+# values) was removed here; it was accidental test scaffolding for
+# prerequisite handling and served no build purpose.
+
+$(PCOMMANDBATCHBIN): src/tools/pcommandbatch/pcommandbatch.c \
+ src/tools/pcommandbatch/cJSON.c
+ @tools/pcommand build_pcommandbatch $^ $@
+
# CMake build-type lowercase
CM_BT_LC = $(shell echo $(CMAKE_BUILD_TYPE) | tr A-Z a-z)
diff --git a/ballisticakit-cmake/.idea/dictionaries/ericf.xml b/ballisticakit-cmake/.idea/dictionaries/ericf.xml
index 5d3c5eae..fc4b2764 100644
--- a/ballisticakit-cmake/.idea/dictionaries/ericf.xml
+++ b/ballisticakit-cmake/.idea/dictionaries/ericf.xml
@@ -116,6 +116,7 @@
audiocache
autodetected
automagically
+ autopep
autoselect
availmins
avel
@@ -135,6 +136,7 @@
baclassicmeta
bacoremeta
baenv
+ baenv's
bafoobar
bafoobarmeta
bainternal
@@ -280,6 +282,7 @@
cancelbtn
capitan
caplog
+ capturable
cargs
casefix
cbegin
@@ -308,6 +311,7 @@
charstr
chatmessage
chdir
+ chdir'ing
checkarglist
checkboxwidget
checkchisel
@@ -534,6 +538,7 @@
efrohack
efrohome
efrotoolsinternal
+ eglot
elapsedf
elems
elevenbase
@@ -634,6 +639,7 @@
fifteenbits
filefilter
filelock
+ fileno
filt
filterdoc
filterstr
@@ -1268,7 +1274,10 @@
pbasename
pbxgrp
pbxgrps
+ pcommandbatch
+ pcommandbatchbin
pcommands
+ pcommandserver
pdataclass
pdoc
pdst
@@ -1302,6 +1311,7 @@
podcasts
popd
portaudio
+ portfile
positivex
positivey
positivez
@@ -1392,6 +1402,7 @@
pyhome
pylib
pylibpath
+ pylsp
pymodulenames
pyobj
pyobjs
@@ -1467,6 +1478,7 @@
responsecount
responsetypes
responsetypevar
+ resultcode
resync
retcode
retrysecs
@@ -1595,6 +1607,7 @@
spinoffconfig
spinofftest
spinup
+ spinups
spivak
spwd
srcabs
@@ -1682,6 +1695,7 @@
swiftmergegeneratedheaders
symbolification
symlinking
+ sysargv
syscall
syscalls
sysresponse
@@ -1824,6 +1838,7 @@
unsignaled
unstuff
unsynchronized
+ unwritable
uppercased
userspace
usid
diff --git a/config/projectconfig.json b/config/projectconfig.json
index db147cb5..3cd1482b 100644
--- a/config/projectconfig.json
+++ b/config/projectconfig.json
@@ -40,7 +40,8 @@
"psutil",
"pbxproj.XcodeProject",
"pbxproj.pbxextensions",
- "openstep_parser"
+ "openstep_parser",
+ "daemon"
],
"python_paths": [
"src/assets/ba_data/python",
diff --git a/config/spinoffconfig.py b/config/spinoffconfig.py
index 00d0f51e..275ccdfa 100644
--- a/config/spinoffconfig.py
+++ b/config/spinoffconfig.py
@@ -172,7 +172,7 @@ ctx.filter_file_names = {
'assets_phase_xcode',
'ballistica_maya_tools.mel',
'check_python_syntax',
- 'compile_python_files',
+ 'compile_python_file',
'pcommand',
'vmshell',
'cloudshell',
diff --git a/config/toolconfigsrc/mypy.ini b/config/toolconfigsrc/mypy.ini
index 5f65e91e..3811bb05 100644
--- a/config/toolconfigsrc/mypy.ini
+++ b/config/toolconfigsrc/mypy.ini
@@ -36,3 +36,6 @@ ignore_missing_imports = True
[mypy-openstep_parser.*]
ignore_missing_imports = True
+[mypy-daemon.*]
+ignore_missing_imports = True
+
diff --git a/src/assets/Makefile b/src/assets/Makefile
index c0336ef5..caf6ee97 100644
--- a/src/assets/Makefile
+++ b/src/assets/Makefile
@@ -21,55 +21,68 @@ PROJ_DIR = ../..
TOOLS_DIR = $(PROJ_DIR)/tools
BUILD_DIR = $(PROJ_DIR)/build/assets
+PCOMMAND = $(TOOLS_DIR)/pcommand
+
+# Support for running pcommands in 'batch' mode in which a simple local server
+# handles command requests from a lightweight client binary. This largely
+# takes Python's startup time out of the equation, which can add up when
+# running lots of small pcommands in cases such as asset builds.
+PCOMMANDBATCHBIN := $(PROJ_DIR)/.cache/pcommandbatch/pcommandbatch
+ifeq ($(BA_PCOMMANDBATCH_DISABLE),1)
+ PCOMMANDBATCH = $(TOOLS_DIR)/pcommand
+else
+ PCOMMANDBATCH = $(PCOMMANDBATCHBIN)
+endif
+
# High level targets: generally these are what should be used here.
# Build everything needed for all platforms.
all:
- @$(TOOLS_DIR)/pcommand warm_start_asset_build
+ @$(PCOMMAND) warm_start_asset_build
@$(MAKE) assets
- @$(TOOLS_DIR)/pcommand clean_orphaned_assets
+ @$(PCOMMAND) clean_orphaned_assets
# Build everything needed for our cmake builds (linux, mac).
cmake:
- @$(TOOLS_DIR)/pcommand warm_start_asset_build
+ @$(PCOMMAND) warm_start_asset_build
@$(MAKE) assets-cmake
- @$(TOOLS_DIR)/pcommand clean_orphaned_assets
+ @$(PCOMMAND) clean_orphaned_assets
# Build everything needed for our server builds.
server:
@echo Note - skipping warm_start_asset_build for server target.
@$(MAKE) assets-server
- @$(TOOLS_DIR)/pcommand clean_orphaned_assets
+ @$(PCOMMAND) clean_orphaned_assets
# Build everything needed for x86 windows builds.
win-Win32:
- @$(TOOLS_DIR)/pcommand warm_start_asset_build
+ @$(PCOMMAND) warm_start_asset_build
@$(MAKE) assets-win-Win32
- @$(TOOLS_DIR)/pcommand clean_orphaned_assets
+ @$(PCOMMAND) clean_orphaned_assets
# Build everything needed for x86-64 windows builds.
win-x64:
- @$(TOOLS_DIR)/pcommand warm_start_asset_build
+ @$(PCOMMAND) warm_start_asset_build
@$(MAKE) assets-win-x64
- @$(TOOLS_DIR)/pcommand clean_orphaned_assets
+ @$(PCOMMAND) clean_orphaned_assets
# Build everything needed for our mac xcode builds.
mac:
- @$(TOOLS_DIR)/pcommand warm_start_asset_build
+ @$(PCOMMAND) warm_start_asset_build
@$(MAKE) assets-mac
- @$(TOOLS_DIR)/pcommand clean_orphaned_assets
+ @$(PCOMMAND) clean_orphaned_assets
# Build everything needed for our ios/tvos builds.
ios:
- @$(TOOLS_DIR)/pcommand warm_start_asset_build
+ @$(PCOMMAND) warm_start_asset_build
@$(MAKE) assets-ios
- @$(TOOLS_DIR)/pcommand clean_orphaned_assets
+ @$(PCOMMAND) clean_orphaned_assets
# Build everything needed for android.
android:
- @$(TOOLS_DIR)/pcommand warm_start_asset_build
+ @$(PCOMMAND) warm_start_asset_build
@$(MAKE) assets-android
- @$(TOOLS_DIR)/pcommand clean_orphaned_assets
+ @$(PCOMMAND) clean_orphaned_assets
MAKE_AUDIO = 1
MAKE_TEXTURES = 1
@@ -137,9 +150,8 @@ ASSET_TARGETS_WIN_X64 += $(EXTRAS_TARGETS_WIN_X64)
# Note: Code below needs updating when Python version changes (currently 3.11)
define make-opt-pyc-target
$1: $$(subst /__pycache__,,$$(subst .cpython-311.opt-1.pyc,.py,$1))
- @echo Compiling script: $$(subst $(BUILD_DIR)/,,$$^)
- @rm -rf $$@ && PYTHONHASHSEED=1 \
- $$(TOOLS_DIR)/pcommand compile_python_files $$^ && chmod 444 $$@
+# @echo Compiling script: $$(subst $(BUILD_DIR)/,,$$^)
+ @$$(PCOMMANDBATCH) compile_python_file $$^
endef
# This section is generated by batools.assetsmakefile; do not edit by hand.
@@ -690,11 +702,8 @@ SCRIPT_TARGETS_PYC_PUBLIC = \
# Rule to copy src asset scripts to dst.
# (and make non-writable so I'm less likely to accidentally edit them there)
$(SCRIPT_TARGETS_PY_PUBLIC) : $(BUILD_DIR)/%.py : %.py
- @echo Copying script: $(subst $(BUILD_DIR)/,,$@)
- @mkdir -p $(dir $@)
- @rm -f $@
- @cp $^ $@
- @chmod 444 $@
+# @echo Copying script: $(subst $(BUILD_DIR)/,,$@)
+ @$(PCOMMANDBATCH) copy_python_file $^ $@
# These are too complex to define in a pattern rule;
# Instead we generate individual targets in a loop.
@@ -772,11 +781,8 @@ SCRIPT_TARGETS_PYC_PUBLIC_TOOLS = \
# Rule to copy src asset scripts to dst.
# (and make non-writable so I'm less likely to accidentally edit them there)
$(SCRIPT_TARGETS_PY_PUBLIC_TOOLS) : $(BUILD_DIR)/ba_data/python/%.py : $(TOOLS_DIR)/%.py
- @echo Copying script: $(subst $(BUILD_DIR)/,,$@)
- @mkdir -p $(dir $@)
- @rm -f $@
- @cp $^ $@
- @chmod 444 $@
+# @echo Copying script: $(subst $(BUILD_DIR)/,,$@)
+ @$(PCOMMANDBATCH) copy_python_file $^ $@
# These are too complex to define in a pattern rule;
# Instead we generate individual targets in a loop.
diff --git a/src/assets/ba_data/python/bauiv1lib/settings/plugins.py b/src/assets/ba_data/python/bauiv1lib/settings/plugins.py
index 0c7c58ce..5f22847b 100644
--- a/src/assets/ba_data/python/bauiv1lib/settings/plugins.py
+++ b/src/assets/ba_data/python/bauiv1lib/settings/plugins.py
@@ -263,7 +263,6 @@ class PluginWindow(bui.Window):
def _show_plugins(self) -> None:
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
- # pylint: disable=too-many-statements
plugspecs = bui.app.plugins.plugin_specs
plugstates: dict[str, dict] = bui.app.config.setdefault('Plugins', {})
assert isinstance(plugstates, dict)
@@ -301,7 +300,6 @@ class PluginWindow(bui.Window):
else:
# Make sure we handle all cases.
assert_never(self._category)
- sub_height = 0
num_shown = 0
for classpath, plugspec in plugspecs_sorted:
@@ -316,7 +314,7 @@ class PluginWindow(bui.Window):
show = not enabled
else:
assert_never(self._category)
- show = False
+                # Unreachable: assert_never() raises above, so no
+
if not show:
continue
diff --git a/src/meta/Makefile b/src/meta/Makefile
index c0c56acf..8290ba8b 100644
--- a/src/meta/Makefile
+++ b/src/meta/Makefile
@@ -61,7 +61,7 @@ $(PROJ_SRC_DIR)/ballistica/template_fs/mgen/pyembed/binding_template_fs.inc : ba
$(PROJ_SRC_DIR)/ballistica/ui_v1/mgen/pyembed/binding_ui_v1.inc : bauiv1meta/pyembed/binding_ui_v1.py
@$(PCOMMAND) gen_binding_code $< $@
-$(PROJ_SRC_DIR)/assets/ba_data/python/babase/_mgen/__init__.py : $(TOOLS_DIR)/batools/pcommand.py
+$(PROJ_SRC_DIR)/assets/ba_data/python/babase/_mgen/__init__.py : $(TOOLS_DIR)/batools/pcommands.py
@$(PCOMMAND) gen_python_init_module $@
$(PROJ_SRC_DIR)/assets/ba_data/python/babase/_mgen/enums.py : $(PROJ_DIR)/src/ballistica/shared/foundation/types.h $(TOOLS_DIR)/batools/pythonenumsmodule.py
diff --git a/src/tools/pcommandbatch/cJSON.c b/src/tools/pcommandbatch/cJSON.c
new file mode 100644
index 00000000..f6dd11c5
--- /dev/null
+++ b/src/tools/pcommandbatch/cJSON.c
@@ -0,0 +1,3119 @@
+/*
+ Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+*/
+
+/* cJSON */
+/* JSON parser in C. */
+
+/* disable warnings about old C89 functions in MSVC */
+#if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER)
+#define _CRT_SECURE_NO_DEPRECATE
+#endif
+
+#ifdef __GNUC__
+#pragma GCC visibility push(default)
+#endif
+#if defined(_MSC_VER)
+#pragma warning (push)
+/* disable warning about single line comments in system headers */
+#pragma warning (disable : 4001)
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef ENABLE_LOCALES
+#include
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning (pop)
+#endif
+#ifdef __GNUC__
+#pragma GCC visibility pop
+#endif
+
+#include "cJSON.h"
+
+/* define our own boolean type */
+#ifdef true
+#undef true
+#endif
+#define true ((cJSON_bool)1)
+
+#ifdef false
+#undef false
+#endif
+#define false ((cJSON_bool)0)
+
+/* define isnan and isinf for ANSI C, if in C99 or above, isnan and isinf has been defined in math.h */
+#ifndef isinf
+#define isinf(d) (isnan((d - d)) && !isnan(d))
+#endif
+#ifndef isnan
+#define isnan(d) (d != d)
+#endif
+
+#ifndef NAN
+#ifdef _WIN32
+#define NAN sqrt(-1.0)
+#else
+#define NAN 0.0/0.0
+#endif
+#endif
+
+typedef struct {
+ const unsigned char *json;
+ size_t position;
+} error;
+static error global_error = { NULL, 0 };
+
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void)
+{
+ return (const char*) (global_error.json + global_error.position);
+}
+
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item)
+{
+ if (!cJSON_IsString(item))
+ {
+ return NULL;
+ }
+
+ return item->valuestring;
+}
+
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item)
+{
+ if (!cJSON_IsNumber(item))
+ {
+ return (double) NAN;
+ }
+
+ return item->valuedouble;
+}
+
+/* This is a safeguard to prevent copy-pasters from using incompatible C and header files */
+#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 16)
+ #error cJSON.h and cJSON.c have different versions. Make sure that both have the same.
+#endif
+
+CJSON_PUBLIC(const char*) cJSON_Version(void)
+{
+ static char version[15];
+ sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH);
+
+ return version;
+}
+
+/* Case insensitive string comparison, doesn't consider two NULL pointers equal though */
+static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2)
+{
+ if ((string1 == NULL) || (string2 == NULL))
+ {
+ return 1;
+ }
+
+ if (string1 == string2)
+ {
+ return 0;
+ }
+
+ for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++)
+ {
+ if (*string1 == '\0')
+ {
+ return 0;
+ }
+ }
+
+ return tolower(*string1) - tolower(*string2);
+}
+
+typedef struct internal_hooks
+{
+ void *(CJSON_CDECL *allocate)(size_t size);
+ void (CJSON_CDECL *deallocate)(void *pointer);
+ void *(CJSON_CDECL *reallocate)(void *pointer, size_t size);
+} internal_hooks;
+
+#if defined(_MSC_VER)
+/* work around MSVC error C2322: '...' address of dllimport '...' is not static */
+static void * CJSON_CDECL internal_malloc(size_t size)
+{
+ return malloc(size);
+}
+static void CJSON_CDECL internal_free(void *pointer)
+{
+ free(pointer);
+}
+static void * CJSON_CDECL internal_realloc(void *pointer, size_t size)
+{
+ return realloc(pointer, size);
+}
+#else
+#define internal_malloc malloc
+#define internal_free free
+#define internal_realloc realloc
+#endif
+
+/* strlen of character literals resolved at compile time */
+#define static_strlen(string_literal) (sizeof(string_literal) - sizeof(""))
+
+static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc };
+
+static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks)
+{
+ size_t length = 0;
+ unsigned char *copy = NULL;
+
+ if (string == NULL)
+ {
+ return NULL;
+ }
+
+ length = strlen((const char*)string) + sizeof("");
+ copy = (unsigned char*)hooks->allocate(length);
+ if (copy == NULL)
+ {
+ return NULL;
+ }
+ memcpy(copy, string, length);
+
+ return copy;
+}
+
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks)
+{
+ if (hooks == NULL)
+ {
+ /* Reset hooks */
+ global_hooks.allocate = malloc;
+ global_hooks.deallocate = free;
+ global_hooks.reallocate = realloc;
+ return;
+ }
+
+ global_hooks.allocate = malloc;
+ if (hooks->malloc_fn != NULL)
+ {
+ global_hooks.allocate = hooks->malloc_fn;
+ }
+
+ global_hooks.deallocate = free;
+ if (hooks->free_fn != NULL)
+ {
+ global_hooks.deallocate = hooks->free_fn;
+ }
+
+ /* use realloc only if both free and malloc are used */
+ global_hooks.reallocate = NULL;
+ if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free))
+ {
+ global_hooks.reallocate = realloc;
+ }
+}
+
+/* Internal constructor. */
+static cJSON *cJSON_New_Item(const internal_hooks * const hooks)
+{
+ cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON));
+ if (node)
+ {
+ memset(node, '\0', sizeof(cJSON));
+ }
+
+ return node;
+}
+
+/* Delete a cJSON structure. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item)
+{
+ cJSON *next = NULL;
+ while (item != NULL)
+ {
+ next = item->next;
+ if (!(item->type & cJSON_IsReference) && (item->child != NULL))
+ {
+ cJSON_Delete(item->child);
+ }
+ if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL))
+ {
+ global_hooks.deallocate(item->valuestring);
+ }
+ if (!(item->type & cJSON_StringIsConst) && (item->string != NULL))
+ {
+ global_hooks.deallocate(item->string);
+ }
+ global_hooks.deallocate(item);
+ item = next;
+ }
+}
+
+/* get the decimal point character of the current locale */
+static unsigned char get_decimal_point(void)
+{
+#ifdef ENABLE_LOCALES
+ struct lconv *lconv = localeconv();
+ return (unsigned char) lconv->decimal_point[0];
+#else
+ return '.';
+#endif
+}
+
+typedef struct
+{
+ const unsigned char *content;
+ size_t length;
+ size_t offset;
+ size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */
+ internal_hooks hooks;
+} parse_buffer;
+
+/* check if the given size is left to read in a given parse buffer (starting with 1) */
+#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length))
+/* check if the buffer can be accessed at the given index (starting with 0) */
+#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length))
+#define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index))
+/* get a pointer to the buffer at the position */
+#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset)
+
/* Parse the input text to generate a number, and populate the result into item.
 * Returns false when no valid number starts at the current offset. */
static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer)
{
    double number = 0;
    unsigned char *after_end = NULL;
    unsigned char number_c_string[64];
    unsigned char decimal_point = get_decimal_point();
    size_t i = 0;

    if ((input_buffer == NULL) || (input_buffer->content == NULL))
    {
        return false;
    }

    /* copy the number into a temporary buffer and replace '.' with the decimal point
     * of the current locale (for strtod)
     * This also takes care of '\0' not necessarily being available for marking the end of the input */
    for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++)
    {
        switch (buffer_at_offset(input_buffer)[i])
        {
            case '0':
            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
            case '+':
            case '-':
            case 'e':
            case 'E':
                number_c_string[i] = buffer_at_offset(input_buffer)[i];
                break;

            case '.':
                number_c_string[i] = decimal_point;
                break;

            /* first character that cannot be part of a number ends the copy */
            default:
                goto loop_end;
        }
    }
loop_end:
    number_c_string[i] = '\0';

    /* strtod does the actual conversion; after_end tells how much it consumed */
    number = strtod((const char*)number_c_string, (char**)&after_end);
    if (number_c_string == after_end)
    {
        return false; /* parse_error */
    }

    item->valuedouble = number;

    /* use saturation in case of overflow */
    if (number >= INT_MAX)
    {
        item->valueint = INT_MAX;
    }
    else if (number <= (double)INT_MIN)
    {
        item->valueint = INT_MIN;
    }
    else
    {
        item->valueint = (int)number;
    }

    item->type = cJSON_Number;

    /* advance the input by exactly the bytes strtod consumed */
    input_buffer->offset += (size_t)(after_end - number_c_string);
    return true;
}
+
+/* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */
+CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number)
+{
+ if (number >= INT_MAX)
+ {
+ object->valueint = INT_MAX;
+ }
+ else if (number <= (double)INT_MIN)
+ {
+ object->valueint = INT_MIN;
+ }
+ else
+ {
+ object->valueint = (int)number;
+ }
+
+ return object->valuedouble = number;
+}
+
+CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring)
+{
+ char *copy = NULL;
+ /* if object's type is not cJSON_String or is cJSON_IsReference, it should not set valuestring */
+ if (!(object->type & cJSON_String) || (object->type & cJSON_IsReference))
+ {
+ return NULL;
+ }
+ if (strlen(valuestring) <= strlen(object->valuestring))
+ {
+ strcpy(object->valuestring, valuestring);
+ return object->valuestring;
+ }
+ copy = (char*) cJSON_strdup((const unsigned char*)valuestring, &global_hooks);
+ if (copy == NULL)
+ {
+ return NULL;
+ }
+ if (object->valuestring != NULL)
+ {
+ cJSON_free(object->valuestring);
+ }
+ object->valuestring = copy;
+
+ return copy;
+}
+
/* Growable output buffer used while rendering JSON text. */
typedef struct
{
    unsigned char *buffer; /* output bytes (owned unless noalloc) */
    size_t length; /* allocated size of buffer */
    size_t offset; /* number of bytes already written */
    size_t depth; /* current nesting depth (for formatted printing) */
    cJSON_bool noalloc; /* true when buffer is caller-owned and must not be reallocated */
    cJSON_bool format; /* is this print a formatted print */
    internal_hooks hooks; /* allocator used for growing the buffer */
} printbuffer;
+
/* realloc printbuffer if necessary to have at least "needed" bytes more.
 * Returns a pointer to the write position (buffer + offset), or NULL on
 * failure. On allocation failure the old buffer is freed and p->buffer
 * is set to NULL. */
static unsigned char* ensure(printbuffer * const p, size_t needed)
{
    unsigned char *newbuffer = NULL;
    size_t newsize = 0;

    if ((p == NULL) || (p->buffer == NULL))
    {
        return NULL;
    }

    if ((p->length > 0) && (p->offset >= p->length))
    {
        /* make sure that offset is valid */
        return NULL;
    }

    if (needed > INT_MAX)
    {
        /* sizes bigger than INT_MAX are currently not supported */
        return NULL;
    }

    /* total bytes required: what is already written, the request, and a NUL */
    needed += p->offset + 1;
    if (needed <= p->length)
    {
        return p->buffer + p->offset;
    }

    /* caller-owned fixed buffer: growing is not allowed */
    if (p->noalloc) {
        return NULL;
    }

    /* calculate new buffer size (double the requirement, capped at INT_MAX) */
    if (needed > (INT_MAX / 2))
    {
        /* overflow of int, use INT_MAX if possible */
        if (needed <= INT_MAX)
        {
            newsize = INT_MAX;
        }
        else
        {
            return NULL;
        }
    }
    else
    {
        newsize = needed * 2;
    }

    if (p->hooks.reallocate != NULL)
    {
        /* reallocate with realloc if available */
        newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize);
        if (newbuffer == NULL)
        {
            p->hooks.deallocate(p->buffer);
            p->length = 0;
            p->buffer = NULL;

            return NULL;
        }
    }
    else
    {
        /* otherwise reallocate manually: allocate, copy, free the old buffer */
        newbuffer = (unsigned char*)p->hooks.allocate(newsize);
        if (!newbuffer)
        {
            p->hooks.deallocate(p->buffer);
            p->length = 0;
            p->buffer = NULL;

            return NULL;
        }

        memcpy(newbuffer, p->buffer, p->offset + 1);
        p->hooks.deallocate(p->buffer);
    }
    p->length = newsize;
    p->buffer = newbuffer;

    return newbuffer + p->offset;
}
+
+/* calculate the new length of the string in a printbuffer and update the offset */
+static void update_offset(printbuffer * const buffer)
+{
+ const unsigned char *buffer_pointer = NULL;
+ if ((buffer == NULL) || (buffer->buffer == NULL))
+ {
+ return;
+ }
+ buffer_pointer = buffer->buffer + buffer->offset;
+
+ buffer->offset += strlen((const char*)buffer_pointer);
+}
+
+/* securely comparison of floating-point variables */
+static cJSON_bool compare_double(double a, double b)
+{
+ double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b);
+ return (fabs(a - b) <= maxVal * DBL_EPSILON);
+}
+
/* Render the number nicely from the given item into a string.
 * NaN/Infinity are emitted as "null"; integers print without a fraction;
 * doubles use the shortest of 15 or 17 significant digits that round-trips. */
static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer)
{
    unsigned char *output_pointer = NULL;
    double d = item->valuedouble;
    int length = 0;
    size_t i = 0;
    unsigned char number_buffer[26] = {0}; /* temporary buffer to print the number into */
    unsigned char decimal_point = get_decimal_point();
    double test = 0.0;

    if (output_buffer == NULL)
    {
        return false;
    }

    /* This checks for NaN and Infinity */
    if (isnan(d) || isinf(d))
    {
        length = sprintf((char*)number_buffer, "null");
    }
    else if(d == (double)item->valueint)
    {
        /* value is integral: print the int to avoid a spurious fraction */
        length = sprintf((char*)number_buffer, "%d", item->valueint);
    }
    else
    {
        /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */
        length = sprintf((char*)number_buffer, "%1.15g", d);

        /* Check whether the original double can be recovered */
        if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d))
        {
            /* If not, print with 17 decimal places of precision */
            length = sprintf((char*)number_buffer, "%1.17g", d);
        }
    }

    /* sprintf failed or buffer overrun occurred */
    if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1)))
    {
        return false;
    }

    /* reserve appropriate space in the output */
    output_pointer = ensure(output_buffer, (size_t)length + sizeof(""));
    if (output_pointer == NULL)
    {
        return false;
    }

    /* copy the printed number to the output and replace locale
     * dependent decimal point with '.' */
    for (i = 0; i < ((size_t)length); i++)
    {
        if (number_buffer[i] == decimal_point)
        {
            output_pointer[i] = '.';
            continue;
        }

        output_pointer[i] = number_buffer[i];
    }
    output_pointer[i] = '\0';

    output_buffer->offset += (size_t)length;

    return true;
}
+
/* Parse exactly four hexadecimal digits into an unsigned value.
 * Returns 0 on any non-hex character (indistinguishable from "0000"). */
static unsigned parse_hex4(const unsigned char * const input)
{
    unsigned int value = 0;
    size_t i = 0;

    for (i = 0; i < 4; i++)
    {
        unsigned char digit = input[i];
        unsigned int nibble = 0;

        if ((digit >= '0') && (digit <= '9'))
        {
            nibble = (unsigned int)(digit - '0');
        }
        else if ((digit >= 'A') && (digit <= 'F'))
        {
            nibble = (unsigned int)(digit - 'A') + 10;
        }
        else if ((digit >= 'a') && (digit <= 'f'))
        {
            nibble = (unsigned int)(digit - 'a') + 10;
        }
        else
        {
            /* invalid digit */
            return 0;
        }

        /* shift the accumulated digits left and append the new nibble */
        value = (value << 4) | nibble;
    }

    return value;
}
+
/* converts a UTF-16 literal to UTF-8
 * A literal can be one or two sequences of the form \uXXXX
 * Writes the UTF-8 bytes at *output_pointer (advancing it) and returns the
 * number of input bytes consumed (6 or 12), or 0 on failure. */
static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer)
{
    long unsigned int codepoint = 0;
    unsigned int first_code = 0;
    const unsigned char *first_sequence = input_pointer;
    unsigned char utf8_length = 0;
    unsigned char utf8_position = 0;
    unsigned char sequence_length = 0;
    unsigned char first_byte_mark = 0;

    if ((input_end - first_sequence) < 6)
    {
        /* input ends unexpectedly */
        goto fail;
    }

    /* get the first utf16 sequence */
    first_code = parse_hex4(first_sequence + 2);

    /* check that the code is valid (a lone low surrogate is not) */
    if (((first_code >= 0xDC00) && (first_code <= 0xDFFF)))
    {
        goto fail;
    }

    /* UTF16 surrogate pair */
    if ((first_code >= 0xD800) && (first_code <= 0xDBFF))
    {
        const unsigned char *second_sequence = first_sequence + 6;
        unsigned int second_code = 0;
        sequence_length = 12; /* \uXXXX\uXXXX */

        if ((input_end - second_sequence) < 6)
        {
            /* input ends unexpectedly */
            goto fail;
        }

        if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u'))
        {
            /* missing second half of the surrogate pair */
            goto fail;
        }

        /* get the second utf16 sequence */
        second_code = parse_hex4(second_sequence + 2);
        /* check that the code is valid */
        if ((second_code < 0xDC00) || (second_code > 0xDFFF))
        {
            /* invalid second half of the surrogate pair */
            goto fail;
        }


        /* calculate the unicode codepoint from the surrogate pair */
        codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF));
    }
    else
    {
        sequence_length = 6; /* \uXXXX */
        codepoint = first_code;
    }

    /* encode as UTF-8
     * takes at maximum 4 bytes to encode:
     * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
    if (codepoint < 0x80)
    {
        /* normal ascii, encoding 0xxxxxxx */
        utf8_length = 1;
    }
    else if (codepoint < 0x800)
    {
        /* two bytes, encoding 110xxxxx 10xxxxxx */
        utf8_length = 2;
        first_byte_mark = 0xC0; /* 11000000 */
    }
    else if (codepoint < 0x10000)
    {
        /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */
        utf8_length = 3;
        first_byte_mark = 0xE0; /* 11100000 */
    }
    else if (codepoint <= 0x10FFFF)
    {
        /* four bytes, encoding 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
        utf8_length = 4;
        first_byte_mark = 0xF0; /* 11110000 */
    }
    else
    {
        /* invalid unicode codepoint */
        goto fail;
    }

    /* encode as utf8: emit continuation bytes from the end backwards */
    for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--)
    {
        /* 10xxxxxx */
        (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF);
        codepoint >>= 6;
    }
    /* encode first byte */
    if (utf8_length > 1)
    {
        (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF);
    }
    else
    {
        (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F);
    }

    *output_pointer += utf8_length;

    return sequence_length;

fail:
    return 0;
}
+
/* Parse the input text into an unescaped cinput, and populate item.
 * On entry the buffer must point at the opening quote; on success the offset
 * is left just past the closing quote. */
static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer)
{
    const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1;
    const unsigned char *input_end = buffer_at_offset(input_buffer) + 1;
    unsigned char *output_pointer = NULL;
    unsigned char *output = NULL;

    /* not a string */
    if (buffer_at_offset(input_buffer)[0] != '\"')
    {
        goto fail;
    }

    {
        /* calculate approximate size of the output (overestimate):
         * scan ahead to the closing quote, counting escape backslashes */
        size_t allocation_length = 0;
        size_t skipped_bytes = 0;
        while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"'))
        {
            /* is escape sequence */
            if (input_end[0] == '\\')
            {
                if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length)
                {
                    /* prevent buffer overflow when last input character is a backslash */
                    goto fail;
                }
                skipped_bytes++;
                input_end++;
            }
            input_end++;
        }
        if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"'))
        {
            goto fail; /* string ended unexpectedly */
        }

        /* This is at most how much we need for the output */
        allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes;
        output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof(""));
        if (output == NULL)
        {
            goto fail; /* allocation failure */
        }
    }

    output_pointer = output;
    /* loop through the string literal, resolving escape sequences */
    while (input_pointer < input_end)
    {
        if (*input_pointer != '\\')
        {
            *output_pointer++ = *input_pointer++;
        }
        /* escape sequence */
        else
        {
            unsigned char sequence_length = 2;
            if ((input_end - input_pointer) < 1)
            {
                goto fail;
            }

            switch (input_pointer[1])
            {
                case 'b':
                    *output_pointer++ = '\b';
                    break;
                case 'f':
                    *output_pointer++ = '\f';
                    break;
                case 'n':
                    *output_pointer++ = '\n';
                    break;
                case 'r':
                    *output_pointer++ = '\r';
                    break;
                case 't':
                    *output_pointer++ = '\t';
                    break;
                case '\"':
                case '\\':
                case '/':
                    *output_pointer++ = input_pointer[1];
                    break;

                /* UTF-16 literal */
                case 'u':
                    sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer);
                    if (sequence_length == 0)
                    {
                        /* failed to convert UTF16-literal to UTF-8 */
                        goto fail;
                    }
                    break;

                default:
                    goto fail;
            }
            input_pointer += sequence_length;
        }
    }

    /* zero terminate the output */
    *output_pointer = '\0';

    item->type = cJSON_String;
    item->valuestring = (char*)output;

    /* advance past the closing quote */
    input_buffer->offset = (size_t) (input_end - input_buffer->content);
    input_buffer->offset++;

    return true;

fail:
    if (output != NULL)
    {
        input_buffer->hooks.deallocate(output);
    }

    /* record where parsing stopped for error reporting */
    if (input_pointer != NULL)
    {
        input_buffer->offset = (size_t)(input_pointer - input_buffer->content);
    }

    return false;
}
+
/* Render the cstring provided to an escaped version that can be printed.
 * Emits a double-quoted JSON string into the output buffer; NULL input
 * prints as an empty string "". */
static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer)
{
    const unsigned char *input_pointer = NULL;
    unsigned char *output = NULL;
    unsigned char *output_pointer = NULL;
    size_t output_length = 0;
    /* numbers of additional characters needed for escaping */
    size_t escape_characters = 0;

    if (output_buffer == NULL)
    {
        return false;
    }

    /* empty string */
    if (input == NULL)
    {
        output = ensure(output_buffer, sizeof("\"\""));
        if (output == NULL)
        {
            return false;
        }
        strcpy((char*)output, "\"\"");

        return true;
    }

    /* first pass: count how many extra bytes escaping will need */
    for (input_pointer = input; *input_pointer; input_pointer++)
    {
        switch (*input_pointer)
        {
            case '\"':
            case '\\':
            case '\b':
            case '\f':
            case '\n':
            case '\r':
            case '\t':
                /* one character escape sequence */
                escape_characters++;
                break;
            default:
                if (*input_pointer < 32)
                {
                    /* UTF-16 escape sequence uXXXX */
                    escape_characters += 5;
                }
                break;
        }
    }
    output_length = (size_t)(input_pointer - input) + escape_characters;

    output = ensure(output_buffer, output_length + sizeof("\"\""));
    if (output == NULL)
    {
        return false;
    }

    /* no characters have to be escaped */
    if (escape_characters == 0)
    {
        output[0] = '\"';
        memcpy(output + 1, input, output_length);
        output[output_length + 1] = '\"';
        output[output_length + 2] = '\0';

        return true;
    }

    output[0] = '\"';
    output_pointer = output + 1;
    /* second pass: copy the string, escaping as needed */
    for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++)
    {
        if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\'))
        {
            /* normal character, copy */
            *output_pointer = *input_pointer;
        }
        else
        {
            /* character needs to be escaped */
            *output_pointer++ = '\\';
            switch (*input_pointer)
            {
                case '\\':
                    *output_pointer = '\\';
                    break;
                case '\"':
                    *output_pointer = '\"';
                    break;
                case '\b':
                    *output_pointer = 'b';
                    break;
                case '\f':
                    *output_pointer = 'f';
                    break;
                case '\n':
                    *output_pointer = 'n';
                    break;
                case '\r':
                    *output_pointer = 'r';
                    break;
                case '\t':
                    *output_pointer = 't';
                    break;
                default:
                    /* escape and print as unicode codepoint */
                    sprintf((char*)output_pointer, "u%04x", *input_pointer);
                    output_pointer += 4;
                    break;
            }
        }
    }
    output[output_length + 1] = '\"';
    output[output_length + 2] = '\0';

    return true;
}
+
+/* Invoke print_string_ptr (which is useful) on an item. */
+static cJSON_bool print_string(const cJSON * const item, printbuffer * const p)
+{
+ return print_string_ptr((unsigned char*)item->valuestring, p);
+}
+
/* Predeclare these prototypes: parsing and printing of values, arrays and
 * objects are mutually recursive. */
static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer);
static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer);
static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer);
static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer);
static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer);
static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer);
+
/* Utility to jump whitespace and cr/lf (any byte <= 32 counts as whitespace). */
static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer)
{
    if ((buffer == NULL) || (buffer->content == NULL))
    {
        return NULL;
    }

    if (cannot_access_at_index(buffer, 0))
    {
        return buffer;
    }

    while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32))
    {
        buffer->offset++;
    }

    /* if the whole tail was whitespace, step back so offset stays readable */
    if (buffer->offset == buffer->length)
    {
        buffer->offset--;
    }

    return buffer;
}
+
/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer.
 * Only valid at offset 0; returns NULL otherwise. */
static parse_buffer *skip_utf8_bom(parse_buffer * const buffer)
{
    if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0))
    {
        return NULL;
    }

    /* NOTE(review): index 4 requires 5 readable bytes although the BOM is
     * only 3 — a BOM followed by fewer than 2 bytes is not skipped; confirm
     * this bound is intentional */
    if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0))
    {
        buffer->offset += 3;
    }

    return buffer;
}
+
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated)
+{
+ size_t buffer_length;
+
+ if (NULL == value)
+ {
+ return NULL;
+ }
+
+ /* Adding null character size due to require_null_terminated. */
+ buffer_length = strlen(value) + sizeof("");
+
+ return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, require_null_terminated);
+}
+
/* Parse an object - create a new root, and populate.
 * On failure returns NULL and records the error position in global_error
 * (and *return_parse_end when provided). */
CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated)
{
    parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } };
    cJSON *item = NULL;

    /* reset error position */
    global_error.json = NULL;
    global_error.position = 0;

    if (value == NULL || 0 == buffer_length)
    {
        goto fail;
    }

    buffer.content = (const unsigned char*)value;
    buffer.length = buffer_length;
    buffer.offset = 0;
    buffer.hooks = global_hooks;

    item = cJSON_New_Item(&global_hooks);
    if (item == NULL) /* memory fail */
    {
        goto fail;
    }

    /* skip any BOM and leading whitespace before the first value */
    if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer))))
    {
        /* parse failure. ep is set. */
        goto fail;
    }

    /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */
    if (require_null_terminated)
    {
        buffer_skip_whitespace(&buffer);
        if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0')
        {
            goto fail;
        }
    }
    if (return_parse_end)
    {
        *return_parse_end = (const char*)buffer_at_offset(&buffer);
    }

    return item;

fail:
    if (item != NULL)
    {
        cJSON_Delete(item);
    }

    if (value != NULL)
    {
        /* record the position where parsing stopped, clamped to the input */
        error local_error;
        local_error.json = (const unsigned char*)value;
        local_error.position = 0;

        if (buffer.offset < buffer.length)
        {
            local_error.position = buffer.offset;
        }
        else if (buffer.length > 0)
        {
            local_error.position = buffer.length - 1;
        }

        if (return_parse_end != NULL)
        {
            *return_parse_end = (const char*)local_error.json + local_error.position;
        }

        global_error = local_error;
    }

    return NULL;
}
+
+/* Default options for cJSON_Parse */
+CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value)
+{
+ return cJSON_ParseWithOpts(value, 0, 0);
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length)
+{
+ return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0);
+}
+
/* minimum of two values (note: both arguments are evaluated twice) */
#define cjson_min(a, b) (((a) < (b)) ? (a) : (b))
+
/* Render an item into a freshly-allocated, NUL-terminated string.
 * Starts from a 256-byte buffer (grown by print_value via ensure) and
 * shrinks the result to its exact size before returning. Caller frees. */
static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks)
{
    static const size_t default_buffer_size = 256;
    printbuffer buffer[1];
    unsigned char *printed = NULL;

    memset(buffer, 0, sizeof(buffer));

    /* create buffer */
    buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size);
    buffer->length = default_buffer_size;
    buffer->format = format;
    buffer->hooks = *hooks;
    if (buffer->buffer == NULL)
    {
        goto fail;
    }

    /* print the value */
    if (!print_value(item, buffer))
    {
        goto fail;
    }
    update_offset(buffer);

    /* check if reallocate is available */
    if (hooks->reallocate != NULL)
    {
        /* shrink in place to offset + 1 bytes */
        printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1);
        if (printed == NULL) {
            goto fail;
        }
        buffer->buffer = NULL;
    }
    else /* otherwise copy the JSON over to a new buffer */
    {
        printed = (unsigned char*) hooks->allocate(buffer->offset + 1);
        if (printed == NULL)
        {
            goto fail;
        }
        memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1));
        printed[buffer->offset] = '\0'; /* just to be sure */

        /* free the buffer */
        hooks->deallocate(buffer->buffer);
    }

    return printed;

fail:
    if (buffer->buffer != NULL)
    {
        hooks->deallocate(buffer->buffer);
    }

    if (printed != NULL)
    {
        hooks->deallocate(printed);
    }

    return NULL;
}
+
+/* Render a cJSON item/entity/structure to text. */
+CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item)
+{
+ return (char*)print(item, true, &global_hooks);
+}
+
+CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item)
+{
+ return (char*)print(item, false, &global_hooks);
+}
+
/* Render an item to text using a buffer pre-sized to prebuffer bytes
 * (grown on demand). Returns a heap string owned by the caller, or NULL. */
CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt)
{
    printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } };

    if (prebuffer < 0)
    {
        return NULL;
    }

    p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer);
    if (!p.buffer)
    {
        return NULL;
    }

    p.length = (size_t)prebuffer;
    p.offset = 0;
    p.noalloc = false; /* the buffer may still grow via ensure */
    p.format = fmt;
    p.hooks = global_hooks;

    if (!print_value(item, &p))
    {
        global_hooks.deallocate(p.buffer);
        return NULL;
    }

    return (char*)p.buffer;
}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format)
+{
+ printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } };
+
+ if ((length < 0) || (buffer == NULL))
+ {
+ return false;
+ }
+
+ p.buffer = (unsigned char*)buffer;
+ p.length = (size_t)length;
+ p.offset = 0;
+ p.noalloc = true;
+ p.format = format;
+ p.hooks = global_hooks;
+
+ return print_value(item, &p);
+}
+
/* Parser core - when encountering text, process appropriately.
 * Dispatches on the first character(s): literals first, then string,
 * number, array and object. Returns false on anything unrecognized. */
static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer)
{
    if ((input_buffer == NULL) || (input_buffer->content == NULL))
    {
        return false; /* no input */
    }

    /* parse the different types of values */
    /* null */
    if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0))
    {
        item->type = cJSON_NULL;
        input_buffer->offset += 4;
        return true;
    }
    /* false */
    if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0))
    {
        item->type = cJSON_False;
        input_buffer->offset += 5;
        return true;
    }
    /* true */
    if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0))
    {
        item->type = cJSON_True;
        item->valueint = 1;
        input_buffer->offset += 4;
        return true;
    }
    /* string */
    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"'))
    {
        return parse_string(item, input_buffer);
    }
    /* number */
    if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9'))))
    {
        return parse_number(item, input_buffer);
    }
    /* array */
    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '['))
    {
        return parse_array(item, input_buffer);
    }
    /* object */
    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{'))
    {
        return parse_object(item, input_buffer);
    }

    return false;
}
+
/* Render a value to text. Dispatches on the low byte of item->type;
 * ensure() reservations include room for the terminating NUL. */
static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer)
{
    unsigned char *output = NULL;

    if ((item == NULL) || (output_buffer == NULL))
    {
        return false;
    }

    switch ((item->type) & 0xFF)
    {
        case cJSON_NULL:
            output = ensure(output_buffer, 5);
            if (output == NULL)
            {
                return false;
            }
            strcpy((char*)output, "null");
            return true;

        case cJSON_False:
            output = ensure(output_buffer, 6);
            if (output == NULL)
            {
                return false;
            }
            strcpy((char*)output, "false");
            return true;

        case cJSON_True:
            output = ensure(output_buffer, 5);
            if (output == NULL)
            {
                return false;
            }
            strcpy((char*)output, "true");
            return true;

        case cJSON_Number:
            return print_number(item, output_buffer);

        case cJSON_Raw:
        {
            /* raw JSON: copied verbatim, valuestring must be present */
            size_t raw_length = 0;
            if (item->valuestring == NULL)
            {
                return false;
            }

            raw_length = strlen(item->valuestring) + sizeof("");
            output = ensure(output_buffer, raw_length);
            if (output == NULL)
            {
                return false;
            }
            memcpy(output, item->valuestring, raw_length);
            return true;
        }

        case cJSON_String:
            return print_string(item, output_buffer);

        case cJSON_Array:
            return print_array(item, output_buffer);

        case cJSON_Object:
            return print_object(item, output_buffer);

        default:
            return false;
    }
}
+
/* Build an array from input text. On entry the buffer points at '[';
 * children are collected into a linked list attached to item->child. */
static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer)
{
    cJSON *head = NULL; /* head of the linked list */
    cJSON *current_item = NULL;

    if (input_buffer->depth >= CJSON_NESTING_LIMIT)
    {
        return false; /* too deeply nested */
    }
    input_buffer->depth++;

    if (buffer_at_offset(input_buffer)[0] != '[')
    {
        /* not an array */
        goto fail;
    }

    input_buffer->offset++;
    buffer_skip_whitespace(input_buffer);
    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']'))
    {
        /* empty array */
        goto success;
    }

    /* check if we skipped to the end of the buffer */
    if (cannot_access_at_index(input_buffer, 0))
    {
        input_buffer->offset--;
        goto fail;
    }

    /* step back to character in front of the first element */
    input_buffer->offset--;
    /* loop through the comma separated array elements */
    do
    {
        /* allocate next item */
        cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
        if (new_item == NULL)
        {
            goto fail; /* allocation failure */
        }

        /* attach next item to list */
        if (head == NULL)
        {
            /* start the linked list */
            current_item = head = new_item;
        }
        else
        {
            /* add to the end and advance */
            current_item->next = new_item;
            new_item->prev = current_item;
            current_item = new_item;
        }

        /* parse next value (offset++ skips the '[' or the ',') */
        input_buffer->offset++;
        buffer_skip_whitespace(input_buffer);
        if (!parse_value(current_item, input_buffer))
        {
            goto fail; /* failed to parse value */
        }
        buffer_skip_whitespace(input_buffer);
    }
    while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ','));

    if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']')
    {
        goto fail; /* expected end of array */
    }

success:
    input_buffer->depth--;

    /* head->prev points at the tail, giving O(1) access to the last child */
    if (head != NULL) {
        head->prev = current_item;
    }

    item->type = cJSON_Array;
    item->child = head;

    input_buffer->offset++;

    return true;

fail:
    if (head != NULL)
    {
        cJSON_Delete(head);
    }

    return false;
}
+
/* Render an array to text: '[', the comma-separated children, ']'. */
static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer)
{
    unsigned char *output_pointer = NULL;
    size_t length = 0;
    cJSON *current_element = item->child;

    if (output_buffer == NULL)
    {
        return false;
    }

    /* Compose the output array. */
    /* opening square bracket */
    output_pointer = ensure(output_buffer, 1);
    if (output_pointer == NULL)
    {
        return false;
    }

    *output_pointer = '[';
    output_buffer->offset++;
    output_buffer->depth++;

    while (current_element != NULL)
    {
        if (!print_value(current_element, output_buffer))
        {
            return false;
        }
        update_offset(output_buffer);
        /* emit the separator (", " when formatted, "," otherwise) between elements */
        if (current_element->next)
        {
            length = (size_t) (output_buffer->format ? 2 : 1);
            output_pointer = ensure(output_buffer, length + 1);
            if (output_pointer == NULL)
            {
                return false;
            }
            *output_pointer++ = ',';
            if(output_buffer->format)
            {
                *output_pointer++ = ' ';
            }
            *output_pointer = '\0';
            output_buffer->offset += length;
        }
        current_element = current_element->next;
    }

    output_pointer = ensure(output_buffer, 2);
    if (output_pointer == NULL)
    {
        return false;
    }
    *output_pointer++ = ']';
    *output_pointer = '\0';
    output_buffer->depth--;

    return true;
}
+
/* Build an object from the text. On entry the buffer points at '{';
 * each child stores its key in ->string and its value in the item itself. */
static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer)
{
    cJSON *head = NULL; /* linked list head */
    cJSON *current_item = NULL;

    if (input_buffer->depth >= CJSON_NESTING_LIMIT)
    {
        return false; /* too deeply nested */
    }
    input_buffer->depth++;

    if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{'))
    {
        goto fail; /* not an object */
    }

    input_buffer->offset++;
    buffer_skip_whitespace(input_buffer);
    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}'))
    {
        goto success; /* empty object */
    }

    /* check if we skipped to the end of the buffer */
    if (cannot_access_at_index(input_buffer, 0))
    {
        input_buffer->offset--;
        goto fail;
    }

    /* step back to character in front of the first element */
    input_buffer->offset--;
    /* loop through the comma separated array elements */
    do
    {
        /* allocate next item */
        cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
        if (new_item == NULL)
        {
            goto fail; /* allocation failure */
        }

        /* attach next item to list */
        if (head == NULL)
        {
            /* start the linked list */
            current_item = head = new_item;
        }
        else
        {
            /* add to the end and advance */
            current_item->next = new_item;
            new_item->prev = current_item;
            current_item = new_item;
        }

        /* parse the name of the child (offset++ skips the '{' or the ',') */
        input_buffer->offset++;
        buffer_skip_whitespace(input_buffer);
        if (!parse_string(current_item, input_buffer))
        {
            goto fail; /* failed to parse name */
        }
        buffer_skip_whitespace(input_buffer);

        /* swap valuestring and string, because we parsed the name */
        current_item->string = current_item->valuestring;
        current_item->valuestring = NULL;

        if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':'))
        {
            goto fail; /* invalid object */
        }

        /* parse the value */
        input_buffer->offset++;
        buffer_skip_whitespace(input_buffer);
        if (!parse_value(current_item, input_buffer))
        {
            goto fail; /* failed to parse value */
        }
        buffer_skip_whitespace(input_buffer);
    }
    while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ','));

    if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}'))
    {
        goto fail; /* expected end of object */
    }

success:
    input_buffer->depth--;

    /* head->prev points at the tail, giving O(1) access to the last child */
    if (head != NULL) {
        head->prev = current_item;
    }

    item->type = cJSON_Object;
    item->child = head;

    input_buffer->offset++;
    return true;

fail:
    if (head != NULL)
    {
        cJSON_Delete(head);
    }

    return false;
}
+
+/* Render an object to text. */
+/* Writes "{key:value,...}" (with tabs/newlines when output_buffer->format is
+ * set) into output_buffer, advancing output_buffer->offset as it goes.
+ * Returns false on allocation failure from ensure() or on a failed
+ * key/value print. */
+static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer)
+{
+    unsigned char *output_pointer = NULL;
+    size_t length = 0;
+    cJSON *current_item = item->child;
+
+    if (output_buffer == NULL)
+    {
+        return false;
+    }
+
+    /* Compose the output: */
+    /* opening brace, plus a newline when pretty-printing */
+    length = (size_t) (output_buffer->format ? 2 : 1); /* fmt: {\n */
+    output_pointer = ensure(output_buffer, length + 1);
+    if (output_pointer == NULL)
+    {
+        return false;
+    }
+
+    *output_pointer++ = '{';
+    output_buffer->depth++;
+    if (output_buffer->format)
+    {
+        *output_pointer++ = '\n';
+    }
+    output_buffer->offset += length;
+
+    /* emit each member as key ':' value, comma separated */
+    while (current_item)
+    {
+        if (output_buffer->format)
+        {
+            /* indent one tab per nesting level */
+            size_t i;
+            output_pointer = ensure(output_buffer, output_buffer->depth);
+            if (output_pointer == NULL)
+            {
+                return false;
+            }
+            for (i = 0; i < output_buffer->depth; i++)
+            {
+                *output_pointer++ = '\t';
+            }
+            output_buffer->offset += output_buffer->depth;
+        }
+
+        /* print key */
+        if (!print_string_ptr((unsigned char*)current_item->string, output_buffer))
+        {
+            return false;
+        }
+        update_offset(output_buffer);
+
+        /* separator: ':' plus a tab when pretty-printing */
+        length = (size_t) (output_buffer->format ? 2 : 1);
+        output_pointer = ensure(output_buffer, length);
+        if (output_pointer == NULL)
+        {
+            return false;
+        }
+        *output_pointer++ = ':';
+        if (output_buffer->format)
+        {
+            *output_pointer++ = '\t';
+        }
+        output_buffer->offset += length;
+
+        /* print value */
+        if (!print_value(current_item, output_buffer))
+        {
+            return false;
+        }
+        update_offset(output_buffer);
+
+        /* print comma if not last */
+        length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 1 : 0));
+        output_pointer = ensure(output_buffer, length + 1);
+        if (output_pointer == NULL)
+        {
+            return false;
+        }
+        if (current_item->next)
+        {
+            *output_pointer++ = ',';
+        }
+
+        if (output_buffer->format)
+        {
+            *output_pointer++ = '\n';
+        }
+        *output_pointer = '\0';
+        output_buffer->offset += length;
+
+        current_item = current_item->next;
+    }
+
+    /* closing brace, indented one level less than the members */
+    output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2);
+    if (output_pointer == NULL)
+    {
+        return false;
+    }
+    if (output_buffer->format)
+    {
+        size_t i;
+        for (i = 0; i < (output_buffer->depth - 1); i++)
+        {
+            *output_pointer++ = '\t';
+        }
+    }
+    *output_pointer++ = '}';
+    *output_pointer = '\0';
+    output_buffer->depth--;
+
+    return true;
+}
+
+/* Get Array size/item / object item. */
+/* Count the direct children of an array (or object); NULL counts as empty. */
+CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array)
+{
+    size_t count = 0;
+    const cJSON *node = (array == NULL) ? NULL : array->child;
+
+    for (; node != NULL; node = node->next)
+    {
+        count++;
+    }
+
+    /* FIXME: Can overflow here. Cannot be fixed without breaking the API */
+
+    return (int)count;
+}
+
+/* Return the index-th child of array, or NULL when out of range. */
+static cJSON* get_array_item(const cJSON *array, size_t index)
+{
+    cJSON *node = (array == NULL) ? NULL : array->child;
+
+    /* walk index steps along the sibling chain (or fall off the end) */
+    while ((index > 0) && (node != NULL))
+    {
+        node = node->next;
+        index--;
+    }
+
+    return node;
+}
+
+/* Public indexed access; negative positions are rejected. */
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index)
+{
+    return (index < 0) ? NULL : get_array_item(array, (size_t)index);
+}
+
+/* Look up a child of object by key, case sensitive or not.
+ * Items without a key never match. */
+static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive)
+{
+    cJSON *element = NULL;
+
+    if ((object == NULL) || (name == NULL))
+    {
+        return NULL;
+    }
+
+    /* linear scan of the sibling chain for the first matching key */
+    for (element = object->child; element != NULL; element = element->next)
+    {
+        if (case_sensitive)
+        {
+            /* stop on a missing key too: such items can never match */
+            if ((element->string == NULL) || (strcmp(name, element->string) == 0))
+            {
+                break;
+            }
+        }
+        else if (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(element->string)) == 0)
+        {
+            break;
+        }
+    }
+
+    if ((element == NULL) || (element->string == NULL))
+    {
+        return NULL;
+    }
+
+    return element;
+}
+
+/* Case-insensitive key lookup. */
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string)
+{
+    return get_object_item(object, string, false);
+}
+
+/* Case-sensitive key lookup. */
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string)
+{
+    return get_object_item(object, string, true);
+}
+
+/* True when a case-insensitive lookup finds the key. */
+CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string)
+{
+    return (cJSON_GetObjectItem(object, string) != NULL) ? 1 : 0;
+}
+
+/* Utility for array list handling. */
+/* Doubly link item directly after prev. */
+static void suffix_object(cJSON *prev, cJSON *item)
+{
+    item->prev = prev;
+    prev->next = item;
+}
+
+/* Utility for handling references. */
+/* Shallow-copy item into a fresh node flagged cJSON_IsReference, with its
+ * own (cleared) key and sibling links. Returns NULL on NULL input or
+ * allocation failure. */
+static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks)
+{
+    cJSON *reference = NULL;
+
+    if (item != NULL)
+    {
+        reference = cJSON_New_Item(hooks);
+    }
+    if (reference == NULL)
+    {
+        return NULL;
+    }
+
+    memcpy(reference, item, sizeof(cJSON));
+    reference->string = NULL;
+    reference->type |= cJSON_IsReference;
+    reference->next = NULL;
+    reference->prev = NULL;
+    return reference;
+}
+
+/* Append item to the end of array's child list.
+ * Invariant used throughout: array->child->prev caches the list tail. */
+static cJSON_bool add_item_to_array(cJSON *array, cJSON *item)
+{
+    cJSON *child = NULL;
+
+    /* reject NULL operands and self-insertion (would create a cycle) */
+    if ((item == NULL) || (array == NULL) || (array == item))
+    {
+        return false;
+    }
+
+    child = array->child;
+    /*
+     * To find the last item in array quickly, we use prev in array
+     */
+    if (child == NULL)
+    {
+        /* list is empty, start new one */
+        array->child = item;
+        item->prev = item;
+        item->next = NULL;
+    }
+    else
+    {
+        /* append to the end */
+        if (child->prev)
+        {
+            suffix_object(child->prev, item);
+            array->child->prev = item;
+        }
+        /* NOTE(review): when child->prev is NULL (malformed list) the item
+         * is silently dropped yet true is still returned — confirm intended */
+    }
+
+    return true;
+}
+
+/* Add item to array/object. */
+/* Public wrapper over the internal list append. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item)
+{
+    return add_item_to_array(array, item);
+}
+
+#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
+    #pragma GCC diagnostic push
+#endif
+#ifdef __GNUC__
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+/* helper function to cast away const */
+/* The pragma sandwich silences -Wcast-qual for this one deliberate cast. */
+static void* cast_away_const(const void* string)
+{
+    return (void*)string;
+}
+#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
+    #pragma GCC diagnostic pop
+#endif
+
+
+/* Set item's key to string (adopting it when constant_key, copying it
+ * otherwise) and append the item to object. Returns false on NULL
+ * arguments, self-insertion, or key-copy allocation failure. */
+static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key)
+{
+    char *new_key = NULL;
+    int new_type = cJSON_Invalid;
+
+    if ((object == NULL) || (string == NULL) || (item == NULL) || (object == item))
+    {
+        return false;
+    }
+
+    if (constant_key)
+    {
+        /* caller guarantees the key outlives the item: adopt the pointer */
+        new_key = (char*)cast_away_const(string);
+        new_type = item->type | cJSON_StringIsConst;
+    }
+    else
+    {
+        /* take a private, owned copy of the key */
+        new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks);
+        if (new_key == NULL)
+        {
+            return false;
+        }
+
+        new_type = item->type & ~cJSON_StringIsConst;
+    }
+
+    /* free a previously owned (non-const) key before overwriting it */
+    if (!(item->type & cJSON_StringIsConst) && (item->string != NULL))
+    {
+        hooks->deallocate(item->string);
+    }
+
+    item->string = new_key;
+    item->type = new_type;
+
+    return add_item_to_array(object, item);
+}
+
+/* Add item under a copied key. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item)
+{
+    return add_item_to_object(object, string, item, &global_hooks, false);
+}
+
+/* Add an item to an object with constant string as key */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item)
+{
+    return add_item_to_object(object, string, item, &global_hooks, true);
+}
+
+/* Append a non-owning reference to item; a NULL reference (allocation
+ * failure) makes add_item_to_array return false. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item)
+{
+    if (array == NULL)
+    {
+        return false;
+    }
+
+    return add_item_to_array(array, create_reference(item, &global_hooks));
+}
+
+/* Add a non-owning reference to item under a copied key. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item)
+{
+    if ((object == NULL) || (string == NULL))
+    {
+        return false;
+    }
+
+    return add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false);
+}
+
+/* Create a null item owned by object; destroy it and return NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name)
+{
+    cJSON *null = cJSON_CreateNull();
+
+    if (!add_item_to_object(object, name, null, &global_hooks, false))
+    {
+        cJSON_Delete(null);
+        return NULL;
+    }
+
+    return null;
+}
+
+/* Create a true item owned by object; destroy it and return NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name)
+{
+    cJSON *true_item = cJSON_CreateTrue();
+
+    if (!add_item_to_object(object, name, true_item, &global_hooks, false))
+    {
+        cJSON_Delete(true_item);
+        return NULL;
+    }
+
+    return true_item;
+}
+
+/* Create a false item owned by object; destroy it and return NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name)
+{
+    cJSON *false_item = cJSON_CreateFalse();
+
+    if (!add_item_to_object(object, name, false_item, &global_hooks, false))
+    {
+        cJSON_Delete(false_item);
+        return NULL;
+    }
+
+    return false_item;
+}
+
+/* Create a boolean item owned by object; destroy it and return NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean)
+{
+    cJSON *bool_item = cJSON_CreateBool(boolean);
+
+    if (!add_item_to_object(object, name, bool_item, &global_hooks, false))
+    {
+        cJSON_Delete(bool_item);
+        return NULL;
+    }
+
+    return bool_item;
+}
+
+/* Create a number item owned by object; destroy it and return NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number)
+{
+    cJSON *number_item = cJSON_CreateNumber(number);
+
+    if (!add_item_to_object(object, name, number_item, &global_hooks, false))
+    {
+        cJSON_Delete(number_item);
+        return NULL;
+    }
+
+    return number_item;
+}
+
+/* Create a string item owned by object; destroy it and return NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string)
+{
+    cJSON *string_item = cJSON_CreateString(string);
+
+    if (!add_item_to_object(object, name, string_item, &global_hooks, false))
+    {
+        cJSON_Delete(string_item);
+        return NULL;
+    }
+
+    return string_item;
+}
+
+/* Create a raw-JSON item owned by object; destroy it and return NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw)
+{
+    cJSON *raw_item = cJSON_CreateRaw(raw);
+
+    if (!add_item_to_object(object, name, raw_item, &global_hooks, false))
+    {
+        cJSON_Delete(raw_item);
+        return NULL;
+    }
+
+    return raw_item;
+}
+
+/* Create an empty object owned by object; destroy it and return NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name)
+{
+    cJSON *object_item = cJSON_CreateObject();
+
+    if (!add_item_to_object(object, name, object_item, &global_hooks, false))
+    {
+        cJSON_Delete(object_item);
+        return NULL;
+    }
+
+    return object_item;
+}
+
+/* Create an empty array owned by object; destroy it and return NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name)
+{
+    cJSON *array = cJSON_CreateArray();
+
+    if (!add_item_to_object(object, name, array, &global_hooks, false))
+    {
+        cJSON_Delete(array);
+        return NULL;
+    }
+
+    return array;
+}
+
+/* Unlink item from parent's child list and return it (caller owns it).
+ * Keeps the child->prev tail cache consistent. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item)
+{
+    if ((parent == NULL) || (item == NULL))
+    {
+        return NULL;
+    }
+
+    if (item != parent->child)
+    {
+        /* not the first element */
+        item->prev->next = item->next;
+    }
+    if (item->next != NULL)
+    {
+        /* not the last element */
+        item->next->prev = item->prev;
+    }
+
+    if (item == parent->child)
+    {
+        /* first element */
+        parent->child = item->next;
+    }
+    else if (item->next == NULL)
+    {
+        /* last element: repoint the tail cache at the new tail */
+        parent->child->prev = item->prev;
+    }
+
+    /* make sure the detached item doesn't point anywhere anymore */
+    item->prev = NULL;
+    item->next = NULL;
+
+    return item;
+}
+
+/* Detach the which-th array element; negative positions are invalid. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which)
+{
+    if (which < 0)
+    {
+        return NULL;
+    }
+
+    return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which));
+}
+
+/* Detach the which-th array element, then destroy it. */
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which)
+{
+    cJSON_Delete(cJSON_DetachItemFromArray(array, which));
+}
+
+/* Case-insensitive key lookup, then unlink the found item. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string)
+{
+    return cJSON_DetachItemViaPointer(object, cJSON_GetObjectItem(object, string));
+}
+
+/* Case-sensitive key lookup, then unlink the found item. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string)
+{
+    return cJSON_DetachItemViaPointer(object, cJSON_GetObjectItemCaseSensitive(object, string));
+}
+
+/* Detach by case-insensitive key, then destroy the detached item. */
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string)
+{
+    cJSON_Delete(cJSON_DetachItemFromObject(object, string));
+}
+
+/* Detach by case-sensitive key, then destroy the detached item. */
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string)
+{
+    cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string));
+}
+
+/* Replace array/object items with new ones. */
+/* Insert newitem before position which (appending when which is past the
+ * end). Returns false on a negative position or NULL newitem.
+ * Fix: the original dereferenced newitem without a NULL check, crashing on
+ * cJSON_InsertItemInArray(array, 0, NULL); it also spliced against an
+ * after_inserted whose prev link was NULL (corrupted list). */
+CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem)
+{
+    cJSON *after_inserted = NULL;
+
+    if ((which < 0) || (newitem == NULL))
+    {
+        return false;
+    }
+
+    after_inserted = get_array_item(array, (size_t)which);
+    if (after_inserted == NULL)
+    {
+        /* position past the end: plain append */
+        return add_item_to_array(array, newitem);
+    }
+
+    if ((after_inserted != array->child) && (after_inserted->prev == NULL))
+    {
+        /* return false if after_inserted is a corrupted array item */
+        return false;
+    }
+
+    /* splice newitem in front of after_inserted */
+    newitem->next = after_inserted;
+    newitem->prev = after_inserted->prev;
+    after_inserted->prev = newitem;
+    if (after_inserted == array->child)
+    {
+        array->child = newitem;
+    }
+    else
+    {
+        newitem->prev->next = newitem;
+    }
+    return true;
+}
+
+/* Swap replacement into item's place in parent's child list, then destroy
+ * item. Keeps the child->prev tail cache consistent. */
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement)
+{
+    if ((parent == NULL) || (parent->child == NULL) || (replacement == NULL) || (item == NULL))
+    {
+        return false;
+    }
+
+    /* replacing an item with itself is a successful no-op */
+    if (replacement == item)
+    {
+        return true;
+    }
+
+    replacement->next = item->next;
+    replacement->prev = item->prev;
+
+    if (replacement->next != NULL)
+    {
+        replacement->next->prev = replacement;
+    }
+    if (parent->child == item)
+    {
+        if (parent->child->prev == parent->child)
+        {
+            /* single-element list: the tail cache must point at itself */
+            replacement->prev = replacement;
+        }
+        parent->child = replacement;
+    }
+    else
+    {   /*
+         * To find the last item in array quickly, we use prev in array.
+         * We can't modify the last item's next pointer where this item was the parent's child
+         */
+        if (replacement->prev != NULL)
+        {
+            replacement->prev->next = replacement;
+        }
+        if (replacement->next == NULL)
+        {
+            /* replaced the tail: refresh the tail cache */
+            parent->child->prev = replacement;
+        }
+    }
+
+    item->next = NULL;
+    item->prev = NULL;
+    cJSON_Delete(item);
+
+    return true;
+}
+
+/* Replace the which-th array element; negative positions are invalid. */
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem)
+{
+    if (which < 0)
+    {
+        return false;
+    }
+
+    return cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem);
+}
+
+/* Give replacement the key string (owned copy), then swap it in for the
+ * existing item with that key. */
+static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive)
+{
+    if ((replacement == NULL) || (string == NULL))
+    {
+        return false;
+    }
+
+    /* replace the name in the replacement */
+    if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL))
+    {
+        cJSON_free(replacement->string);
+    }
+    replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks);
+    if (replacement->string == NULL)
+    {
+        return false;
+    }
+
+    replacement->type &= ~cJSON_StringIsConst;
+
+    /* NOTE(review): if the lookup fails here the freshly copied key stays
+     * attached to the (still caller-owned) replacement — not a leak, but
+     * the old key is already gone; confirm callers tolerate this */
+    return cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement);
+}
+
+/* Replace by case-insensitive key. */
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem)
+{
+    return replace_item_in_object(object, string, newitem, false);
+}
+
+/* Replace by case-sensitive key. */
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem)
+{
+    return replace_item_in_object(object, string, newitem, true);
+}
+
+/* Create basic types: */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    item->type = cJSON_NULL;
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    item->type = cJSON_True;
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    item->type = cJSON_False;
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    item->type = boolean ? cJSON_True : cJSON_False;
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    item->type = cJSON_Number;
+    item->valuedouble = num;
+
+    /* saturate valueint rather than hit undefined overflow behavior */
+    if (num >= INT_MAX)
+    {
+        item->valueint = INT_MAX;
+    }
+    else if (num <= (double)INT_MIN)
+    {
+        item->valueint = INT_MIN;
+    }
+    else
+    {
+        item->valueint = (int)num;
+    }
+
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    item->type = cJSON_String;
+    item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks);
+    if (item->valuestring == NULL)
+    {
+        /* copy failed: don't leak the half-built item */
+        cJSON_Delete(item);
+        return NULL;
+    }
+
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    /* valuestring aliases the caller's buffer (reference item) */
+    item->type = cJSON_String | cJSON_IsReference;
+    item->valuestring = (char*)cast_away_const(string);
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    /* child aliases the caller's tree (reference item) */
+    item->type = cJSON_Object | cJSON_IsReference;
+    item->child = (cJSON*)cast_away_const(child);
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    /* child aliases the caller's tree (reference item) */
+    item->type = cJSON_Array | cJSON_IsReference;
+    item->child = (cJSON*)cast_away_const(child);
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    item->type = cJSON_Raw;
+    item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks);
+    if (item->valuestring == NULL)
+    {
+        /* copy failed: don't leak the half-built item */
+        cJSON_Delete(item);
+        return NULL;
+    }
+
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    item->type = cJSON_Array;
+    return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void)
+{
+    cJSON *item = cJSON_New_Item(&global_hooks);
+    if (item == NULL)
+    {
+        return NULL;
+    }
+
+    item->type = cJSON_Object;
+    return item;
+}
+
+/* Create Arrays: */
+/* Build an array of number items from count ints; NULL on bad input or
+ * allocation failure (partially built arrays are destroyed). */
+CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count)
+{
+    size_t i = 0;
+    cJSON *number = NULL;
+    cJSON *previous = NULL;
+    cJSON *array = NULL;
+
+    if ((count < 0) || (numbers == NULL))
+    {
+        return NULL;
+    }
+
+    array = cJSON_CreateArray();
+
+    for (i = 0; (array != NULL) && (i < (size_t)count); i++)
+    {
+        number = cJSON_CreateNumber(numbers[i]);
+        if (number == NULL)
+        {
+            cJSON_Delete(array);
+            return NULL;
+        }
+        if (i == 0)
+        {
+            array->child = number;
+        }
+        else
+        {
+            suffix_object(previous, number);
+        }
+        previous = number;
+    }
+
+    /* child->prev caches the list tail */
+    if ((array != NULL) && (array->child != NULL))
+    {
+        array->child->prev = number;
+    }
+
+    return array;
+}
+
+/* Build an array of number items from count floats; NULL on bad input or
+ * allocation failure (partially built arrays are destroyed). */
+CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count)
+{
+    size_t i = 0;
+    cJSON *number = NULL;
+    cJSON *previous = NULL;
+    cJSON *array = NULL;
+
+    if ((count < 0) || (numbers == NULL))
+    {
+        return NULL;
+    }
+
+    array = cJSON_CreateArray();
+
+    for (i = 0; (array != NULL) && (i < (size_t)count); i++)
+    {
+        number = cJSON_CreateNumber((double)numbers[i]);
+        if (number == NULL)
+        {
+            cJSON_Delete(array);
+            return NULL;
+        }
+        if (i == 0)
+        {
+            array->child = number;
+        }
+        else
+        {
+            suffix_object(previous, number);
+        }
+        previous = number;
+    }
+
+    /* child->prev caches the list tail */
+    if ((array != NULL) && (array->child != NULL))
+    {
+        array->child->prev = number;
+    }
+
+    return array;
+}
+
+/* Build an array of number items from count doubles; NULL on bad input or
+ * allocation failure (partially built arrays are destroyed). */
+CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count)
+{
+    size_t i = 0;
+    cJSON *number = NULL;
+    cJSON *previous = NULL;
+    cJSON *array = NULL;
+
+    if ((count < 0) || (numbers == NULL))
+    {
+        return NULL;
+    }
+
+    array = cJSON_CreateArray();
+
+    for (i = 0; (array != NULL) && (i < (size_t)count); i++)
+    {
+        number = cJSON_CreateNumber(numbers[i]);
+        if (number == NULL)
+        {
+            cJSON_Delete(array);
+            return NULL;
+        }
+        if (i == 0)
+        {
+            array->child = number;
+        }
+        else
+        {
+            suffix_object(previous, number);
+        }
+        previous = number;
+    }
+
+    /* child->prev caches the list tail */
+    if ((array != NULL) && (array->child != NULL))
+    {
+        array->child->prev = number;
+    }
+
+    return array;
+}
+
+/* Build an array of string items from count C strings; NULL on bad input
+ * or allocation failure (partially built arrays are destroyed). */
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count)
+{
+    size_t i = 0;
+    cJSON *string_item = NULL;
+    cJSON *previous = NULL;
+    cJSON *array = NULL;
+
+    if ((count < 0) || (strings == NULL))
+    {
+        return NULL;
+    }
+
+    array = cJSON_CreateArray();
+
+    for (i = 0; (array != NULL) && (i < (size_t)count); i++)
+    {
+        string_item = cJSON_CreateString(strings[i]);
+        if (string_item == NULL)
+        {
+            cJSON_Delete(array);
+            return NULL;
+        }
+        if (i == 0)
+        {
+            array->child = string_item;
+        }
+        else
+        {
+            suffix_object(previous, string_item);
+        }
+        previous = string_item;
+    }
+
+    /* child->prev caches the list tail */
+    if ((array != NULL) && (array->child != NULL))
+    {
+        array->child->prev = string_item;
+    }
+
+    return array;
+}
+
+/* Duplication */
+/* Deep- (recurse true) or shallow-copy item into freshly allocated nodes.
+ * Reference flags are cleared on the copy; const keys are shared, other
+ * strings are duplicated. Returns NULL on any allocation failure.
+ * NOTE(review): recursion depth follows nesting depth of the input. */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse)
+{
+    cJSON *newitem = NULL;
+    cJSON *child = NULL;
+    cJSON *next = NULL;
+    cJSON *newchild = NULL;
+
+    /* Bail on bad ptr */
+    if (!item)
+    {
+        goto fail;
+    }
+    /* Create new item */
+    newitem = cJSON_New_Item(&global_hooks);
+    if (!newitem)
+    {
+        goto fail;
+    }
+    /* Copy over all vars */
+    newitem->type = item->type & (~cJSON_IsReference);
+    newitem->valueint = item->valueint;
+    newitem->valuedouble = item->valuedouble;
+    if (item->valuestring)
+    {
+        newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks);
+        if (!newitem->valuestring)
+        {
+            goto fail;
+        }
+    }
+    if (item->string)
+    {
+        /* const keys are shared, owned keys are copied */
+        newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks);
+        if (!newitem->string)
+        {
+            goto fail;
+        }
+    }
+    /* If non-recursive, then we're done! */
+    if (!recurse)
+    {
+        return newitem;
+    }
+    /* Walk the ->next chain for the child. */
+    child = item->child;
+    while (child != NULL)
+    {
+        newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */
+        if (!newchild)
+        {
+            goto fail;
+        }
+        if (next != NULL)
+        {
+            /* If newitem->child already set, then crosswire ->prev and ->next and move on */
+            next->next = newchild;
+            newchild->prev = next;
+            next = newchild;
+        }
+        else
+        {
+            /* Set newitem->child and move to it */
+            newitem->child = newchild;
+            next = newchild;
+        }
+        child = child->next;
+    }
+    /* child->prev caches the list tail */
+    if (newitem && newitem->child)
+    {
+        newitem->child->prev = newchild;
+    }
+
+    return newitem;
+
+fail:
+    if (newitem != NULL)
+    {
+        cJSON_Delete(newitem);
+    }
+
+    return NULL;
+}
+
+/* Advance *input past a "//" comment, up to and including the next
+ * newline (or stop at the terminating NUL). */
+static void skip_oneline_comment(char **input)
+{
+    *input += static_strlen("//");
+
+    while ((*input)[0] != '\0')
+    {
+        if ((*input)[0] == '\n')
+        {
+            *input += static_strlen("\n");
+            return;
+        }
+        ++(*input);
+    }
+}
+
+/* Advance *input past a slash-star comment, up to and including the
+ * closing star-slash (or stop at the terminating NUL). */
+static void skip_multiline_comment(char **input)
+{
+    *input += static_strlen("/*");
+
+    while ((*input)[0] != '\0')
+    {
+        if (((*input)[0] == '*') && ((*input)[1] == '/'))
+        {
+            *input += static_strlen("*/");
+            return;
+        }
+        ++(*input);
+    }
+}
+
+/* Copy a quoted JSON string from *input to *output verbatim (string
+ * contents must keep their whitespace), advancing both cursors past the
+ * closing quote. */
+static void minify_string(char **input, char **output) {
+    (*output)[0] = (*input)[0];
+    *input += static_strlen("\"");
+    *output += static_strlen("\"");
+
+
+    for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) {
+        (*output)[0] = (*input)[0];
+
+        if ((*input)[0] == '\"') {
+            /* unescaped closing quote: copy it and stop */
+            (*output)[0] = '\"';
+            *input += static_strlen("\"");
+            *output += static_strlen("\"");
+            return;
+        } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) {
+            /* escaped quote: copy both bytes so the string doesn't end early */
+            (*output)[1] = (*input)[1];
+            *input += static_strlen("\"");
+            *output += static_strlen("\"");
+        }
+        /* NOTE(review): an escaped backslash followed by a quote (…\\")
+         * appears to be mis-read as an escaped quote here — confirm against
+         * upstream escape handling */
+    }
+}
+
+/* Strip whitespace and //- and slash-star-style comments from json
+ * in place. The write cursor never passes the read cursor, so the
+ * in-place compaction is safe. */
+CJSON_PUBLIC(void) cJSON_Minify(char *json)
+{
+    char *into = json; /* write cursor */
+
+    if (json == NULL)
+    {
+        return;
+    }
+
+    while (json[0] != '\0')
+    {
+        switch (json[0])
+        {
+            /* drop whitespace outside of strings */
+            case ' ':
+            case '\t':
+            case '\r':
+            case '\n':
+                json++;
+                break;
+
+            case '/':
+                if (json[1] == '/')
+                {
+                    skip_oneline_comment(&json);
+                }
+                else if (json[1] == '*')
+                {
+                    skip_multiline_comment(&json);
+                } else {
+                    /* lone slash: keep it */
+                    json++;
+                }
+                break;
+
+            case '\"':
+                /* string contents are copied verbatim */
+                minify_string(&json, (char**)&into);
+                break;
+
+            default:
+                into[0] = json[0];
+                json++;
+                into++;
+        }
+    }
+
+    /* and null-terminate. */
+    *into = '\0';
+}
+
+/* NULL is not a valid item of any type. */
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item)
+{
+    return (item != NULL) && ((item->type & 0xFF) == cJSON_Invalid);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item)
+{
+    return (item != NULL) && ((item->type & 0xFF) == cJSON_False);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item)
+{
+    return (item != NULL) && ((item->type & 0xFF) == cJSON_True);
+}
+
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item)
+{
+    return (item != NULL) && ((item->type & (cJSON_True | cJSON_False)) != 0);
+}
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item)
+{
+    return (item != NULL) && ((item->type & 0xFF) == cJSON_NULL);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item)
+{
+    return (item != NULL) && ((item->type & 0xFF) == cJSON_Number);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item)
+{
+    return (item != NULL) && ((item->type & 0xFF) == cJSON_String);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item)
+{
+    return (item != NULL) && ((item->type & 0xFF) == cJSON_Array);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item)
+{
+    return (item != NULL) && ((item->type & 0xFF) == cJSON_Object);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item)
+{
+    return (item != NULL) && ((item->type & 0xFF) == cJSON_Raw);
+}
+
+/* Deep structural equality of two items. Objects compare as unordered
+ * key sets (checked in both directions); arrays compare element-wise in
+ * order. Returns false for NULL inputs or mismatched/unknown types. */
+CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive)
+{
+    if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF)))
+    {
+        return false;
+    }
+
+    /* check if type is valid */
+    switch (a->type & 0xFF)
+    {
+        case cJSON_False:
+        case cJSON_True:
+        case cJSON_NULL:
+        case cJSON_Number:
+        case cJSON_String:
+        case cJSON_Raw:
+        case cJSON_Array:
+        case cJSON_Object:
+            break;
+
+        default:
+            return false;
+    }
+
+    /* identical objects are equal */
+    if (a == b)
+    {
+        return true;
+    }
+
+    switch (a->type & 0xFF)
+    {
+        /* in these cases an equal type is enough */
+        case cJSON_False:
+        case cJSON_True:
+        case cJSON_NULL:
+            return true;
+
+        case cJSON_Number:
+            if (compare_double(a->valuedouble, b->valuedouble))
+            {
+                return true;
+            }
+            return false;
+
+        case cJSON_String:
+        case cJSON_Raw:
+            if ((a->valuestring == NULL) || (b->valuestring == NULL))
+            {
+                return false;
+            }
+            if (strcmp(a->valuestring, b->valuestring) == 0)
+            {
+                return true;
+            }
+
+            return false;
+
+        case cJSON_Array:
+        {
+            cJSON *a_element = a->child;
+            cJSON *b_element = b->child;
+
+            /* element-wise, order-sensitive comparison */
+            for (; (a_element != NULL) && (b_element != NULL);)
+            {
+                if (!cJSON_Compare(a_element, b_element, case_sensitive))
+                {
+                    return false;
+                }
+
+                a_element = a_element->next;
+                b_element = b_element->next;
+            }
+
+            /* one of the arrays is longer than the other */
+            if (a_element != b_element) {
+                return false;
+            }
+
+            return true;
+        }
+
+        case cJSON_Object:
+        {
+            cJSON *a_element = NULL;
+            cJSON *b_element = NULL;
+            cJSON_ArrayForEach(a_element, a)
+            {
+                /* TODO This has O(n^2) runtime, which is horrible! */
+                b_element = get_object_item(b, a_element->string, case_sensitive);
+                if (b_element == NULL)
+                {
+                    return false;
+                }
+
+                if (!cJSON_Compare(a_element, b_element, case_sensitive))
+                {
+                    return false;
+                }
+            }
+
+            /* doing this twice, once on a and b to prevent true comparison if a subset of b
+             * TODO: Do this the proper way, this is just a fix for now */
+            cJSON_ArrayForEach(b_element, b)
+            {
+                a_element = get_object_item(a, b_element->string, case_sensitive);
+                if (a_element == NULL)
+                {
+                    return false;
+                }
+
+                if (!cJSON_Compare(b_element, a_element, case_sensitive))
+                {
+                    return false;
+                }
+            }
+
+            return true;
+        }
+
+        default:
+            return false;
+    }
+}
+
+/* Allocate through the globally configured hooks. */
+CJSON_PUBLIC(void *) cJSON_malloc(size_t size)
+{
+    return global_hooks.allocate(size);
+}
+
+/* Release through the globally configured hooks. */
+CJSON_PUBLIC(void) cJSON_free(void *object)
+{
+    global_hooks.deallocate(object);
+}
diff --git a/src/tools/pcommandbatch/cJSON.h b/src/tools/pcommandbatch/cJSON.h
new file mode 100644
index 00000000..2628d763
--- /dev/null
+++ b/src/tools/pcommandbatch/cJSON.h
@@ -0,0 +1,300 @@
+/*
+ Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+*/
+
+#ifndef cJSON__h
+#define cJSON__h
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32))
+#define __WINDOWS__
+#endif
+
+#ifdef __WINDOWS__
+
+/* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. For windows you have 3 define options:
+
+CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols
+CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default)
+CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol
+
+For *nix builds that support visibility attribute, you can define similar behavior by
+
+setting default visibility to hidden by adding
+-fvisibility=hidden (for gcc)
+or
+-xldscope=hidden (for sun cc)
+to CFLAGS
+
+then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does
+
+*/
+
+#define CJSON_CDECL __cdecl
+#define CJSON_STDCALL __stdcall
+
+/* export symbols by default, this is necessary for copy pasting the C and header file */
+#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_EXPORT_SYMBOLS
+#endif
+
+#if defined(CJSON_HIDE_SYMBOLS)
+#define CJSON_PUBLIC(type) type CJSON_STDCALL
+#elif defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL
+#elif defined(CJSON_IMPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL
+#endif
+#else /* !__WINDOWS__ */
+#define CJSON_CDECL
+#define CJSON_STDCALL
+
+#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY)
+#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type
+#else
+#define CJSON_PUBLIC(type) type
+#endif
+#endif
+
+/* project version */
+#define CJSON_VERSION_MAJOR 1
+#define CJSON_VERSION_MINOR 7
+#define CJSON_VERSION_PATCH 16
+
+#include <stddef.h>
+
+/* cJSON Types: */
+#define cJSON_Invalid (0)
+#define cJSON_False (1 << 0)
+#define cJSON_True (1 << 1)
+#define cJSON_NULL (1 << 2)
+#define cJSON_Number (1 << 3)
+#define cJSON_String (1 << 4)
+#define cJSON_Array (1 << 5)
+#define cJSON_Object (1 << 6)
+#define cJSON_Raw (1 << 7) /* raw json */
+
+#define cJSON_IsReference 256
+#define cJSON_StringIsConst 512
+
+/* The cJSON structure: one node in a parsed JSON tree. Members of an array
+ * or object are linked as a sibling list via next/prev, reachable from the
+ * parent's child pointer. */
+typedef struct cJSON
+{
+    /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */
+    struct cJSON *next;
+    struct cJSON *prev;
+    /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */
+    struct cJSON *child;
+
+    /* The type of the item, as above. */
+    int type;
+
+    /* The item's string, if type==cJSON_String and type == cJSON_Raw */
+    char *valuestring;
+    /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */
+    int valueint;
+    /* The item's number, if type==cJSON_Number */
+    double valuedouble;
+
+    /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */
+    char *string;
+} cJSON;
+
+typedef struct cJSON_Hooks
+{
+ /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */
+ void *(CJSON_CDECL *malloc_fn)(size_t sz);
+ void (CJSON_CDECL *free_fn)(void *ptr);
+} cJSON_Hooks;
+
+typedef int cJSON_bool;
+
+/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them.
+ * This is to prevent stack overflows. */
+#ifndef CJSON_NESTING_LIMIT
+#define CJSON_NESTING_LIMIT 1000
+#endif
+
+/* returns the version of cJSON as a string */
+CJSON_PUBLIC(const char*) cJSON_Version(void);
+
+/* Supply malloc, realloc and free functions to cJSON */
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks);
+
+/* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */
+/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */
+CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value);
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length);
+/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */
+/* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated);
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated);
+
+/* Render a cJSON entity to text for transfer/storage. */
+CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item);
+/* Render a cJSON entity to text for transfer/storage without any formatting. */
+CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item);
+/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */
+CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt);
+/* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */
+/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */
+CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format);
+/* Delete a cJSON entity and all subentities. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item);
+
+/* Returns the number of items in an array (or object). */
+CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array);
+/* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index);
+/* Get item "string" from object. Case insensitive. */
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string);
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string);
+CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string);
+/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void);
+
+/* Check item type and return its value */
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item);
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item);
+
+/* These functions check the type of an item */
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item);
+
+/* These calls create a cJSON item of the appropriate type. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean);
+CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num);
+CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string);
+/* raw json */
+CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw);
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void);
+
+/* Create a string where valuestring references a string so
+ * it will not be freed by cJSON_Delete */
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string);
+/* Create an object/array that only references it's elements so
+ * they will not be freed by cJSON_Delete */
+CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child);
+CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child);
+
+/* These utilities create an Array of count items.
+ * The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/
+CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count);
+
+/* Append item to the specified array/object. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item);
+/* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object.
+ * WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before
+ * writing to `item->string` */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item);
+/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item);
+
+/* Remove/Detach items from Arrays/Objects. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);
+
+/* Update array items. */
+CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem);
+
+/* Duplicate a cJSON item */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse);
+/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will
+ * need to be released. With recurse!=0, it will duplicate any children connected to the item.
+ * The item->next and ->prev pointers are always zero on return from Duplicate. */
+/* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal.
+ * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */
+CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive);
+
+/* Minify a strings, remove blank characters(such as ' ', '\t', '\r', '\n') from strings.
+ * The input pointer json cannot point to a read-only address area, such as a string constant,
+ * but should point to a readable and writable address area. */
+CJSON_PUBLIC(void) cJSON_Minify(char *json);
+
+/* Helper functions for creating and adding items to an object at the same time.
+ * They return the added item or NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean);
+CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number);
+CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string);
+CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw);
+CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name);
+
+/* When assigning an integer value, it needs to be propagated to valuedouble too. */
+#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number))
+/* helper for the cJSON_SetNumberValue macro */
+CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number);
+#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number))
+/* Change the valuestring of a cJSON_String object, only takes effect when type of object is cJSON_String */
+CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring);
+
+/* If the object is not a boolean type this does nothing and returns cJSON_Invalid else it returns the new type*/
+#define cJSON_SetBoolValue(object, boolValue) ( \
+ (object != NULL && ((object)->type & (cJSON_False|cJSON_True))) ? \
+ (object)->type=((object)->type &(~(cJSON_False|cJSON_True)))|((boolValue)?cJSON_True:cJSON_False) : \
+ cJSON_Invalid\
+)
+
+/* Macro for iterating over an array or object */
+#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next)
+
+/* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */
+CJSON_PUBLIC(void *) cJSON_malloc(size_t size);
+CJSON_PUBLIC(void) cJSON_free(void *object);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/tools/pcommandbatch/pcommandbatch.c b/src/tools/pcommandbatch/pcommandbatch.c
new file mode 100644
index 00000000..07e38b59
--- /dev/null
+++ b/src/tools/pcommandbatch/pcommandbatch.c
@@ -0,0 +1,464 @@
+// Released under the MIT License. See LICENSE for details.
+
+// An ultra-simple client app to forward commands to a pcommand server. This
+// lets us run *lots* of small pcommands very fast. Normally the limiting
+// factor in such cases is the startup time of Python which this mostly
+// eliminates. See tools/efrotools/pcommandbatch.py for more info.
+
+#include <arpa/inet.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "cJSON.h"
+
+// Shared state for a single pcommandbatch client invocation.
+struct Context_ {
+  const char* state_dir_path;   // Dir holding server state/log files.
+  const char* instance_prefix;  // Location name ("root" or "assets").
+  int instance_num;             // Random instance index (0-5) spreading load.
+  int pid;                      // Our process id (used in log messages).
+  int verbose;                  // Extra client-side logging enabled?
+  int debug;                    // Debug mode; implies verbose.
+  int server_idle_seconds;      // Server idle --timeout; also bounds state-file freshness.
+  const char* pcommandpath;     // Path to the 'pcommand' script used for server spinup.
+  int sockfd;                   // Connected socket to the batch server.
+};
+
+int path_exists_(const char* path);
+int establish_connection_(const struct Context_* ctx);
+int calc_paths_(struct Context_* ctx);
+int send_command_(struct Context_* ctx, int argc, char** argv);
+int handle_response_(const struct Context_* ctx);
+int get_running_server_port_(const struct Context_* ctx,
+ const char* state_file_path_full);
+
+// Client entry point: determine our paths, connect to (or spawn) a batch
+// server, forward our full argv as a command, and relay the server's
+// result. Exits 0 on success and 1 on any failure.
+int main(int argc, char** argv) {
+  struct Context_ ctx;
+  memset(&ctx, 0, sizeof(ctx));
+
+  ctx.state_dir_path = NULL;
+  ctx.instance_prefix = NULL;
+  ctx.pcommandpath = NULL;
+  ctx.server_idle_seconds = 5;
+  ctx.pid = getpid();
+
+  // Verbose mode enables more printing here. Debug mode enables that plus
+  // extra stuff. The extra stuff is mostly the server side though.
+  {
+    const char* debug_env = getenv("BA_PCOMMANDBATCH_DEBUG");
+    ctx.debug = debug_env && !strcmp(debug_env, "1");
+    const char* verbose_env = getenv("BA_PCOMMANDBATCH_VERBOSE");
+    ctx.verbose = ctx.debug || (verbose_env && !strcmp(verbose_env, "1"));
+  }
+
+  // Seed rand() using the current time in microseconds.
+  // (rand() picks which server instance we talk to; see calc_paths_.)
+  struct timeval tv;
+  gettimeofday(&tv, NULL);
+  unsigned int seed = tv.tv_usec;
+  srand(seed);
+
+  // Figure our which file path we'll use to get server state.
+  if (calc_paths_(&ctx) != 0) {
+    return 1;
+  }
+
+  // Establish communication with said server (spinning it up if needed).
+  ctx.sockfd = establish_connection_(&ctx);
+  if (ctx.sockfd == -1) {
+    return 1;
+  }
+
+  if (send_command_(&ctx, argc, argv) != 0) {
+    return 1;
+  }
+
+  // Any nonzero server result is collapsed to exit code 1.
+  int result_val = handle_response_(&ctx);
+  if (result_val != 0) {
+    return 1;
+  }
+
+  if (close(ctx.sockfd) != 0) {
+    fprintf(
+        stderr,
+        "Error: pcommandbatch client %s_%d (pid %d): error on socket close.\n",
+        ctx.instance_prefix, ctx.instance_num, ctx.pid);
+    return 1;
+  }
+  return result_val;  // Necessarily 0 at this point.
+}
+
+// If a valid state file is present at the provided path and not older than
+// server_idle_seconds, return said port as an int. Otherwise return -1;
+int get_running_server_port_(const struct Context_* ctx,
+ const char* state_file_path_full) {
+ struct stat file_stat;
+
+ time_t current_time = time(NULL);
+ if (current_time == -1) {
+ perror("time");
+ return -1;
+ }
+
+ int fd = open(state_file_path_full, O_RDONLY);
+ if (fd < 0) {
+ return -1;
+ }
+
+ if (fstat(fd, &file_stat) == -1) {
+ close(fd);
+ return -1;
+ }
+
+ int age_seconds = current_time - file_stat.st_mtime;
+ if (ctx->verbose) {
+ if (age_seconds <= ctx->server_idle_seconds) {
+ fprintf(
+ stderr,
+ "pcommandbatch client %s_%d (pid %d) found state file with age %d at "
+ "time %ld.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid, age_seconds,
+ time(NULL));
+ }
+ }
+
+ if (age_seconds > ctx->server_idle_seconds) {
+ close(fd);
+ return -1;
+ } else if (age_seconds < 0) {
+ fprintf(stderr, "pcommandbatch got negative age; unexpected.");
+ }
+
+ char buf[256];
+ ssize_t amt = read(fd, buf, sizeof(buf) - 1);
+ close(fd);
+
+ if (amt == -1 || amt == sizeof(buf) - 1) {
+ return -1;
+ }
+ buf[amt] = 0; // Null-terminate it.
+
+ cJSON* state_dict = cJSON_Parse(buf);
+ if (!state_dict) {
+ fprintf(stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): failed to parse state "
+ "value.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid);
+ return -1;
+ }
+ // If results included output, print it.
+ cJSON* port_obj = cJSON_GetObjectItem(state_dict, "p");
+ if (!port_obj || !cJSON_IsNumber(port_obj)) {
+ fprintf(stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): failed to get port "
+ "value from state.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid);
+ cJSON_Delete(state_dict);
+ return -1;
+ }
+ int port = cJSON_GetNumberValue(port_obj);
+ cJSON_Delete(state_dict);
+ return port;
+
+ // return val;
+}
+
// Return nonzero if a filesystem entry exists at the given path.
int path_exists_(const char* path) {
  struct stat st;
  return stat(path, &st) == 0;
}
+
+int establish_connection_(const struct Context_* ctx) {
+ char state_file_path_full[256];
+ snprintf(state_file_path_full, sizeof(state_file_path_full),
+ "%s/worker_state_%s_%d", ctx->state_dir_path, ctx->instance_prefix,
+ ctx->instance_num);
+
+ int sockfd = 0;
+
+ if ((sockfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
+ fprintf(stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): could not create "
+ "socket.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid);
+ return -1;
+ }
+
+ // On Mac I'm running into EADDRNOTAVAIL errors if I spit out too many
+ // requests in a short enough period of time. I'm guessing its exhausting
+ // free ports when cooldown time is taken into account. Sleeping and
+ // trying again in a moment seems to work.
+ int retry_attempt = 0;
+ int retry_sleep_secs = 1;
+ while (1) {
+ // First look for an already-running batch server.
+ int port = get_running_server_port_(ctx, state_file_path_full);
+ if (port == -1) {
+ // Ok; no running server. Spin one up.
+ if (ctx->verbose) {
+ fprintf(stderr,
+ "pcommandbatch client %s_%d (pid %d) requesting batch server "
+ "spinup...\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid);
+ }
+
+ // In non-debug-mode, route to a log file.
+ char endbuf[512];
+ if (ctx->debug) {
+ snprintf(endbuf, sizeof(endbuf), " &");
+ } else {
+ snprintf(endbuf, sizeof(endbuf), " >>%s/worker_log_%s_%d 2>&1 &",
+ ctx->state_dir_path, ctx->instance_prefix, ctx->instance_num);
+ }
+ char buf[512];
+ snprintf(buf, sizeof(buf),
+ "%s run_pcommandbatch_server --timeout %d --state-dir %s "
+ "--instance %s_%d %s",
+ ctx->pcommandpath, ctx->server_idle_seconds, ctx->state_dir_path,
+ ctx->instance_prefix, ctx->instance_num, endbuf);
+ system(buf);
+
+ // Spin and wait up to a few seconds for the file to appear.
+ time_t start_time = time(NULL);
+ int cycles = 0;
+ while (time(NULL) - start_time < 5) {
+ port = get_running_server_port_(ctx, state_file_path_full);
+ if (port != -1) {
+ break;
+ }
+ usleep(10000);
+ cycles += 1;
+ }
+ if (ctx->verbose) {
+ fprintf(stderr,
+ "pcommandbatch client %s_%d (pid %d) waited %d"
+ " cycles for state file to appear at '%s'.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid, cycles,
+ state_file_path_full);
+ }
+
+ if (port == -1) {
+ // We failed but we can retry.
+ if (ctx->verbose) {
+ fprintf(stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): failed to open "
+ "server on attempt %d.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid,
+ retry_attempt);
+ }
+ }
+ }
+
+ // Ok we got a port; now try to connect to it.
+ if (port != -1) {
+ if (ctx->verbose) {
+ fprintf(
+ stderr,
+ "pcommandbatch client %s_%d (pid %d) will use server on port %d at "
+ "time %ld.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid, port,
+ time(NULL));
+ }
+
+ struct sockaddr_in serv_addr;
+ memset(&serv_addr, '0', sizeof(serv_addr));
+ serv_addr.sin_family = AF_INET;
+ serv_addr.sin_port = htons(port);
+ serv_addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+
+ int cresult =
+ connect(sockfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr));
+ if (cresult == 0) {
+ break;
+ } else if (errno == EADDRNOTAVAIL) {
+ if (ctx->verbose) {
+ fprintf(stderr,
+ "pcommandbatch client %s_%d (pid %d): got EADDRNOTAVAIL"
+ " on connect attempt %d.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid,
+ retry_attempt + 1);
+ }
+ } else {
+ // Currently not retrying on other errors.
+ fprintf(
+ stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): connect failed (errno "
+ "%d).\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid, errno);
+ close(sockfd);
+ return -1;
+ }
+ }
+ if (retry_attempt >= 10) {
+ fprintf(stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): too many "
+ "retry attempts; giving up.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid);
+ close(sockfd);
+ return -1;
+ }
+ if (ctx->verbose) {
+ fprintf(
+ stderr,
+ "pcommandbatch client %s_%d (pid %d) connection attempt %d failed;"
+ " will sleep %d secs and try again.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid, retry_attempt + 1,
+ retry_sleep_secs);
+ }
+ sleep(retry_sleep_secs);
+ retry_attempt += 1;
+ retry_sleep_secs *= 2;
+ }
+ return sockfd;
+}
+
+int calc_paths_(struct Context_* ctx) {
+ // Because the server needs to be in the same cwd as we are for things to
+ // work, we only support a specific few locations to run from. Currently
+ // this is project-root and src/assets
+ if (path_exists_("config/projectconfig.json")) {
+ // Looks like we're in project root.
+ ctx->state_dir_path = ".cache/pcommandbatch";
+ ctx->instance_prefix = "root";
+ ctx->pcommandpath = "tools/pcommand";
+ } else if (path_exists_("ba_data")
+ && path_exists_("../../config/projectconfig.json")) {
+ // Looks like we're in src/assets.
+ ctx->state_dir_path = "../../.cache/pcommandbatch";
+ ctx->instance_prefix = "assets";
+ ctx->pcommandpath = "../../tools/pcommand";
+ }
+ if (ctx->state_dir_path == NULL) {
+ char cwdbuf[MAXPATHLEN];
+ if (getcwd(cwdbuf, sizeof(cwdbuf)) < 0) {
+ fprintf(stderr,
+ "Error: pcommandbatch client %s (pid %d): unable to get cwd.\n",
+ ctx->instance_prefix, ctx->pid);
+ return -1;
+ }
+ fprintf(
+ stderr,
+ "Error: pcommandbatch client %s (pid %d): pcommandbatch from cwd '%s' "
+ "is not supported.\n",
+ ctx->instance_prefix, ctx->pid, cwdbuf);
+ return -1;
+ }
+ assert(ctx->pcommandpath != NULL);
+ assert(ctx->instance_prefix != NULL);
+
+ // Spread requests for each location out randomly across a few instances.
+ // This greatly increases scalability though is probably wasteful when
+ // running just a few commands. Maybe there's some way to smartly scale
+ // this. The best setup might be to have a single 'controller' server
+ // instance that spins up worker instances as needed. Though such a fancy
+ // setup might be overkill.
+ ctx->instance_num = rand() % 6;
+ return 0;
+}
+
+int send_command_(struct Context_* ctx, int argc, char** argv) {
+ // Build a json array of our args.
+ cJSON* array = cJSON_CreateArray();
+ for (int i = 0; i < argc; ++i) {
+ cJSON_AddItemToArray(array, cJSON_CreateString(argv[i]));
+ }
+ char* json_out = cJSON_Print(array);
+
+ // Send our command.
+ int msglen = strlen(json_out);
+ if (write(ctx->sockfd, json_out, msglen) != msglen) {
+ fprintf(stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): write failed.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid);
+ return -1;
+ }
+
+ // Issue a write shutdown so they get EOF on the other end.
+ if (shutdown(ctx->sockfd, SHUT_WR) < 0) {
+ fprintf(
+ stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): write shutdown failed.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid);
+ return -1;
+ }
+
+ // Clean up our mess after we've sent them on their way.
+ free(json_out);
+ cJSON_Delete(array);
+
+ return 0;
+}
+
+int handle_response_(const struct Context_* ctx) {
+ // Read the response. Currently expecting short-ish responses only; will
+ // have to revisit this if/when they get long.
+ char inbuf[512];
+ ssize_t result = read(ctx->sockfd, inbuf, sizeof(inbuf) - 1);
+ if (result < 0 || result == sizeof(inbuf) - 1) {
+ fprintf(stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): failed to read result "
+ "(errno %d).\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid, errno);
+ close(ctx->sockfd);
+ return -1;
+ }
+ if (ctx->verbose) {
+ fprintf(stderr,
+ "pcommandbatch client %s_%d (pid %d) read %zd byte response.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid, result);
+ }
+ inbuf[result] = 0; // null terminate result str.
+
+ cJSON* result_dict = cJSON_Parse(inbuf);
+ if (!result_dict) {
+ fprintf(
+ stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): failed to parse result "
+ "value.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid);
+ return -1;
+ }
+
+ // If results included output, print it.
+ cJSON* result_output = cJSON_GetObjectItem(result_dict, "o");
+ if (!result_output || !cJSON_IsString(result_output)) {
+ fprintf(
+ stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): failed to parse result "
+ "output value.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid);
+ return -1;
+ }
+ char* output_str = cJSON_GetStringValue(result_output);
+ assert(output_str);
+ if (output_str[0] != 0) {
+ printf("%s", output_str);
+ }
+
+ cJSON* result_code = cJSON_GetObjectItem(result_dict, "r");
+ if (!result_code || !cJSON_IsNumber(result_code)) {
+ fprintf(
+ stderr,
+ "Error: pcommandbatch client %s_%d (pid %d): failed to parse result "
+ "code value.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid);
+ return -1;
+ }
+ int result_val = cJSON_GetNumberValue(result_code);
+ if (ctx->verbose) {
+ fprintf(stderr, "pcommandbatch client %s_%d (pid %d) final result is %d.\n",
+ ctx->instance_prefix, ctx->instance_num, ctx->pid, result_val);
+ }
+ cJSON_Delete(result_dict);
+
+ return result_val;
+}
diff --git a/tools/batools/assetsmakefile.py b/tools/batools/assetsmakefile.py
index e8f3bb60..fb775d9f 100755
--- a/tools/batools/assetsmakefile.py
+++ b/tools/batools/assetsmakefile.py
@@ -265,13 +265,22 @@ def _get_py_targets_subset(
'# (and make non-writable so I\'m less likely to '
'accidentally edit them there)\n'
f'{efc}$(SCRIPT_TARGETS_PY{suffix}) : {copyrule}\n'
- '\t@echo Copying script: $(subst $(BUILD_DIR)/,,$@)\n'
- '\t@mkdir -p $(dir $@)\n'
- '\t@rm -f $@\n'
- '\t@cp $^ $@\n'
- '\t@chmod 444 $@\n'
+ '#\t@echo Copying script: $(subst $(BUILD_DIR)/,,$@)\n'
+ '\t@$(PCOMMANDBATCH) copy_python_file $^ $@\n'
)
+ # out += (
+ # '\n# Rule to copy src asset scripts to dst.\n'
+ # '# (and make non-writable so I\'m less likely to '
+ # 'accidentally edit them there)\n'
+ # f'{efc}$(SCRIPT_TARGETS_PY{suffix}) : {copyrule}\n'
+ # '\t@echo Copying script: $(subst $(BUILD_DIR)/,,$@)\n'
+ # '\t@mkdir -p $(dir $@)\n'
+ # '\t@rm -f $@\n'
+ # '\t@cp $^ $@\n'
+ # '\t@chmod 444 $@\n'
+ # )
+
# Fancy new simple loop-based target generation.
out += (
f'\n# These are too complex to define in a pattern rule;\n'
@@ -301,7 +310,7 @@ def _get_py_targets_subset(
+ py_targets[i]
+ '\n\t@echo Compiling script: $(subst $(BUILD_DIR),,$^)\n'
'\t@rm -rf $@ && PYTHONHASHSEED=1 $(TOOLS_DIR)/pcommand'
- ' compile_python_files $^'
+ ' compile_python_file $^'
' && chmod 444 $@\n'
)
diff --git a/tools/batools/build.py b/tools/batools/build.py
index bb09f159..b72660b1 100644
--- a/tools/batools/build.py
+++ b/tools/batools/build.py
@@ -32,32 +32,37 @@ class PyRequirement:
# entries; this accounts for manual installations or other nonstandard
# setups.
-# Note 2: That is probably overkill. We can probably just replace this
-# with a simple requirements.txt file, can't we? Feels like we're mostly
-# reinventing the wheel here. We just need a clean way to check/list
-# missing stuff without necessarily installing it. And as far as
-# manually-installed bits, pip itself must have some way to allow for
-# that, right?...
+# Note 2: That is probably unnecessary. I'm certainly not using it. We
+# can probably just replace this with a simple requirements.txt file,
+# can't we? Feels like we're mostly reinventing the wheel here. We just
+# need a clean way to check/list missing stuff without necessarily
+# installing it. And as far as manually-installed bits, pip itself must
+# have some way to allow for that, right?...
+
+# Note 3: Have transitioned all these to pipname only; can at least
+# remove our custom module-based stuff soon if nobody complains, which
+# would free us to theoretically move to a requirements.txt-based setup.
PY_REQUIREMENTS = [
- PyRequirement(modulename='pylint', minversion=[2, 17, 3]),
- PyRequirement(modulename='mypy', minversion=[1, 2, 0]),
- PyRequirement(modulename='cpplint', minversion=[1, 6, 1]),
- PyRequirement(modulename='pytest', minversion=[7, 3, 1]),
- PyRequirement(modulename='pytz'),
- PyRequirement(modulename='ansiwrap'),
- PyRequirement(modulename='yaml', pipname='PyYAML'),
- PyRequirement(modulename='requests'),
- PyRequirement(modulename='pdoc'),
- PyRequirement(pipname='black', minversion=[23, 3, 0]),
- PyRequirement(pipname='typing_extensions', minversion=[4, 5, 0]),
+ PyRequirement(pipname='pylint', minversion=[2, 17, 5]),
+ PyRequirement(pipname='mypy', minversion=[1, 4, 1]),
+ PyRequirement(pipname='cpplint', minversion=[1, 6, 1]),
+ PyRequirement(pipname='pytest', minversion=[7, 4, 0]),
+ PyRequirement(pipname='pytz', minversion=[2023, 3]),
+ PyRequirement(pipname='ansiwrap', minversion=[0, 8, 4]),
+ PyRequirement(pipname='requests', minversion=[2, 31, 0]),
+ PyRequirement(pipname='pdoc', minversion=[14, 0, 0]),
+ PyRequirement(pipname='PyYAML', minversion=[6, 0, 1]),
+ PyRequirement(pipname='black', minversion=[23, 7, 0]),
+ PyRequirement(pipname='typing_extensions', minversion=[4, 7, 1]),
PyRequirement(pipname='types-filelock', minversion=[3, 2, 7]),
- PyRequirement(pipname='types-requests', minversion=[2, 28, 11, 17]),
+ PyRequirement(pipname='types-requests', minversion=[2, 31, 0, 2]),
PyRequirement(pipname='types-pytz', minversion=[2023, 3, 0, 0]),
- PyRequirement(pipname='types-PyYAML', minversion=[6, 0, 12, 9]),
- PyRequirement(pipname='certifi', minversion=[2022, 12, 7]),
+ PyRequirement(pipname='types-PyYAML', minversion=[6, 0, 12, 11]),
+ PyRequirement(pipname='certifi', minversion=[2023, 7, 22]),
PyRequirement(pipname='types-certifi', minversion=[2021, 10, 8, 3]),
PyRequirement(pipname='pbxproj', minversion=[3, 5, 0]),
- PyRequirement(pipname='filelock', minversion=[3, 12, 0]),
+ PyRequirement(pipname='filelock', minversion=[3, 12, 2]),
+ PyRequirement(pipname='python-daemon', minversion=[3, 0, 1]),
]
@@ -207,7 +212,7 @@ def lazybuild(target: str, category: LazyBuildCategory, command: str) -> None:
# Even though this category currently doesn't run any clean
# commands, going to restrict to one use at a time for now
# in case we want to add that.
- buildlockname=category.value,
+ # buildlockname=category.value,
srcpaths=[
'Makefile',
'tools',
diff --git a/tools/batools/metamakefile.py b/tools/batools/metamakefile.py
index 3bfff57f..1d79a098 100755
--- a/tools/batools/metamakefile.py
+++ b/tools/batools/metamakefile.py
@@ -242,7 +242,7 @@ class MetaMakefileGenerator:
) -> None:
targets.append(
Target(
- src=['$(TOOLS_DIR)/batools/pcommand.py'],
+ src=['$(TOOLS_DIR)/batools/pcommands.py'],
dst=os.path.join(moduledir, '__init__.py'),
cmd='$(PCOMMAND) gen_python_init_module $@',
)
diff --git a/tools/batools/pcommand.py b/tools/batools/pcommands.py
similarity index 81%
rename from tools/batools/pcommand.py
rename to tools/batools/pcommands.py
index c45adc89..e68b9540 100644
--- a/tools/batools/pcommand.py
+++ b/tools/batools/pcommands.py
@@ -7,13 +7,15 @@ from __future__ import annotations
# keep launch times fast for small snippets.
import sys
-from efrotools.pcommand import PROJROOT
+from efrotools import pcommand
def prune_includes() -> None:
"""Check for unnecessary includes in C++ files."""
from batools.pruneincludes import Pruner
+ pcommand.disallow_in_batch()
+
args = sys.argv.copy()[2:]
commit = False
if '--commit' in args:
@@ -32,6 +34,8 @@ def resize_image() -> None:
import os
import subprocess
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 6:
raise RuntimeError('Expected 5 args.')
width = int(sys.argv[2])
@@ -61,18 +65,21 @@ def check_clean_safety() -> None:
from efro.terminal import Clr
from efro.error import CleanError
- import efrotools.pcommand
+
+ import efrotools.pcommands
+
+ pcommand.disallow_in_batch()
ignorevar = 'BA_IGNORE_CLEAN_SAFETY_CHECK'
if os.environ.get(ignorevar) == '1':
return
try:
# First do standard checks.
- efrotools.pcommand.check_clean_safety()
+ efrotools.pcommands.check_clean_safety()
# Then also make sure there are no untracked changes to core files
# (since we may be blowing core away here).
- spinoff_bin = os.path.join(str(PROJROOT), 'tools', 'spinoff')
+ spinoff_bin = os.path.join(str(pcommand.PROJROOT), 'tools', 'spinoff')
if os.path.exists(spinoff_bin):
result = subprocess.run(
[spinoff_bin, 'cleancheck', '--soft'], check=False
@@ -94,6 +101,8 @@ def archive_old_builds() -> None:
"""
import batools.build
+ pcommand.disallow_in_batch()
+
if len(sys.argv) < 3:
raise RuntimeError('Invalid arguments.')
ssh_server = sys.argv[2]
@@ -117,10 +126,12 @@ def lazy_increment_build() -> None:
from efrotools import get_files_hash
from efrotools.code import get_code_filenames
+ pcommand.disallow_in_batch()
+
if sys.argv[2:] not in [[], ['--update-hash-only']]:
raise CleanError('Invalid arguments')
update_hash_only = '--update-hash-only' in sys.argv
- codefiles = get_code_filenames(PROJROOT, include_generated=False)
+ codefiles = get_code_filenames(pcommand.PROJROOT, include_generated=False)
codehash = get_files_hash(codefiles)
hashfilename = '.cache/lazy_increment_build'
try:
@@ -151,6 +162,8 @@ def get_master_asset_src_dir() -> None:
import subprocess
import os
+ pcommand.disallow_in_batch()
+
master_assets_dir = '/Users/ericf/Documents/ballisticakit_master_assets'
dummy_dir = '/__DUMMY_MASTER_SRC_DISABLED_PATH__'
@@ -188,6 +201,8 @@ def androidaddr() -> None:
import batools.android
from efro.error import CleanError
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 5:
raise CleanError(
f'ERROR: expected 3 args; got {len(sys.argv) - 2}\n'
@@ -206,13 +221,17 @@ def push_ipa() -> None:
from efrotools import extract_arg
import efrotools.ios
+ pcommand.disallow_in_batch()
+
args = sys.argv[2:]
signing_config = extract_arg(args, '--signing-config')
if len(args) != 1:
raise RuntimeError('Expected 1 mode arg (debug or release).')
modename = args[0].lower()
- efrotools.ios.push_ipa(PROJROOT, modename, signing_config=signing_config)
+ efrotools.ios.push_ipa(
+ pcommand.PROJROOT, modename, signing_config=signing_config
+ )
def printcolors() -> None:
@@ -220,6 +239,8 @@ def printcolors() -> None:
from efro.error import CleanError
from efro.terminal import TerminalColor, Clr
+ pcommand.disallow_in_batch()
+
if Clr.RED == '':
raise CleanError('Efro color terminal output is disabled.')
@@ -244,6 +265,8 @@ def python_version_android_base() -> None:
"""Print built Python base version."""
from efrotools.pybuild import PY_VER_ANDROID
+ pcommand.disallow_in_batch()
+
print(PY_VER_ANDROID, end='')
@@ -251,6 +274,8 @@ def python_version_android() -> None:
"""Print Android embedded Python version."""
from efrotools.pybuild import PY_VER_EXACT_ANDROID
+ pcommand.disallow_in_batch()
+
print(PY_VER_EXACT_ANDROID, end='')
@@ -258,16 +283,24 @@ def python_version_apple() -> None:
"""Print Apple embedded Python version."""
from efrotools.pybuild import PY_VER_EXACT_APPLE
+ pcommand.disallow_in_batch()
+
print(PY_VER_EXACT_APPLE, end='')
def python_build_apple() -> None:
"""Build an embeddable python for mac/ios/tvos."""
+
+ pcommand.disallow_in_batch()
+
_python_build_apple(debug=False)
def python_build_apple_debug() -> None:
"""Build embeddable python for mac/ios/tvos (dbg ver)."""
+
+ pcommand.disallow_in_batch()
+
_python_build_apple(debug=True)
@@ -277,7 +310,9 @@ def _python_build_apple(debug: bool) -> None:
from efro.error import CleanError
from efrotools import pybuild
- os.chdir(PROJROOT)
+ pcommand.disallow_in_batch()
+
+ os.chdir(pcommand.PROJROOT)
archs = ('mac', 'ios', 'tvos')
if len(sys.argv) != 3:
raise CleanError('Error: expected one arg: ' + ', '.join(archs))
@@ -291,11 +326,17 @@ def _python_build_apple(debug: bool) -> None:
def python_build_android() -> None:
"""Build an embeddable Python lib for Android."""
+
+ pcommand.disallow_in_batch()
+
_python_build_android(debug=False)
def python_build_android_debug() -> None:
"""Build embeddable Android Python lib (debug ver)."""
+
+ pcommand.disallow_in_batch()
+
_python_build_android(debug=True)
@@ -304,7 +345,9 @@ def _python_build_android(debug: bool) -> None:
from efro.error import CleanError
from efrotools import pybuild
- os.chdir(PROJROOT)
+ pcommand.disallow_in_batch()
+
+ os.chdir(pcommand.PROJROOT)
archs = ('arm', 'arm64', 'x86', 'x86_64')
if len(sys.argv) != 3:
raise CleanError('Error: Expected one arg: ' + ', '.join(archs))
@@ -313,7 +356,7 @@ def _python_build_android(debug: bool) -> None:
raise CleanError(
'Error: invalid arch. valid values are: ' + ', '.join(archs)
)
- pybuild.build_android(str(PROJROOT), arch, debug=debug)
+ pybuild.build_android(str(pcommand.PROJROOT), arch, debug=debug)
def python_android_patch() -> None:
@@ -321,6 +364,8 @@ def python_android_patch() -> None:
import os
from efrotools import pybuild
+ pcommand.disallow_in_batch()
+
os.chdir(sys.argv[2])
pybuild.android_patch()
@@ -329,6 +374,8 @@ def python_android_patch_ssl() -> None:
"""Patches Python ssl to prep for building for Android."""
from efrotools import pybuild
+ pcommand.disallow_in_batch()
+
pybuild.android_patch_ssl()
@@ -337,6 +384,8 @@ def python_apple_patch() -> None:
from efro.error import CleanError
from efrotools import pybuild
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 3:
raise CleanError('Expected 1 arg.')
@@ -357,7 +406,9 @@ def python_gather() -> None:
import os
from efrotools import pybuild
- os.chdir(PROJROOT)
+ pcommand.disallow_in_batch()
+
+ os.chdir(pcommand.PROJROOT)
pybuild.gather(do_android=True, do_apple=True)
@@ -366,7 +417,9 @@ def python_gather_android() -> None:
import os
from efrotools import pybuild
- os.chdir(PROJROOT)
+ pcommand.disallow_in_batch()
+
+ os.chdir(pcommand.PROJROOT)
pybuild.gather(do_android=True, do_apple=False)
@@ -375,7 +428,9 @@ def python_gather_apple() -> None:
import os
from efrotools import pybuild
- os.chdir(PROJROOT)
+ pcommand.disallow_in_batch()
+
+ os.chdir(pcommand.PROJROOT)
pybuild.gather(do_android=False, do_apple=True)
@@ -384,17 +439,25 @@ def python_winprune() -> None:
import os
from efrotools import pybuild
- os.chdir(PROJROOT)
+ pcommand.disallow_in_batch()
+
+ os.chdir(pcommand.PROJROOT)
pybuild.winprune()
def capitalize() -> None:
"""Print args capitalized."""
+
+ pcommand.disallow_in_batch()
+
print(' '.join(w.capitalize() for w in sys.argv[2:]), end='')
def upper() -> None:
"""Print args uppercased."""
+
+ pcommand.disallow_in_batch()
+
print(' '.join(w.upper() for w in sys.argv[2:]), end='')
@@ -402,6 +465,8 @@ def efrocache_update() -> None:
"""Build & push files to efrocache for public access."""
from efrotools.efrocache import update_cache
+ pcommand.disallow_in_batch()
+
makefile_dirs = ['', 'src/assets', 'src/resources', 'src/meta']
update_cache(makefile_dirs)
@@ -410,6 +475,8 @@ def efrocache_get() -> None:
"""Get a file from efrocache."""
from efrotools.efrocache import get_target
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 3:
raise RuntimeError('Expected exactly 1 arg')
get_target(sys.argv[2])
@@ -420,6 +487,8 @@ def get_modern_make() -> None:
import platform
import subprocess
+ pcommand.disallow_in_batch()
+
# Mac gnu make is outdated (due to newer versions using GPL3 I believe).
# so let's return 'gmake' there which will point to homebrew make which
# should be up to date.
@@ -448,19 +517,21 @@ def warm_start_asset_build() -> None:
from pathlib import Path
from efrotools import getprojectconfig
- public: bool = getprojectconfig(PROJROOT)['public']
+ pcommand.disallow_in_batch()
+
+ public: bool = getprojectconfig(pcommand.PROJROOT)['public']
if public:
from efrotools.efrocache import warm_start_cache
- os.chdir(PROJROOT)
+ os.chdir(pcommand.PROJROOT)
warm_start_cache()
else:
# For internal builds we don't use efrocache but we do use an
# internal build cache. Download an initial cache/etc. if need be.
subprocess.run(
[
- str(Path(PROJROOT, 'tools/pcommand')),
+ str(Path(pcommand.PROJROOT, 'tools/pcommand')),
'convert_util',
'--init-asset-cache',
],
@@ -473,14 +544,18 @@ def gen_docs_pdoc() -> None:
from efro.terminal import Clr
import batools.docs
+ pcommand.disallow_in_batch()
+
print(f'{Clr.BLU}Generating documentation...{Clr.RST}')
- batools.docs.generate_pdoc(projroot=str(PROJROOT))
+ batools.docs.generate_pdoc(projroot=str(pcommand.PROJROOT))
def list_pip_reqs() -> None:
"""List Python Pip packages needed for this project."""
from batools.build import get_pip_reqs
+ pcommand.disallow_in_batch()
+
print(' '.join(get_pip_reqs()))
@@ -491,6 +566,8 @@ def install_pip_reqs() -> None:
from efro.terminal import Clr
from batools.build import get_pip_reqs
+ pcommand.disallow_in_batch()
+
# Make sure pip itself is up to date first.
subprocess.run(
[PYTHON_BIN, '-m', 'pip', 'install', '--upgrade', 'pip'], check=True
@@ -507,132 +584,11 @@ def checkenv() -> None:
"""Check for tools necessary to build and run the app."""
import batools.build
+ pcommand.disallow_in_batch()
+
batools.build.checkenv()
-def wsl_build_check_win_drive() -> None:
- """Make sure we're building on a windows drive."""
- import os
- import subprocess
- import textwrap
- from efro.error import CleanError
-
- if (
- subprocess.run(
- ['which', 'wslpath'], check=False, capture_output=True
- ).returncode
- != 0
- ):
- raise CleanError(
- 'wslpath not found; you must run this from a WSL environment'
- )
-
- if os.environ.get('WSL_BUILD_CHECK_WIN_DRIVE_IGNORE') == '1':
- return
-
- # Get a windows path to the current dir.
- path = (
- subprocess.run(
- ['wslpath', '-w', '-a', os.getcwd()],
- capture_output=True,
- check=True,
- )
- .stdout.decode()
- .strip()
- )
-
- # If we're sitting under the linux filesystem, our path
- # will start with \\wsl$; fail in that case and explain why.
- if not path.startswith('\\\\wsl$'):
- return
-
- def _wrap(txt: str) -> str:
- return textwrap.fill(txt, 76)
-
- raise CleanError(
- '\n\n'.join(
- [
- _wrap(
- 'ERROR: This project appears to live'
- ' on the Linux filesystem.'
- ),
- _wrap(
- 'Visual Studio compiles will error here for reasons related'
- ' to Linux filesystem case-sensitivity, and thus are'
- ' disallowed.'
- ' Clone the repo to a location that maps to a native'
- ' Windows drive such as \'/mnt/c/ballistica\''
- ' and try again.'
- ),
- _wrap(
- 'Note that WSL2 filesystem performance'
- ' is poor when accessing'
- ' native Windows drives, so if Visual Studio builds are not'
- ' needed it may be best to keep things'
- ' on the Linux filesystem.'
- ' This behavior may differ under WSL1 (untested).'
- ),
- _wrap(
- 'Set env-var WSL_BUILD_CHECK_WIN_DRIVE_IGNORE=1 to skip'
- ' this check.'
- ),
- ]
- )
- )
-
-
-def wsl_path_to_win() -> None:
- """Forward escape slashes in a provided win path arg."""
- import subprocess
- import logging
- import os
- from efro.error import CleanError
-
- try:
- create = False
- escape = False
- if len(sys.argv) < 3:
- raise CleanError('Expected at least 1 path arg.')
- wsl_path: str | None = None
- for arg in sys.argv[2:]:
- if arg == '--create':
- create = True
- elif arg == '--escape':
- escape = True
- else:
- if wsl_path is not None:
- raise CleanError('More than one path provided.')
- wsl_path = arg
- if wsl_path is None:
- raise CleanError('No path provided.')
-
- # wslpath fails on nonexistent paths; make it clear when that happens.
- if create:
- os.makedirs(wsl_path, exist_ok=True)
- if not os.path.exists(wsl_path):
- raise CleanError(f'Path \'{wsl_path}\' does not exist.')
-
- results = subprocess.run(
- ['wslpath', '-w', '-a', wsl_path], capture_output=True, check=True
- )
- except Exception:
- # This gets used in a makefile so our returncode is ignored;
- # let's try to make our failure known in other ways.
- logging.exception('wsl_to_escaped_win_path failed.')
- print('wsl_to_escaped_win_path_error_occurred', end='')
- return
-
- out = results.stdout.decode().strip()
-
- # If our input ended with a slash, match in the output.
- if wsl_path.endswith('/') and not out.endswith('\\'):
- out += '\\'
-
- if escape:
- out = out.replace('\\', '\\\\')
- print(out, end='')
-
-
def ensure_prefab_platform() -> None:
"""Ensure we are running on a particular prefab platform.
@@ -644,6 +600,8 @@ def ensure_prefab_platform() -> None:
import batools.build
from efro.error import CleanError
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 3:
raise CleanError('Expected 1 platform name arg.')
needed = sys.argv[2]
@@ -658,6 +616,8 @@ def prefab_run_var() -> None:
"""Print the current platform prefab run target var."""
import batools.build
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 3:
raise RuntimeError('Expected 1 arg.')
base = sys.argv[2].replace('-', '_').upper()
@@ -669,6 +629,8 @@ def prefab_binary_path() -> None:
"""Print the current platform prefab binary path."""
import batools.build
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 3:
raise RuntimeError('Expected 1 arg.')
buildtype, buildmode = sys.argv[2].split('-')
@@ -690,6 +652,8 @@ def make_prefab() -> None:
import subprocess
import batools.build
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 3:
raise RuntimeError('Expected one argument')
target = batools.build.PrefabTarget(sys.argv[2])
@@ -713,6 +677,8 @@ def lazybuild() -> None:
import batools.build
from efro.error import CleanError
+ pcommand.disallow_in_batch()
+
if len(sys.argv) < 5:
raise CleanError('Expected at least 3 args')
try:
@@ -733,6 +699,8 @@ def logcat() -> None:
from efro.terminal import Clr
from efro.error import CleanError
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 4:
raise CleanError('Expected 2 args')
adb = sys.argv[2]
@@ -754,6 +722,8 @@ def logcat() -> None:
def _camel_case_split(string: str) -> list[str]:
+ pcommand.disallow_in_batch()
+
words = [[string[0]]]
for char in string[1:]:
if words[-1][-1].islower() and char.isupper():
@@ -769,6 +739,8 @@ def efro_gradle() -> None:
from efro.terminal import Clr
from efrotools.android import filter_gradle_file
+ pcommand.disallow_in_batch()
+
args = ['./gradlew'] + sys.argv[2:]
print(f'{Clr.BLU}Running gradle with args:{Clr.RST} {args}.', flush=True)
enabled_tags: set[str] = {'true'}
@@ -809,8 +781,12 @@ def stage_build() -> None:
import batools.staging
from efro.error import CleanError
+ pcommand.disallow_in_batch()
+
try:
- batools.staging.stage_build(projroot=str(PROJROOT), args=sys.argv[2:])
+ batools.staging.stage_build(
+ projroot=str(pcommand.PROJROOT), args=sys.argv[2:]
+ )
except CleanError as exc:
exc.pretty_print()
sys.exit(1)
@@ -835,6 +811,8 @@ def update_project() -> None:
import os
from batools.project import ProjectUpdater
+ pcommand.disallow_in_batch()
+
check = '--check' in sys.argv
fix = '--fix' in sys.argv
@@ -856,6 +834,8 @@ def cmake_prep_dir() -> None:
from efro.error import CleanError
import batools.build
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 3:
raise CleanError('Expected 1 arg (dir name)')
dirname = sys.argv[2]
@@ -869,11 +849,13 @@ def gen_binding_code() -> None:
from efro.error import CleanError
import batools.metabuild
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 4:
raise CleanError('Expected 2 args (srcfile, dstfile)')
inpath = sys.argv[2]
outpath = sys.argv[3]
- batools.metabuild.gen_binding_code(str(PROJROOT), inpath, outpath)
+ batools.metabuild.gen_binding_code(str(pcommand.PROJROOT), inpath, outpath)
def gen_flat_data_code() -> None:
@@ -881,13 +863,15 @@ def gen_flat_data_code() -> None:
from efro.error import CleanError
import batools.metabuild
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 5:
raise CleanError('Expected 3 args (srcfile, dstfile, varname)')
inpath = sys.argv[2]
outpath = sys.argv[3]
varname = sys.argv[4]
batools.metabuild.gen_flat_data_code(
- str(PROJROOT), inpath, outpath, varname
+ str(pcommand.PROJROOT), inpath, outpath, varname
)
@@ -895,24 +879,32 @@ def genchangelog() -> None:
"""Gen a pretty html changelog."""
from batools.changelog import generate
- generate(projroot=str(PROJROOT))
+ pcommand.disallow_in_batch()
+
+ generate(projroot=str(pcommand.PROJROOT))
def android_sdk_utils() -> None:
"""Wrangle android sdk stuff."""
from batools.androidsdkutils import run
- run(projroot=str(PROJROOT), args=sys.argv[2:])
+ pcommand.disallow_in_batch()
+
+ run(projroot=str(pcommand.PROJROOT), args=sys.argv[2:])
def gen_python_enums_module() -> None:
"""Update our procedurally generated python enums."""
from batools.pythonenumsmodule import generate
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 4:
raise RuntimeError('Expected infile and outfile args.')
generate(
- projroot=str(PROJROOT), infilename=sys.argv[2], outfilename=sys.argv[3]
+ projroot=str(pcommand.PROJROOT),
+ infilename=sys.argv[2],
+ outfilename=sys.argv[3],
)
@@ -921,14 +913,18 @@ def gen_dummy_modules() -> None:
from efro.error import CleanError
from batools.dummymodule import generate_dummy_modules
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 2:
raise CleanError(f'Expected no args; got {len(sys.argv)-2}.')
- generate_dummy_modules(projroot=str(PROJROOT))
+ generate_dummy_modules(projroot=str(pcommand.PROJROOT))
def version() -> None:
"""Check app versions."""
from batools.version import run
- run(projroot=str(PROJROOT), args=sys.argv[2:])
+ pcommand.disallow_in_batch()
+
+ run(projroot=str(pcommand.PROJROOT), args=sys.argv[2:])
diff --git a/tools/batools/pcommand2.py b/tools/batools/pcommands2.py
similarity index 72%
rename from tools/batools/pcommand2.py
rename to tools/batools/pcommands2.py
index fbb83fb8..3dee371f 100644
--- a/tools/batools/pcommand2.py
+++ b/tools/batools/pcommands2.py
@@ -7,7 +7,7 @@ from __future__ import annotations
# keep launch times fast for small snippets.
import sys
-from efrotools.pcommand import PROJROOT
+from efrotools import pcommand
def gen_monolithic_register_modules() -> None:
@@ -18,11 +18,13 @@ def gen_monolithic_register_modules() -> None:
from efro.error import CleanError
from batools.featureset import FeatureSet
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 3:
raise CleanError('Expected 1 arg.')
outpath = sys.argv[2]
- featuresets = FeatureSet.get_all_for_project(str(PROJROOT))
+ featuresets = FeatureSet.get_all_for_project(str(pcommand.PROJROOT))
# Filter out ones without native modules.
featuresets = [f for f in featuresets if f.has_python_binary_module]
@@ -124,6 +126,8 @@ def py_examine() -> None:
from pathlib import Path
import efrotools
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 7:
print('ERROR: expected 7 args')
sys.exit(255)
@@ -134,7 +138,7 @@ def py_examine() -> None:
operation = sys.argv[6]
# This stuff assumes it is being run from project root.
- os.chdir(PROJROOT)
+ os.chdir(pcommand.PROJROOT)
# Set up pypaths so our main distro stuff works.
scriptsdir = os.path.abspath(
@@ -149,7 +153,9 @@ def py_examine() -> None:
sys.path.append(scriptsdir)
if toolsdir not in sys.path:
sys.path.append(toolsdir)
- efrotools.py_examine(PROJROOT, filename, line, column, selection, operation)
+ efrotools.py_examine(
+ pcommand.PROJROOT, filename, line, column, selection, operation
+ )
def clean_orphaned_assets() -> None:
@@ -158,8 +164,10 @@ def clean_orphaned_assets() -> None:
import json
import subprocess
+ pcommand.disallow_in_batch()
+
# Operate from dist root..
- os.chdir(PROJROOT)
+ os.chdir(pcommand.PROJROOT)
# Our manifest is split into 2 files (public and private)
with open(
@@ -191,6 +199,8 @@ def win_ci_install_prereqs() -> None:
import json
from efrotools.efrocache import get_target
+ pcommand.disallow_in_batch()
+
# We'll need to pull a handful of things out of efrocache for the
# build to succeed. Normally this would happen through our Makefile
# targets but we can't use them under raw window so we need to just
@@ -227,6 +237,8 @@ def win_ci_binary_build() -> None:
"""Simple windows binary build for ci."""
import subprocess
+ pcommand.disallow_in_batch()
+
# Do the thing.
subprocess.run(
[
@@ -249,6 +261,8 @@ def update_cmake_prefab_lib() -> None:
from efro.error import CleanError
import batools.build
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 5:
raise CleanError(
'Expected 3 args (standard/server, debug/release, build-dir)'
@@ -293,6 +307,8 @@ def android_archive_unstripped_libs() -> None:
from efro.error import CleanError
from efro.terminal import Clr
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 4:
raise CleanError('Expected 2 args; src-dir and dst-dir')
src = Path(sys.argv[2])
@@ -334,6 +350,8 @@ def spinoff_check_submodule_parent() -> None:
import os
from efro.error import CleanError
+ pcommand.disallow_in_batch()
+
# Make sure we're a spinoff dst project. The spinoff command will be
# a symlink if this is the case.
if not os.path.exists('tools/spinoff'):
@@ -353,14 +371,21 @@ def spinoff_check_submodule_parent() -> None:
def gen_python_init_module() -> None:
"""Generate a basic __init__.py."""
import os
+
+ from efro.error import CleanError
from efro.terminal import Clr
+
from batools.project import project_centric_path
+ pcommand.disallow_in_batch()
+
if len(sys.argv) != 3:
- raise RuntimeError('Expected an outfile arg.')
+ raise CleanError('Expected an outfile arg.')
outfilename = sys.argv[2]
os.makedirs(os.path.dirname(outfilename), exist_ok=True)
- prettypath = project_centric_path(projroot=str(PROJROOT), path=outfilename)
+ prettypath = project_centric_path(
+ projroot=str(pcommand.PROJROOT), path=outfilename
+ )
print(f'Meta-building {Clr.BLD}{prettypath}{Clr.RST}')
with open(outfilename, 'w', encoding='utf-8') as outfile:
outfile.write(
@@ -379,7 +404,136 @@ def tests_warm_start() -> None:
"""
from batools import apprun
+ pcommand.disallow_in_batch()
+
# We do lots of apprun.python_command() within test. Pre-build the
# binary that they need to do their thing.
if not apprun.test_runs_disabled():
apprun.acquire_binary_for_python_command(purpose='running tests')
+
+
+def wsl_build_check_win_drive() -> None:
+ """Make sure we're building on a windows drive."""
+ import os
+ import subprocess
+ import textwrap
+ from efro.error import CleanError
+
+ pcommand.disallow_in_batch()
+
+ if (
+ subprocess.run(
+ ['which', 'wslpath'], check=False, capture_output=True
+ ).returncode
+ != 0
+ ):
+ raise CleanError(
+ 'wslpath not found; you must run this from a WSL environment'
+ )
+
+ if os.environ.get('WSL_BUILD_CHECK_WIN_DRIVE_IGNORE') == '1':
+ return
+
+ # Get a windows path to the current dir.
+ path = (
+ subprocess.run(
+ ['wslpath', '-w', '-a', os.getcwd()],
+ capture_output=True,
+ check=True,
+ )
+ .stdout.decode()
+ .strip()
+ )
+
+ # If we're sitting under the linux filesystem, our path
+ # will start with \\wsl$; fail in that case and explain why.
+ if not path.startswith('\\\\wsl$'):
+ return
+
+ def _wrap(txt: str) -> str:
+ return textwrap.fill(txt, 76)
+
+ raise CleanError(
+ '\n\n'.join(
+ [
+ _wrap(
+ 'ERROR: This project appears to live'
+ ' on the Linux filesystem.'
+ ),
+ _wrap(
+ 'Visual Studio compiles will error here for reasons related'
+ ' to Linux filesystem case-sensitivity, and thus are'
+ ' disallowed.'
+ ' Clone the repo to a location that maps to a native'
+ ' Windows drive such as \'/mnt/c/ballistica\''
+ ' and try again.'
+ ),
+ _wrap(
+ 'Note that WSL2 filesystem performance'
+ ' is poor when accessing'
+ ' native Windows drives, so if Visual Studio builds are not'
+ ' needed it may be best to keep things'
+ ' on the Linux filesystem.'
+ ' This behavior may differ under WSL1 (untested).'
+ ),
+ _wrap(
+ 'Set env-var WSL_BUILD_CHECK_WIN_DRIVE_IGNORE=1 to skip'
+ ' this check.'
+ ),
+ ]
+ )
+ )
+
+
+def wsl_path_to_win() -> None:
+ """Forward escape slashes in a provided win path arg."""
+ import subprocess
+ import logging
+ import os
+ from efro.error import CleanError
+
+ pcommand.disallow_in_batch()
+
+ try:
+ create = False
+ escape = False
+ if len(sys.argv) < 3:
+ raise CleanError('Expected at least 1 path arg.')
+ wsl_path: str | None = None
+ for arg in sys.argv[2:]:
+ if arg == '--create':
+ create = True
+ elif arg == '--escape':
+ escape = True
+ else:
+ if wsl_path is not None:
+ raise CleanError('More than one path provided.')
+ wsl_path = arg
+ if wsl_path is None:
+ raise CleanError('No path provided.')
+
+ # wslpath fails on nonexistent paths; make it clear when that happens.
+ if create:
+ os.makedirs(wsl_path, exist_ok=True)
+ if not os.path.exists(wsl_path):
+ raise CleanError(f'Path \'{wsl_path}\' does not exist.')
+
+ results = subprocess.run(
+ ['wslpath', '-w', '-a', wsl_path], capture_output=True, check=True
+ )
+ except Exception:
+ # This gets used in a makefile so our returncode is ignored;
+ # let's try to make our failure known in other ways.
+ logging.exception('wsl_to_escaped_win_path failed.')
+ print('wsl_to_escaped_win_path_error_occurred', end='')
+ return
+
+ out = results.stdout.decode().strip()
+
+ # If our input ended with a slash, match in the output.
+ if wsl_path.endswith('/') and not out.endswith('\\'):
+ out += '\\'
+
+ if escape:
+ out = out.replace('\\', '\\\\')
+ print(out, end='')
diff --git a/tools/batools/spinoff/_test.py b/tools/batools/spinoff/_test.py
index a63acbd8..096f2015 100644
--- a/tools/batools/spinoff/_test.py
+++ b/tools/batools/spinoff/_test.py
@@ -58,6 +58,51 @@ def spinoff_test(args: list[str]) -> None:
flush=True,
)
+ # Normally we spin the project off from where we currently
+ # are, but for cloud builds we may want to use a dedicated
+ # shared source instead. (since we need a git managed source
+ # we need to pull *something* fresh from git instead of just
+ # using the files that were synced up by cloudshell).
+ # Here we make sure that shared source is up to date.
+ spinoff_src = '.'
+ spinoff_path = path
+ if shared_test_parent:
+ spinoff_src = 'build/spinoff_shared_test_parent'
+ # Need an abs target path since we change cwd in this case.
+ spinoff_path = os.path.abspath(path)
+ if bool(False):
+ print('TEMP BLOWING AWAY')
+ subprocess.run(['rm', '-rf', spinoff_src], check=True)
+ if os.path.exists(spinoff_src):
+ print(
+ 'Pulling latest spinoff_shared_test_parent...',
+ flush=True,
+ )
+ subprocess.run(
+ ['git', 'pull', '--ff-only'],
+ check=True,
+ cwd=spinoff_src,
+ )
+ else:
+ os.makedirs(spinoff_src, exist_ok=True)
+ cmd = [
+ 'git',
+ 'clone',
+ 'git@github.com:efroemling/ballistica-internal.git',
+ spinoff_src,
+ ]
+
+ print(
+ f'{Clr.BLU}Creating spinoff shared test parent'
+ f" at '{spinoff_src}' with command {cmd}...{Clr.RST}"
+ )
+ subprocess.run(
+ cmd,
+ check=True,
+ )
+
+ # If the spinoff project already exists and is submodule-based,
+ # bring the submodule up to date.
if os.path.exists(path):
if bool(False):
subprocess.run(['rm', '-rf', path], check=True)
@@ -73,51 +118,8 @@ def spinoff_test(args: list[str]) -> None:
shell=True,
check=True,
)
-
else:
- # Normally we spin the project off from where we currently
- # are, but for cloud builds we may want to use a dedicated
- # shared source instead. (since we need a git managed source
- # we need to pull something fresh from git instead of just
- # using the files that were synced up by cloudshell).
- spinoff_src = '.'
- spinoff_path = path
- if shared_test_parent:
- spinoff_src = 'build/spinoff_shared_test_parent'
- # Need an abs target path since we change cwd in this case.
- spinoff_path = os.path.abspath(path)
- if bool(False):
- print('TEMP BLOWING AWAY')
- subprocess.run(['rm', '-rf', spinoff_src], check=True)
- if os.path.exists(spinoff_src):
- print(
- 'Pulling latest spinoff_shared_test_parent...',
- flush=True,
- )
- subprocess.run(
- ['git', 'pull', '--ff-only'],
- check=True,
- cwd=spinoff_src,
- )
- else:
- os.makedirs(spinoff_src, exist_ok=True)
- cmd = [
- 'git',
- 'clone',
- 'git@github.com:efroemling/ballistica-internal.git',
- spinoff_src,
- ]
-
- print(
- f'{Clr.BLU}Creating spinoff shared test parent'
- f" at '{spinoff_src}' with command {cmd}...{Clr.RST}"
- )
- subprocess.run(
- cmd,
- check=True,
- )
- # raise CleanError('SO FAR SO GOOD5')
-
+ # No spinoff project there yet; create it.
cmd = [
'./tools/spinoff',
'create',
diff --git a/tools/efrotools/pcommand.py b/tools/efrotools/pcommand.py
index 5b04a0de..ccd3d9d4 100644
--- a/tools/efrotools/pcommand.py
+++ b/tools/efrotools/pcommand.py
@@ -15,23 +15,37 @@ from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
+ import threading
from typing import Any
# Absolute path of the project root.
PROJROOT = Path(__file__).resolve().parents[2]
+# Set of arguments for the currently running command.
+# Note that, unlike sys.argv, this will not include the script path or
+# the name of the pcommand; only the arguments *to* the command.
+_g_thread_local_storage: threading.local | None = None
+
+# Discovered functions for the currently running pcommand instance.
+_g_funcs: dict | None = None
+
+# Are we running as a server?
+_g_batch_server_mode: bool = False
+
def pcommand_main(globs: dict[str, Any]) -> None:
- """Run a snippet contained in the pcommand script.
+ """Main entry point to pcommand scripts.
We simply look for all public functions and call
the one corresponding to the first passed arg.
"""
import types
- from efro.error import CleanError
- from efro.terminal import Clr
- funcs = dict(
+ global _g_funcs # pylint: disable=global-statement
+ assert _g_funcs is None
+
+ # Build our list of available funcs.
+ _g_funcs = dict(
(
(name, obj)
for name, obj in globs.items()
@@ -40,42 +54,97 @@ def pcommand_main(globs: dict[str, Any]) -> None:
and isinstance(obj, types.FunctionType)
)
)
- show_help = False
+
+ # Call the one based on sys args.
+ sys.exit(_run_pcommand(sys.argv))
+
+
+def get_args() -> list[str]:
+ """Return the args for the current pcommand."""
+ # pylint: disable=unsubscriptable-object, not-an-iterable
+ if not _g_batch_server_mode:
+ return sys.argv[2:]
+
+ # Ok, we're in batch mode. We should have stuffed some args into
+ # thread-local storage.
+ assert _g_thread_local_storage is not None
+ argv: list[str] | None = getattr(_g_thread_local_storage, 'argv', None)
+ if argv is None:
+ raise RuntimeError('Thread local args not found where expected.')
+ assert isinstance(argv, list)
+ assert all(isinstance(i, str) for i in argv)
+ return argv[2:]
+
+
+def set_output(output: str, newline: bool = True) -> None:
+ """Set an output string for the current pcommand.
+
+ This will be printed to stdout on the client even in batch mode.
+ """
+ if newline:
+ output = f'{output}\n'
+
+ if not _g_batch_server_mode:
+ print(output, end='')
+ return
+
+ # Ok, we're in batch mode. Stuff this into thread-local storage to
+ # be returned once we're done.
+ assert _g_thread_local_storage is not None
+ if hasattr(_g_thread_local_storage, 'output'):
+ raise RuntimeError('Output is already set for this pcommand.')
+ _g_thread_local_storage.output = output
+
+
+def _run_pcommand(sysargv: list[str]) -> int:
+ """Do the thing."""
+ from efro.error import CleanError
+ from efro.terminal import Clr
+
+ assert _g_funcs is not None
+
+ # If we're in batch mode, stuff these args into our thread-local
+ # storage.
+ if _g_batch_server_mode:
+ assert _g_thread_local_storage is not None
+ _g_thread_local_storage.argv = sysargv
+
retval = 0
- if len(sys.argv) < 2:
+ show_help = False
+ if len(sysargv) < 2:
print(f'{Clr.RED}ERROR: command expected.{Clr.RST}')
show_help = True
- retval = 255
+ retval = 1
else:
- if sys.argv[1] == 'help':
- if len(sys.argv) == 2:
+ if sysargv[1] == 'help':
+ if len(sysargv) == 2:
show_help = True
- elif sys.argv[2] not in funcs:
+ elif sysargv[2] not in _g_funcs:
print('Invalid help command.')
- retval = 255
+ retval = 1
else:
docs = _trim_docstring(
- getattr(funcs[sys.argv[2]], '__doc__', '')
+ getattr(_g_funcs[sysargv[2]], '__doc__', '')
)
print(
- f'\n{Clr.MAG}{Clr.BLD}pcommand {sys.argv[2]}:{Clr.RST}\n'
+ f'\n{Clr.MAG}{Clr.BLD}pcommand {sysargv[2]}:{Clr.RST}\n'
f'{Clr.MAG}{docs}{Clr.RST}\n'
)
- elif sys.argv[1] in funcs:
+ elif sysargv[1] in _g_funcs:
try:
- funcs[sys.argv[1]]()
+ _g_funcs[sysargv[1]]()
except KeyboardInterrupt as exc:
print(f'{Clr.RED}{exc}{Clr.RST}')
- sys.exit(1)
+ retval = 1
except CleanError as exc:
exc.pretty_print()
- sys.exit(1)
+ retval = 1
else:
print(
- f'{Clr.RED}Unknown pcommand: "{sys.argv[1]}"{Clr.RST}',
+ f'{Clr.RED}Unknown pcommand: "{sysargv[1]}"{Clr.RST}',
file=sys.stderr,
)
- retval = 255
+ retval = 1
if show_help:
print(
@@ -91,10 +160,76 @@ def pcommand_main(globs: dict[str, Any]) -> None:
f'{Clr.RST} for full documentation for a command.'
)
print('Available commands:')
- for func, obj in sorted(funcs.items()):
+ for func, obj in sorted(_g_funcs.items()):
doc = getattr(obj, '__doc__', '').splitlines()[0].strip()
print(f'{Clr.MAG}{func}{Clr.BLU} - {doc}{Clr.RST}')
- sys.exit(retval)
+
+ return retval
+
+
+def enter_batch_server_mode() -> None:
+ """Called by pcommandserver when we start serving."""
+ # (try to avoid importing this in non-batch mode in case it shaves
+ # off a bit of time)
+ import threading
+
+ # pylint: disable=global-statement
+ global _g_batch_server_mode, _g_thread_local_storage
+ assert not _g_batch_server_mode
+ _g_batch_server_mode = True
+
+ # Spin up our thread-local storage.
+ assert _g_thread_local_storage is None
+ _g_thread_local_storage = threading.local()
+
+
+def is_batch() -> bool:
+ """Is the current pcommand running under a batch server?
+
+ Commands that do things that are unsafe to do in server mode
+ such as chdir should assert that this is not true.
+ """
+ return _g_batch_server_mode
+
+
+def run_client_pcommand(args: list[str], log_path: str) -> tuple[int, str]:
+ """Call a pcommand function when running as a batch server."""
+ assert _g_batch_server_mode
+ assert _g_thread_local_storage is not None
+
+ # Clear any output from previous commands on this thread.
+ if hasattr(_g_thread_local_storage, 'output'):
+ delattr(_g_thread_local_storage, 'output')
+ if hasattr(_g_thread_local_storage, 'output'):
+ delattr(_g_thread_local_storage, 'output')
+
+ # Run the command.
+ resultcode: int = _run_pcommand(args)
+
+ # Return the result code and any output the command provided.
+ output = getattr(_g_thread_local_storage, 'output', '')
+
+ if resultcode != 0:
+ if output:
+ output += '\n'
+ output += (
+ f'Error: pcommandbatch command failed: {args}.'
+ f" See '{log_path}' for more info.\n"
+ )
+
+ assert isinstance(output, str)
+ return (resultcode, output)
+
+
+def disallow_in_batch() -> None:
+ """Utility call to raise a clean error if running under batch mode."""
+ from efro.error import CleanError
+
+ if _g_batch_server_mode:
+ raise CleanError(
+ 'This pcommand does not support batch mode.\n'
+ 'See docs in efrotools.pcommand if you want to add it.'
+ )
def _trim_docstring(docstring: str) -> str:
@@ -130,707 +265,3 @@ def _trim_docstring(docstring: str) -> str:
# Return a single string.
return '\n'.join(trimmed)
-
-
-def _spelling(words: list[str]) -> None:
- from efrotools.code import sort_jetbrains_dict
- import os
-
- num_modded_dictionaries = 0
- for fname in [
- '.idea/dictionaries/ericf.xml',
- 'ballisticakit-cmake/.idea/dictionaries/ericf.xml',
- ]:
- if not os.path.exists(fname):
- continue
- with open(fname, encoding='utf-8') as infile:
- lines = infile.read().splitlines()
- if lines[2] != ' ':
- raise RuntimeError('Unexpected dictionary format.')
- added_count = 0
- for word in words:
- line = f' {word.lower()}'
- if line not in lines:
- lines.insert(3, line)
- added_count += 1
-
- with open(fname, 'w', encoding='utf-8') as outfile:
- outfile.write(sort_jetbrains_dict('\n'.join(lines)))
-
- print(f'Added {added_count} words to {fname}.')
- num_modded_dictionaries += 1
- print(f'Modified {num_modded_dictionaries} dictionaries.')
-
-
-def pur() -> None:
- """Run pur using project's Python version."""
- import subprocess
-
- subprocess.run([sys.executable, '-m', 'pur'] + sys.argv[2:], check=True)
-
-
-def spelling_all() -> None:
- """Add all misspellings from a pycharm run."""
- import subprocess
-
- print('Running "make pycharm-full"...')
- lines = [
- line
- for line in subprocess.run(
- ['make', 'pycharm-full'], check=False, capture_output=True
- )
- .stdout.decode()
- .splitlines()
- if 'Typo: In word' in line
- ]
- words = [line.split('Typo: In word')[1].strip() for line in lines]
-
- # Strip enclosing quotes but not internal ones.
- for i, word in enumerate(words):
- assert word[0] == "'"
- assert word[-1] == "'"
- words[i] = word[1:-1]
-
- _spelling(words)
-
-
-def spelling() -> None:
- """Add words to the PyCharm dictionary."""
- _spelling(sys.argv[2:])
-
-
-def xcodebuild() -> None:
- """Run xcodebuild with added smarts."""
- from efrotools.xcodebuild import XCodeBuild
-
- XCodeBuild(projroot=str(PROJROOT), args=sys.argv[2:]).run()
-
-
-def xcoderun() -> None:
- """Run an xcode build in the terminal."""
- import os
- import subprocess
- from efro.error import CleanError
- from efrotools.xcodebuild import project_build_path
-
- if len(sys.argv) != 5:
- raise CleanError(
- 'Expected 3 args: '
- )
- project_path = os.path.abspath(sys.argv[2])
- scheme = sys.argv[3]
- configuration = sys.argv[4]
- path = project_build_path(
- projroot=str(PROJROOT),
- project_path=project_path,
- scheme=scheme,
- configuration=configuration,
- )
- subprocess.run(path, check=True)
-
-
-def pyver() -> None:
- """Prints the Python version used by this project."""
- from efrotools import PYVER
-
- print(PYVER, end='')
-
-
-def try_repeat() -> None:
- """Run a command with repeat attempts on failure.
-
- First arg is the number of retries; remaining args are the command.
- """
- import subprocess
- from efro.error import CleanError
-
- # We require one number arg and at least one command arg.
- if len(sys.argv) < 4:
- raise CleanError(
- 'Expected a retry-count arg and at least one command arg'
- )
- try:
- repeats = int(sys.argv[2])
- except Exception:
- raise CleanError('Expected int as first arg') from None
- if repeats < 0:
- raise CleanError('Retries must be >= 0')
- cmd = sys.argv[3:]
- for i in range(repeats + 1):
- result = subprocess.run(cmd, check=False)
- if result.returncode == 0:
- return
- print(
- f'try_repeat attempt {i + 1} of {repeats + 1} failed for {cmd}.',
- file=sys.stderr,
- flush=True,
- )
- raise CleanError(f'Command failed {repeats + 1} time(s): {cmd}')
-
-
-def check_clean_safety() -> None:
- """Ensure all files are are added to git or in gitignore.
-
- Use to avoid losing work if we accidentally do a clean without
- adding something.
- """
- import os
- import subprocess
- from efro.error import CleanError
-
- if len(sys.argv) != 2:
- raise CleanError('invalid arguments')
-
- # Make sure we wouldn't be deleting anything not tracked by git
- # or ignored.
- output = subprocess.check_output(
- ['git', 'status', '--porcelain=v2']
- ).decode()
- if any(line.startswith('?') for line in output.splitlines()):
- raise CleanError(
- 'Untracked file(s) found; aborting.'
- ' (see "git status" from "'
- + os.getcwd()
- + '") Either \'git add\' them, add them to .gitignore,'
- ' or remove them and try again.'
- )
-
-
-def gen_empty_py_init() -> None:
- """Generate an empty __init__.py for a package dir.
-
- Used as part of meta builds.
- """
- from efro.terminal import Clr
- from efro.error import CleanError
-
- if len(sys.argv) != 3:
- raise CleanError('Expected a single path arg.')
- outpath = Path(sys.argv[2])
- outpath.parent.mkdir(parents=True, exist_ok=True)
- print(f'Meta-building {Clr.BLD}{outpath}{Clr.RST}')
- with open(outpath, 'w', encoding='utf-8') as outfile:
- outfile.write('# This file is autogenerated; do not hand-edit.\n')
-
-
-def formatcode() -> None:
- """Format all of our C/C++/etc. code."""
- import efrotools.code
-
- full = '-full' in sys.argv
- efrotools.code.format_project_cpp_files(PROJROOT, full)
-
-
-def formatscripts() -> None:
- """Format all of our Python/etc. code."""
- import efrotools.code
-
- full = '-full' in sys.argv
- efrotools.code.format_project_python_files(PROJROOT, full)
-
-
-def formatmakefile() -> None:
- """Format the main makefile."""
- from efrotools.makefile import Makefile
-
- with open('Makefile', encoding='utf-8') as infile:
- original = infile.read()
-
- formatted = Makefile(original).get_output()
-
- # Only write if it changed.
- if formatted != original:
- with open('Makefile', 'w', encoding='utf-8') as outfile:
- outfile.write(formatted)
-
-
-def cpplint() -> None:
- """Run lint-checking on all code deemed lint-able."""
- import efrotools.code
-
- full = '-full' in sys.argv
- efrotools.code.check_cpplint(PROJROOT, full)
-
-
-def scriptfiles() -> None:
- """List project script files.
-
- Pass -lines to use newlines as separators. The default is spaces.
- """
- import efrotools.code
-
- paths = efrotools.code.get_script_filenames(projroot=PROJROOT)
- assert not any(' ' in path for path in paths)
- if '-lines' in sys.argv:
- print('\n'.join(paths))
- else:
- print(' '.join(paths))
-
-
-def pylint() -> None:
- """Run pylint checks on our scripts."""
- import efrotools.code
-
- full = '-full' in sys.argv
- fast = '-fast' in sys.argv
- efrotools.code.pylint(PROJROOT, full, fast)
-
-
-def pylint_files() -> None:
- """Run pylint checks on provided filenames."""
- from efro.terminal import Clr
- from efro.error import CleanError
- import efrotools.code
-
- if len(sys.argv) < 3:
- raise CleanError('Expected at least 1 filename arg.')
- filenames = sys.argv[2:]
- efrotools.code.runpylint(PROJROOT, filenames)
- print(f'{Clr.GRN}Pylint Passed.{Clr.RST}')
-
-
-def mypy() -> None:
- """Run mypy checks on our scripts."""
- import efrotools.code
-
- full = '-full' in sys.argv
- efrotools.code.mypy(PROJROOT, full)
-
-
-def mypy_files() -> None:
- """Run mypy checks on provided filenames."""
- from efro.terminal import Clr
- from efro.error import CleanError
- import efrotools.code
-
- if len(sys.argv) < 3:
- raise CleanError('Expected at least 1 filename arg.')
- filenames = sys.argv[2:]
- try:
- efrotools.code.mypy_files(PROJROOT, filenames)
- print(f'{Clr.GRN}Mypy Passed.{Clr.RST}')
- except Exception as exc:
- raise CleanError('Mypy Failed.') from exc
-
-
-def dmypy() -> None:
- """Run mypy checks on our scripts using the mypy daemon."""
- import efrotools.code
-
- efrotools.code.dmypy(PROJROOT)
-
-
-def pycharm() -> None:
- """Run PyCharm checks on our scripts."""
- import efrotools.code
-
- full = '-full' in sys.argv
- verbose = '-v' in sys.argv
- efrotools.code.check_pycharm(PROJROOT, full, verbose)
-
-
-def clioncode() -> None:
- """Run CLion checks on our code."""
- import efrotools.code
-
- full = '-full' in sys.argv
- verbose = '-v' in sys.argv
- efrotools.code.check_clioncode(PROJROOT, full, verbose)
-
-
-def androidstudiocode() -> None:
- """Run Android Studio checks on our code."""
- import efrotools.code
-
- full = '-full' in sys.argv
- verbose = '-v' in sys.argv
- efrotools.code.check_android_studio(PROJROOT, full, verbose)
-
-
-def tool_config_install() -> None:
- """Install a tool config file (with some filtering)."""
- from efro.error import CleanError
-
- import efrotools.toolconfig
-
- if len(sys.argv) != 4:
- raise CleanError('expected 2 args')
- src = Path(sys.argv[2])
- dst = Path(sys.argv[3])
-
- efrotools.toolconfig.install_tool_config(PROJROOT, src, dst)
-
-
-def sync_all() -> None:
- """Runs full syncs between all efrotools projects.
-
- This list is defined in the EFROTOOLS_SYNC_PROJECTS env var.
- This assumes that there is a 'sync-full' and 'sync-list' Makefile target
- under each project.
- """
- import os
- import subprocess
- import concurrent.futures
- from efro.error import CleanError
- from efro.terminal import Clr
-
- print(f'{Clr.BLD}Updating formatting for all projects...{Clr.RST}')
- projects_str = os.environ.get('EFROTOOLS_SYNC_PROJECTS')
- if projects_str is None:
- raise CleanError('EFROTOOL_SYNC_PROJECTS is not defined.')
- projects = projects_str.split(':')
-
- def _format_project(fproject: str) -> None:
- fcmd = f'cd "{fproject}" && make format'
- # print(fcmd)
- subprocess.run(fcmd, shell=True, check=True)
-
- # No matter what we're doing (even if just listing), run formatting
- # in all projects before beginning. Otherwise if we do a sync and then
- # a preflight we'll often wind up getting out-of-sync errors due to
- # formatting changing after the sync.
- with concurrent.futures.ThreadPoolExecutor(
- max_workers=len(projects)
- ) as executor:
- # Converting this to a list will propagate any errors.
- list(executor.map(_format_project, projects))
-
- if len(sys.argv) > 2 and sys.argv[2] == 'list':
- # List mode
- for project in projects_str.split(':'):
- cmd = f'cd "{project}" && make sync-list'
- print(cmd)
- subprocess.run(cmd, shell=True, check=True)
-
- else:
- # Real mode
- for i in range(2):
- if i == 0:
- print(
- f'{Clr.BLD}Running sync pass 1'
- f' (ensures all changes at dsts are pushed to src):'
- f'{Clr.RST}'
- )
- else:
- print(
- f'{Clr.BLD}Running sync pass 2'
- f' (ensures latest src is pulled to all dsts):{Clr.RST}'
- )
- for project in projects_str.split(':'):
- cmd = f'cd "{project}" && make sync-full'
- subprocess.run(cmd, shell=True, check=True)
- print(Clr.BLD + 'Sync-all successful!' + Clr.RST)
-
-
-def sync() -> None:
- """Runs standard syncs between this project and others."""
- from efrotools import getprojectconfig
- from efrotools.sync import Mode, SyncItem, run_standard_syncs
-
- mode = Mode(sys.argv[2]) if len(sys.argv) > 2 else Mode.PULL
-
- # Load sync-items from project config and run them
- sync_items = [
- SyncItem(**i) for i in getprojectconfig(PROJROOT).get('sync_items', [])
- ]
- run_standard_syncs(PROJROOT, mode, sync_items)
-
-
-def compile_python_files() -> None:
- """Compile pyc files for packaging.
-
- This creates hash-based PYC files in opt level 1 with hash checks
- defaulting to off, so we don't have to worry about timestamps or
- loading speed hits due to hash checks. (see PEP 552).
- We just need to tell modders that they'll need to clear these
- cache files out or turn on debugging mode if they want to tweak
- the built-in scripts directly (or go through the asset build system which
- properly recreates the .pyc files).
- """
- import py_compile
-
- for arg in sys.argv[2:]:
- mode = py_compile.PycInvalidationMode.UNCHECKED_HASH
- py_compile.compile(
- arg,
- # dfile=os.path.basename(arg),
- doraise=True,
- optimize=1,
- invalidation_mode=mode,
- )
-
-
-def pytest() -> None:
- """Run pytest with project environment set up properly."""
- import os
- import platform
- import subprocess
- from efrotools import getprojectconfig, PYTHON_BIN
- from efro.error import CleanError
-
- # Grab our python paths for the project and stuff them in PYTHONPATH.
- pypaths = getprojectconfig(PROJROOT).get('python_paths')
- if pypaths is None:
- raise CleanError('python_paths not found in project config.')
-
- separator = ';' if platform.system() == 'Windows' else ':'
- os.environ['PYTHONPATH'] = separator.join(pypaths)
-
- # Also tell Python interpreters not to write __pycache__ dirs everywhere
- # which can screw up our builds.
- os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
-
- # Let's flip on dev mode to hopefully be informed on more bad stuff
- # happening. https://docs.python.org/3/library/devmode.html
- os.environ['PYTHONDEVMODE'] = '1'
-
- # Do the thing.
- results = subprocess.run(
- [PYTHON_BIN, '-m', 'pytest'] + sys.argv[2:], check=False
- )
- if results.returncode != 0:
- sys.exit(results.returncode)
-
-
-def makefile_target_list() -> None:
- """Prints targets in a makefile.
-
- Takes a single argument: a path to a Makefile.
- """
- from dataclasses import dataclass
- from efro.error import CleanError
- from efro.terminal import Clr
-
- @dataclass
- class _Entry:
- kind: str
- line: int
- title: str
-
- if len(sys.argv) != 3:
- raise CleanError('Expected exactly one filename arg.')
-
- with open(sys.argv[2], encoding='utf-8') as infile:
- lines = infile.readlines()
-
- def _docstr(lines2: list[str], linenum: int) -> str:
- doc = ''
- j = linenum - 1
- while j >= 0 and lines2[j].startswith('#'):
- doc = lines2[j][1:].strip()
- j -= 1
- if doc != '':
- return ' - ' + doc
- return doc
-
- print(
- '----------------------\n'
- 'Available Make Targets\n'
- '----------------------'
- )
-
- entries: list[_Entry] = []
- for i, line in enumerate(lines):
- # Targets.
- if (
- ':' in line
- and line.split(':')[0].replace('-', '').replace('_', '').isalnum()
- and not line.startswith('_')
- ):
- entries.append(
- _Entry(kind='target', line=i, title=line.split(':')[0])
- )
-
- # Section titles.
- if (
- line.startswith('# ')
- and line.endswith(' #\n')
- and len(line.split()) > 2
- ):
- entries.append(
- _Entry(kind='section', line=i, title=line[1:-2].strip())
- )
-
- for i, entry in enumerate(entries):
- if entry.kind == 'section':
- # Don't print headers for empty sections.
- if i + 1 >= len(entries) or entries[i + 1].kind == 'section':
- continue
- print('\n' + entry.title + '\n' + '-' * len(entry.title))
- elif entry.kind == 'target':
- print(
- Clr.MAG
- + entry.title
- + Clr.BLU
- + _docstr(lines, entry.line)
- + Clr.RST
- )
-
-
-def echo() -> None:
- """Echo with support for efro.terminal.Clr args (RED, GRN, BLU, etc).
-
- Prints a Clr.RST at the end so that can be omitted.
- """
- from efro.terminal import Clr
-
- clrnames = {n for n in dir(Clr) if n.isupper() and not n.startswith('_')}
- first = True
- out: list[str] = []
- for arg in sys.argv[2:]:
- if arg in clrnames:
- out.append(getattr(Clr, arg))
- else:
- if not first:
- out.append(' ')
- first = False
- out.append(arg)
- out.append(Clr.RST)
- print(''.join(out))
-
-
-def urandom_pretty() -> None:
- """Spits out urandom bytes formatted for source files."""
- # Note; this is not especially efficient. It should probably be rewritten
- # if ever needed in a performance-sensitive context.
- import os
- from efro.error import CleanError
-
- if len(sys.argv) not in (3, 4):
- raise CleanError(
- 'Expected one arg (count) and possibly two (line len).'
- )
- size = int(sys.argv[2])
- linemax = 72 if len(sys.argv) < 4 else int(sys.argv[3])
-
- val = os.urandom(size)
- lines: list[str] = []
- line = b''
-
- for i in range(len(val)):
- char = val[i : i + 1]
- thislinelen = len(repr(line + char))
- if thislinelen > linemax:
- lines.append(repr(line))
- line = b''
- line += char
- if line:
- lines.append(repr(line))
-
- bstr = '\n'.join(str(l) for l in lines)
- print(f'({bstr})')
-
-
-def tweak_empty_py_files() -> None:
- """Find any zero-length Python files and make them length 1."""
- from efro.error import CleanError
- import efrotools.pybuild
-
- if len(sys.argv) != 3:
- raise CleanError('Expected exactly 1 path arg.')
- efrotools.pybuild.tweak_empty_py_files(sys.argv[2])
-
-
-def make_ensure() -> None:
- """Make sure a makefile target is up-to-date.
-
- This can technically be done by simply `make --question`, but this
- has some extra bells and whistles such as printing some of the commands
- that would run.
- Can be useful to run after cloud-builds to ensure the local results
- consider themselves up-to-date.
- """
- # pylint: disable=too-many-locals
- from efro.error import CleanError
- from efro.terminal import Clr
- import subprocess
-
- dirpath: str | None = None
- args = sys.argv[2:]
- if '--dir' in args:
- argindex = args.index('--dir')
- dirpath = args[argindex + 1]
- del args[argindex : argindex + 2]
-
- if len(args) not in (0, 1):
- raise CleanError('Expected zero or one target args.')
- target = args[0] if args else None
-
- cmd = ['make', '--no-print-directory', '--dry-run']
- if target is not None:
- cmd.append(target)
- results = subprocess.run(cmd, check=False, capture_output=True, cwd=dirpath)
- out = results.stdout.decode()
- err = results.stderr.decode()
- if results.returncode != 0:
- print(f'Failed command stdout:\n{out}\nFailed command stderr:\n{err}')
- raise CleanError(f"Command failed during make_ensure: '{cmd}'.")
-
- targetname: str = '' if target is None else target
- lines = out.splitlines()
- in_str = '' if dirpath is None else f"in directory '{dirpath}' "
- if len(lines) == 1 and 'Nothing to be done for ' in lines[0]:
- print(f"make_ensure: '{targetname}' target {in_str}is up to date.")
- else:
- maxlines = 20
- if len(lines) > maxlines:
- outlines = '\n'.join(
- lines[:maxlines] + [f'(plus {len(lines)-maxlines} more lines)']
- )
- else:
- outlines = '\n'.join(lines)
-
- print(
- f"make_ensure: '{targetname}' target {in_str}"
- f'is out of date; would run:\n\n'
- '-------------------------- MAKE-ENSURE COMMANDS BEGIN '
- f'--------------------------\n{Clr.YLW}'
- f'{outlines}{Clr.RST}\n'
- '--------------------------- MAKE-ENSURE COMMANDS END '
- '---------------------------\n'
- )
- raise CleanError(
- f"make_ensure: '{targetname}' target {in_str}is out of date."
- )
-
-
-def make_target_debug() -> None:
- """Debug makefile src/target mod times given src and dst path.
-
- Built to debug stubborn Makefile targets that insist on being
- rebuilt just after being built via a cloud target.
- """
- import os
- import datetime
-
- from efro.error import CleanError
-
- # from efro.util import ago_str, utc_now
-
- args = sys.argv[2:]
- if len(args) != 2:
- raise CleanError('Expected 2 args.')
-
- def _utc_mod_time(path: str) -> datetime.datetime:
- mtime = os.path.getmtime(path)
- mdtime = datetime.datetime.fromtimestamp(mtime, datetime.timezone.utc)
- # mdtime.replace(tzinfo=datetime.timezone.utc)
- return mdtime
-
- # srcname = os.path.basename(args[0])
- # dstname = os.path.basename(args[1])
- srctime = _utc_mod_time(args[0])
- dsttime = _utc_mod_time(args[1])
- # now = utc_now()
- # src_ago = ago_str(srctime, maxparts=3, decimals=2, now=now)
- # dst_ago = ago_str(dsttime, maxparts=3, decimals=2, now=now)
- srctimestr = (
- f'{srctime.hour}:{srctime.minute}:{srctime.second}:'
- f'{srctime.microsecond}'
- )
- dsttimestr = (
- f'{dsttime.hour}:{dsttime.minute}:{dsttime.second}:'
- f'{dsttime.microsecond}'
- )
- print(f'SRC modified at {srctimestr}.')
- print(f'DST modified at {dsttimestr}.')
diff --git a/tools/efrotools/pcommand2.py b/tools/efrotools/pcommand2.py
deleted file mode 100644
index 0683eb45..00000000
--- a/tools/efrotools/pcommand2.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Released under the MIT License. See LICENSE for details.
-#
-"""Standard snippets that can be pulled into project pcommand scripts.
-
-A snippet is a mini-program that directly takes input from stdin and does
-some focused task. This module is a repository of common snippets that can
-be imported into projects' pcommand script for easy reuse.
-"""
-from __future__ import annotations
-
-import sys
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
- pass
-
-
-def with_build_lock() -> None:
- """Run a shell command wrapped in a build-lock."""
- from efro.error import CleanError
- from efrotools.buildlock import BuildLock
-
- import subprocess
-
- args = sys.argv[2:]
- if len(args) < 2:
- raise CleanError(
- 'Expected one lock-name arg and at least one command arg'
- )
- with BuildLock(args[0]):
- subprocess.run(' '.join(args[1:]), check=True, shell=True)
-
-
-def sortlines() -> None:
- """Sort provided lines. For tidying import lists, etc."""
- from efro.error import CleanError
-
- if len(sys.argv) != 3:
- raise CleanError('Expected 1 arg.')
- val = sys.argv[2]
- lines = val.splitlines()
- print('\n'.join(sorted(lines, key=lambda l: l.lower())))
-
-
-def openal_build_android() -> None:
- """Build openalsoft for android."""
- from efro.error import CleanError
- from efrotools.openalbuild import build
-
- args = sys.argv[2:]
- if len(args) != 2:
- raise CleanError(
- 'Expected one arg: arm, arm64, x86, x86_64'
- ' and one arg: debug, release'
- )
-
- build(args[0], args[1])
-
-
-def openal_gather() -> None:
- """Gather built opealsoft libs into src."""
- from efro.error import CleanError
- from efrotools.openalbuild import gather
-
- args = sys.argv[2:]
- if args:
- raise CleanError('No args expected.')
-
- gather()
-
-
-def pyright() -> None:
- """Run Pyright checks on project Python code."""
- import subprocess
-
- from efro.terminal import Clr
-
- from efro.error import CleanError
-
- print(f'{Clr.BLU}Running Pyright (experimental)...{Clr.RST}')
- try:
- subprocess.run(
- ['pyright', '--project', '.pyrightconfig.json'], check=True
- )
- except Exception as exc:
- raise CleanError('Pyright failed.') from exc
diff --git a/tools/efrotools/pcommandbatch.py b/tools/efrotools/pcommandbatch.py
new file mode 100644
index 00000000..19801104
--- /dev/null
+++ b/tools/efrotools/pcommandbatch.py
@@ -0,0 +1,450 @@
+# Released under the MIT License. See LICENSE for details.
+#
+"""Wrangles pcommandbatch; an efficient way to run small pcommands.
+
+The whole purpose of pcommand is to be a lightweight way to run small
+snippets of Python to do bits of work in a project. The pcommand script
+tries to minimize imports and work done in order to keep runtime as
+short as possible. However, even an 'empty' pcommand still takes a
+fraction of a second due to the time needed to spin up Python and import
+a minimal set of modules. This can add up for large builds where
+hundreds or thousands of pcommands are being run.
+
+To help fight that problem, pcommandbatch introduces a way to run
+pcommands by submitting requests to a temporary local server daemon.
+This allows individual pcommand calls to go through a very lightweight
+client binary that simply forwards the command to a running server.
+This cuts minimal client runtime down to nearly zero. Building and
+managing the server and client are handled automatically, and systems
+which are unable to compile a client binary can fall back to using
+vanilla pcommand in those cases.
+
+A few considerations must be made when writing pcommands that work
+in batch mode. By default, all existing pcommands have been fitted with
+a disallow_in_batch() call which triggers an error under batch mode.
+These calls should be removed if/when each call is updated to work
+cleanly in batch mode. Requirements for batch-friendly pcommands follow:
+
+- Batch mode runs parallel pcommands in different background threads.
+ Commands must be ok with that.
+
+- Batch-enabled pcommands must not look at sys.argv. They should instead
+ use pcommand.get_args(). Be aware that this value does not include
+ the first two values from sys.argv (executable path and pcommand name)
+ so is generally cleaner to use anyway. Also be aware that args are
+ thread-local, so only call get_args() from the thread your pcommand
+ was called in.
+
+- Batch-enabled pcommands should not call os.chdir() or sys.exit() or
+ anything else having global effects. This should be self-explanatory
+ considering the shared server model in use.
+
+- Standard print and log calls will wind up in the pcommandbatch server
+ log and will not be seen by the user or capturable by the calling
+ process. By default, only a return code is passed back to the client,
 where an error message instructs the user to refer to the log or run
+ again with batch mode disabled. Commands that should print some output
+ even in batch mode can use pcommand.set_output() to do so. Note that
+ this is currently limited to small bits of output (but that can be
+ changed).
+
+"""
+from __future__ import annotations
+
+import os
+import sys
+import time
+import json
+import asyncio
+import tempfile
+import subprocess
+from typing import TYPE_CHECKING
+
+import filelock
+from efro.error import CleanError
+from efro.terminal import Clr
+
+if TYPE_CHECKING:
+ pass
+
# Enable debug-mode, in which server commands are *not* spun off into
# daemons. This means some commands will block waiting for background
# servers they launched to exit, but it can make everything easier to
# debug as a whole since all client and server output will go a single
# terminal.
# Note: default is the string '0' so both sides of the comparison are
# strings (previously the default was the int 0, which only worked by
# accident of int != str always being False).
DEBUG = os.environ.get('BA_PCOMMANDBATCH_DEBUG', '0') == '1'

# Enable extra logging during server runs/etc. Debug mode implicitly
# enables this as well.
VERBOSE = DEBUG or os.environ.get('BA_PCOMMANDBATCH_VERBOSE', '0') == '1'
+
+
def build_pcommandbatch(inpaths: list[str], outpath: str) -> None:
    """Create the binary or link regular pcommand.

    Args:
      inpaths: C source files for the batch client executable.
      outpath: Path where the executable (or fallback symlink) lands.
    """

    # Make a quiet attempt to build a batch binary, but just symlink
    # the plain old pcommand if anything goes wrong. That should work in
    # all cases; it'll just be slower.
    try:
        # TEMP - clean up old path (our dir used to be just a binary).
        if os.path.isfile(os.path.dirname(outpath)):
            os.remove(os.path.dirname(outpath))

        # Clear any previous fallback symlink so a successful compile
        # replaces it with a real binary.
        if os.path.islink(outpath):
            os.unlink(outpath)

        os.makedirs(os.path.dirname(outpath), exist_ok=True)

        # Note: this is kinda a project-specific path; perhaps we'd
        # want to specify this in project-config?
        subprocess.run(
            ['cc'] + inpaths + ['-o', outpath],
            check=True,
            # Swallow compiler output unless the user explicitly asked
            # to see it via the env var below.
            capture_output=os.environ.get('BA_PCOMMANDBATCH_BUILD_VERBOSE')
            != '1',
        )
    except Exception:
        print(
            f'{Clr.YLW}Warning: Unable to build pcommandbatch executable;'
            f' falling back to regular pcommand. Build with env var'
            f' BA_PCOMMANDBATCH_BUILD_VERBOSE=1 to see what went wrong.'
            f'{Clr.RST}',
            file=sys.stderr,
        )
        # Relative symlink target; presumably outpath sits two dirs
        # below the project root — TODO confirm against callers.
        subprocess.run(
            ['ln', '-sf', '../../tools/pcommand', outpath], check=True
        )
+
+
def run_pcommandbatch_server(
    idle_timeout_secs: int, state_dir: str, instance: str
) -> None:
    """Run a server for handling batches of pcommands.

    If a matching instance is already running, is a no-op.

    Args:
      idle_timeout_secs: How long the server may sit with no clients
        before shutting itself down.
      state_dir: Directory holding the lock/state/log files shared
        with clients.
      instance: Name distinguishing this server instance's files.
    """
    import daemon

    # Be aware that when running without daemons, various build commands
    # will block waiting for the server processes that they spawned to
    # exit. It can be worth it to debug things with everything spitting
    # output to the same terminal though.
    use_daemon = not DEBUG

    # Our stdout/stderr should already be directed to a file so we can
    # just keep the existing ones.
    server = Server(
        idle_timeout_secs=idle_timeout_secs,
        state_dir=state_dir,
        instance=instance,
        daemon=use_daemon,
    )

    if use_daemon:
        # Detach into the background, keeping cwd and our (already
        # file-backed) stdout/stderr.
        with daemon.DaemonContext(
            working_directory=os.getcwd(), stdout=sys.stdout, stderr=sys.stderr
        ):
            server.run()
    else:
        server.run()
+
+
class IdleError(RuntimeError):
    """Raised internally to shut the server down peacefully on idle."""
+
+
class Server:
    """A server that handles requests from pcommandbatch clients.

    Coordinates with other would-be servers via a file-lock and a
    freshness-stamped state file (holding its port) that clients read.
    All output goes to stderr, which is expected to be redirected to a
    per-instance log file when daemonized.
    """

    def __init__(
        self,
        idle_timeout_secs: int,
        state_dir: str,
        instance: str,
        daemon: bool,
    ) -> None:
        self._daemon = daemon
        self._state_dir = state_dir
        self._idle_timeout_secs = idle_timeout_secs
        self._worker_state_file_path = f'{state_dir}/worker_state_{instance}'
        self._worker_log_file_path = f'{self._state_dir}/worker_log_{instance}'
        # Bumped on each incoming request; zeroed on each upkeep pass
        # so jobs that start *and* finish between passes still count
        # against idleness.
        self._client_count_since_last_check = 0
        self._running_client_count = 0
        # Assigned once our asyncio server is actually listening.
        self._port: int | None = None
        self._pid = os.getpid()
        self._next_request_id = 0
        self._instance = instance
        self._spinup_lock_path = f'{self._state_dir}/lock'
        self._spinup_lock = filelock.FileLock(self._spinup_lock_path)
        # When set, all requests are answered with this error message
        # instead of being run.
        self._server_error: str | None = None

    def run(self) -> None:
        """Do the thing."""

        try:
            self._spinup_lock.acquire(timeout=10)
            if VERBOSE:
                print(
                    f'pcommandbatch server {self._instance}'
                    f' (pid {os.getpid()}) acquired spinup-lock'
                    f' at time {time.time():.3f}.',
                    file=sys.stderr,
                )

        except filelock.Timeout:
            # Attempt to error and inform clients if we weren't able to
            # acquire the file-lock. Unfortunately I can't really test this
            # case because file-lock releases itself in its destructor.
            if VERBOSE:
                print(
                    f'pcommandbatch server {self._instance}'
                    f' (pid {os.getpid()}) timed out acquiring spinup-lock'
                    f' at time {time.time():.3f}; this should not happen.',
                    file=sys.stderr,
                )

            self._server_error = (
                f'Error: pcommandbatch unable to acquire file-lock at'
                f' {self._spinup_lock_path}. Something is probably broken.'
            )

        # In daemon mode we get multiple processes dumping to the same
        # instance log file. We want to try and clear the log whenever a
        # new batch run starts so it doesn't grow infinitely. So let's
        # have any holder of the spinup lock (including aborted spinups)
        # truncate it if it appears to have been idle long enough to
        # have shut down.
        if self._daemon:
            try:
                existing_log_age = int(
                    time.time() - os.path.getmtime(self._worker_log_file_path)
                )
                if existing_log_age > self._idle_timeout_secs:
                    # Our stderr should already point at the log file
                    # (see run_pcommandbatch_server), so truncating it
                    # clears the log in place.
                    sys.stderr.truncate(0)
            except FileNotFoundError:
                pass

        # If there's an existing file younger than idle-seconds,
        # consider it still valid and abort our creation.
        try:
            existing_age = int(
                time.time() - os.path.getmtime(self._worker_state_file_path)
            )
            if existing_age <= self._idle_timeout_secs:
                if VERBOSE:
                    print(
                        f'pcommandbatch server {self._instance}'
                        f' (pid {os.getpid()}) found existing batch'
                        f' server (age {existing_age})'
                        f' at time {time.time():.3f};'
                        f' aborting run...',
                        file=sys.stderr,
                    )
                return
        except FileNotFoundError:
            # No state; no problem. Keep spinning up ours.
            if VERBOSE:
                print(
                    f'pcommandbatch server {self._instance}'
                    f' (pid {os.getpid()})'
                    f' found no existing batch server at time'
                    f' {time.time():.3f};'
                    f' proceeding with run...',
                    file=sys.stderr,
                )

        asyncio.run(self._run())

    async def _run(self) -> None:
        """Async portion of run(): serve until upkeep shuts us down."""
        import efrotools.pcommand

        # Tell the running pcommand that we're the captain now.
        efrotools.pcommand.enter_batch_server_mode()

        # Port 0 lets the OS pick a free port for us.
        server = await asyncio.start_server(self._handle_client, '127.0.0.1', 0)

        self._port = server.sockets[0].getsockname()[1]
        print(
            f'pcommandbatch server {self._instance} (pid {self._pid})'
            f' running on port {self._port} at time {time.time():.3f}...',
            file=sys.stderr,
        )

        # Write our first state and then unlock the spinup lock. New
        # spinup attempts will now see that we're here and back off.
        self._update_worker_state_file(-1)
        if self._spinup_lock.is_locked:
            self._spinup_lock.release()

        # Now run until our upkeep task kills us.
        try:
            await asyncio.gather(
                asyncio.create_task(
                    self._upkeep_task(), name='pcommandbatch upkeep'
                ),
                server.serve_forever(),
            )
        except IdleError:
            pass

        print(
            f'pcommandbatch server {self._instance} (pid {self._pid})'
            f' exiting at time {time.time():.3f}.',
            file=sys.stderr,
        )

    async def _handle_client(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ) -> None:
        """Handle a client.

        Reads a json list of argv strings, runs the named pcommand in
        a worker thread, and replies with a json dict containing
        result-code 'r' and output 'o'.
        """
        from efrotools.pcommand import run_client_pcommand

        request_id = self._next_request_id
        self._next_request_id += 1
        self._client_count_since_last_check += 1
        self._running_client_count += 1
        try:
            argv: list[str] = json.loads((await reader.read()).decode())
            assert isinstance(argv, list)
            assert all(isinstance(i, str) for i in argv)

            print(
                f'pcommandbatch server {self._instance} (pid {self._pid})'
                f' handling request {request_id} at time {time.time():.3f}:'
                f' {argv}.',
                file=sys.stderr,
            )

            try:
                if self._server_error is not None:
                    resultcode = 1
                    output = self._server_error
                else:
                    # Run the (blocking) pcommand in a thread so the
                    # event loop can keep serving other clients.
                    (
                        resultcode,
                        output,
                    ) = await asyncio.get_running_loop().run_in_executor(
                        None,
                        lambda: run_client_pcommand(
                            argv, self._worker_log_file_path
                        ),
                    )
                if VERBOSE:
                    print(
                        f'pcommandbatch server {self._instance}'
                        f' (pid {self._pid})'
                        f' request {request_id} finished with code'
                        f' {resultcode}.',
                        file=sys.stderr,
                    )
            except Exception as exc:
                if VERBOSE:
                    print(
                        f'pcommandbatch server {self._instance}'
                        f' (pid {self._pid}):'
                        f'error on request {request_id}: {exc}.',
                        file=sys.stderr,
                    )
                resultcode = 1
                output = ''

            writer.write(json.dumps({'r': resultcode, 'o': output}).encode())
            writer.close()
            await writer.wait_closed()

        finally:
            self._running_client_count -= 1
            assert self._running_client_count >= 0

    async def _upkeep_task(self) -> None:
        """Handle timeouts, updating port file timestamp, etc."""

        start_time = time.monotonic()
        # Hard cap on total server lifetime regardless of activity.
        abs_timeout_secs = 60 * 5
        idle_secs = 0
        idle_buffer = 5

        while True:
            await asyncio.sleep(1.0)
            now = time.monotonic()
            since_start = now - start_time

            # Whenever we've run client(s) within the last second, we
            # reset our idle time and freshen our state file so clients
            # know they can still use us.

            # Consider ourself idle if there are no currently running
            # jobs AND nothing has been run since our last check. This
            # covers both long running jobs and super short ones that
            # would otherwise slip between our discrete checks.
            if (
                self._client_count_since_last_check
                or self._running_client_count
            ):
                idle_secs = 0
                self._update_worker_state_file(idle_secs)
            else:
                idle_secs += 1
                if VERBOSE:
                    print(
                        f'pcommandbatch server {self._instance}'
                        f' (pid {self._pid})'
                        f' idle {idle_secs}/'
                        f'{self._idle_timeout_secs + idle_buffer} seconds at'
                        f' time {int(time.time())}.',
                        file=sys.stderr,
                    )

            self._client_count_since_last_check = 0

            # Clients should stop trying to contact us when our state
            # file hits idle_timeout_secs in age, but we actually stay
            # alive for a few extra seconds just to make sure we
            # don't spin down right as someone is trying to use us.
            if idle_secs >= self._idle_timeout_secs + idle_buffer:
                # This insta-kills our server so it should never be
                # happening while something is running.
                if self._running_client_count:
                    raise CleanError(
                        f'pcommandbatch server {self._instance}'
                        f' (pid {self._pid}):'
                        f' idle-exiting but have running_client_count'
                        f' {self._running_client_count}; something'
                        f' is probably broken.'
                    )
                raise IdleError()

            if since_start > abs_timeout_secs:
                raise CleanError(
                    f'pcommandbatch server {self._instance}'
                    f' (pid {self._pid}): max'
                    f' run-time of {abs_timeout_secs}s reached.'
                    ' Something is probably broken.'
                )

    def _update_worker_state_file(self, idle_secs: int) -> None:
        # Freshen our state file so clients keep considering us alive.
        assert self._port is not None
        # Dump our port to a temp file and move it into place.
        # Hopefully this will be nice and atomic.
        if VERBOSE:
            print(
                f'pcommandbatch server {self._instance} (pid {self._pid})'
                f' refreshing state file {self._worker_state_file_path}'
                f' with port {self._port} and idle-secs {idle_secs}'
                f' at time {time.time():.3f}.',
                file=sys.stderr,
            )

        with tempfile.TemporaryDirectory() as tempdir:
            outpath = os.path.join(tempdir, 'f')
            with open(outpath, 'w', encoding='utf-8') as outfile:
                outfile.write(json.dumps({'p': self._port}))
            # 'mv' performs the final rename so readers never see a
            # partially-written state file.
            subprocess.run(
                ['mv', outpath, self._worker_state_file_path], check=True
            )
diff --git a/tools/efrotools/pcommands.py b/tools/efrotools/pcommands.py
new file mode 100644
index 00000000..d46f4d30
--- /dev/null
+++ b/tools/efrotools/pcommands.py
@@ -0,0 +1,830 @@
+# Released under the MIT License. See LICENSE for details.
+#
+"""A set of lovely pcommands ready for use."""
+
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING
+
+from efrotools import pcommand
+
+if TYPE_CHECKING:
+ pass
+
+
def _spelling(words: list[str]) -> None:
    """Add *words* to any JetBrains spelling dictionaries present.

    Shared helper behind the 'spelling' and 'spelling_all' pcommands.
    """
    from efrotools.code import sort_jetbrains_dict
    import os

    pcommand.disallow_in_batch()

    num_modded_dictionaries = 0
    for fname in [
        '.idea/dictionaries/ericf.xml',
        'ballisticakit-cmake/.idea/dictionaries/ericf.xml',
    ]:
        # Not every checkout has both dictionaries; skip missing ones.
        if not os.path.exists(fname):
            continue
        with open(fname, encoding='utf-8') as infile:
            lines = infile.read().splitlines()
        # Sanity-check the expected dictionary layout before editing
        # (presumably the third line is a fixed marker in the xml —
        # TODO confirm exact expected format).
        if lines[2] != ' ':
            raise RuntimeError('Unexpected dictionary format.')
        added_count = 0
        for word in words:
            line = f' {word.lower()}'
            # Only insert words not already present.
            if line not in lines:
                lines.insert(3, line)
                added_count += 1

        with open(fname, 'w', encoding='utf-8') as outfile:
            outfile.write(sort_jetbrains_dict('\n'.join(lines)))

        print(f'Added {added_count} words to {fname}.')
        num_modded_dictionaries += 1
    print(f'Modified {num_modded_dictionaries} dictionaries.')
+
+
def pur() -> None:
    """Run pur using project's Python version."""
    import subprocess

    pcommand.disallow_in_batch()

    # Forward all user args straight through to pur.
    cmd = [sys.executable, '-m', 'pur', *sys.argv[2:]]
    subprocess.run(cmd, check=True)
+
+
def spelling_all() -> None:
    """Add all misspellings from a pycharm run."""
    import subprocess

    pcommand.disallow_in_batch()

    print('Running "make pycharm-full"...')
    raw = subprocess.run(
        ['make', 'pycharm-full'], check=False, capture_output=True
    ).stdout.decode()
    typo_lines = [ln for ln in raw.splitlines() if 'Typo: In word' in ln]

    words: list[str] = []
    for tline in typo_lines:
        word = tline.split('Typo: In word')[1].strip()
        # Strip enclosing quotes but not internal ones.
        assert word[0] == "'"
        assert word[-1] == "'"
        words.append(word[1:-1])

    _spelling(words)
+
+
def spelling() -> None:
    """Add words to the PyCharm dictionary."""

    pcommand.disallow_in_batch()

    # Everything after the pcommand name is a word to add.
    _spelling(sys.argv[2:])
+
+
def xcodebuild() -> None:
    """Run xcodebuild with added smarts."""
    from efrotools.xcodebuild import XCodeBuild

    pcommand.disallow_in_batch()

    build = XCodeBuild(projroot=str(pcommand.PROJROOT), args=sys.argv[2:])
    build.run()
+
+
def xcoderun() -> None:
    """Run an xcode build in the terminal.

    Expects three args: a project path, a scheme, and a configuration.
    """
    import os
    import subprocess
    from efro.error import CleanError
    from efrotools.xcodebuild import project_build_path

    pcommand.disallow_in_batch()

    if len(sys.argv) != 5:
        # Spell out what the three args are (the previous message
        # trailed off after the colon).
        raise CleanError(
            'Expected 3 args: project-path, scheme, configuration'
        )
    project_path = os.path.abspath(sys.argv[2])
    scheme = sys.argv[3]
    configuration = sys.argv[4]
    path = project_build_path(
        projroot=str(pcommand.PROJROOT),
        project_path=project_path,
        scheme=scheme,
        configuration=configuration,
    )
    subprocess.run(path, check=True)
+
+
def pyver() -> None:
    """Prints the Python version used by this project."""
    from efrotools import PYVER

    pcommand.disallow_in_batch()

    # No trailing newline; callers capture this value verbatim.
    sys.stdout.write(PYVER)
+
+
def try_repeat() -> None:
    """Run a command with repeat attempts on failure.

    First arg is the number of retries; remaining args are the command.
    """
    import subprocess
    from efro.error import CleanError

    pcommand.disallow_in_batch()

    # We require one number arg and at least one command arg.
    if len(sys.argv) < 4:
        raise CleanError(
            'Expected a retry-count arg and at least one command arg'
        )
    try:
        repeats = int(sys.argv[2])
    except Exception:
        raise CleanError('Expected int as first arg') from None
    if repeats < 0:
        raise CleanError('Retries must be >= 0')
    cmd = sys.argv[3:]
    total_tries = repeats + 1
    for attempt in range(1, total_tries + 1):
        if subprocess.run(cmd, check=False).returncode == 0:
            return
        print(
            f'try_repeat attempt {attempt} of {total_tries} failed for {cmd}.',
            file=sys.stderr,
            flush=True,
        )
    raise CleanError(f'Command failed {total_tries} time(s): {cmd}')
+
+
def check_clean_safety() -> None:
    """Ensure all files are added to git or in gitignore.

    Use to avoid losing work if we accidentally do a clean without
    adding something.
    """
    import os
    import subprocess
    from efro.error import CleanError

    pcommand.disallow_in_batch()

    if len(sys.argv) != 2:
        raise CleanError('invalid arguments')

    # Make sure we wouldn't be deleting anything not tracked by git
    # or ignored.
    status = subprocess.check_output(
        ['git', 'status', '--porcelain=v2']
    ).decode()
    untracked_found = any(
        statusline.startswith('?') for statusline in status.splitlines()
    )
    if untracked_found:
        raise CleanError(
            'Untracked file(s) found; aborting.'
            ' (see "git status" from "'
            + os.getcwd()
            + '") Either \'git add\' them, add them to .gitignore,'
            ' or remove them and try again.'
        )
+
+
def gen_empty_py_init() -> None:
    """Generate an empty __init__.py for a package dir.

    Used as part of meta builds.
    """
    from pathlib import Path

    from efro.error import CleanError
    from efro.terminal import Clr

    pcommand.disallow_in_batch()

    if len(sys.argv) != 3:
        raise CleanError('Expected a single path arg.')

    outpath = Path(sys.argv[2])
    outpath.parent.mkdir(parents=True, exist_ok=True)
    print(f'Meta-building {Clr.BLD}{outpath}{Clr.RST}')
    outpath.write_text(
        '# This file is autogenerated; do not hand-edit.\n',
        encoding='utf-8',
    )
+
+
def formatcode() -> None:
    """Format all of our C/C++/etc. code."""
    import efrotools.code

    pcommand.disallow_in_batch()

    # '-full' forces formatting even of files considered up-to-date.
    do_full = '-full' in sys.argv
    efrotools.code.format_project_cpp_files(pcommand.PROJROOT, do_full)
+
+
def formatscripts() -> None:
    """Format all of our Python/etc. code."""
    import efrotools.code

    pcommand.disallow_in_batch()

    # '-full' forces formatting even of files considered up-to-date.
    do_full = '-full' in sys.argv
    efrotools.code.format_project_python_files(pcommand.PROJROOT, do_full)
+
+
def formatmakefile() -> None:
    """Format the main makefile."""
    from efrotools.makefile import Makefile

    # Check the batch-mode guard *before* doing any file I/O, matching
    # the other format commands (previously this ran after the read).
    pcommand.disallow_in_batch()

    with open('Makefile', encoding='utf-8') as infile:
        original = infile.read()

    formatted = Makefile(original).get_output()

    # Only write if it changed.
    if formatted != original:
        with open('Makefile', 'w', encoding='utf-8') as outfile:
            outfile.write(formatted)
+
+
def cpplint() -> None:
    """Run lint-checking on all code deemed lint-able."""
    import efrotools.code

    pcommand.disallow_in_batch()

    do_full = '-full' in sys.argv
    efrotools.code.check_cpplint(pcommand.PROJROOT, do_full)
+
+
def scriptfiles() -> None:
    """List project script files.

    Pass -lines to use newlines as separators. The default is spaces.
    """
    import efrotools.code

    pcommand.disallow_in_batch()

    paths = efrotools.code.get_script_filenames(projroot=pcommand.PROJROOT)
    # Space-separated output would be ambiguous if any path had a space.
    assert not any(' ' in path for path in paths)
    separator = '\n' if '-lines' in sys.argv else ' '
    print(separator.join(paths))
+
+
def pylint() -> None:
    """Run pylint checks on our scripts."""
    import efrotools.code

    pcommand.disallow_in_batch()

    do_full = '-full' in sys.argv
    do_fast = '-fast' in sys.argv
    efrotools.code.pylint(pcommand.PROJROOT, do_full, do_fast)
+
+
def pylint_files() -> None:
    """Run pylint checks on provided filenames."""
    from efro.terminal import Clr
    from efro.error import CleanError
    import efrotools.code

    pcommand.disallow_in_batch()

    fnames = sys.argv[2:]
    if not fnames:
        raise CleanError('Expected at least 1 filename arg.')

    efrotools.code.runpylint(pcommand.PROJROOT, fnames)
    print(f'{Clr.GRN}Pylint Passed.{Clr.RST}')
+
+
def mypy() -> None:
    """Run mypy checks on our scripts."""
    import efrotools.code

    pcommand.disallow_in_batch()

    do_full = '-full' in sys.argv
    efrotools.code.mypy(pcommand.PROJROOT, do_full)
+
+
def mypy_files() -> None:
    """Run mypy checks on provided filenames."""
    from efro.terminal import Clr
    from efro.error import CleanError
    import efrotools.code

    pcommand.disallow_in_batch()

    fnames = sys.argv[2:]
    if not fnames:
        raise CleanError('Expected at least 1 filename arg.')

    try:
        efrotools.code.mypy_files(pcommand.PROJROOT, fnames)
        print(f'{Clr.GRN}Mypy Passed.{Clr.RST}')
    except Exception as exc:
        raise CleanError('Mypy Failed.') from exc
+
+
def dmypy() -> None:
    """Run mypy checks on our scripts using the mypy daemon."""
    import efrotools.code

    pcommand.disallow_in_batch()

    # Daemon mode takes no extra flags.
    efrotools.code.dmypy(pcommand.PROJROOT)
+
+
def pycharm() -> None:
    """Run PyCharm checks on our scripts."""
    import efrotools.code

    pcommand.disallow_in_batch()

    do_full = '-full' in sys.argv
    do_verbose = '-v' in sys.argv
    efrotools.code.check_pycharm(pcommand.PROJROOT, do_full, do_verbose)
+
+
def clioncode() -> None:
    """Run CLion checks on our code."""
    import efrotools.code

    pcommand.disallow_in_batch()

    do_full = '-full' in sys.argv
    do_verbose = '-v' in sys.argv
    efrotools.code.check_clioncode(pcommand.PROJROOT, do_full, do_verbose)
+
+
def androidstudiocode() -> None:
    """Run Android Studio checks on our code."""
    import efrotools.code

    pcommand.disallow_in_batch()

    do_full = '-full' in sys.argv
    do_verbose = '-v' in sys.argv
    efrotools.code.check_android_studio(pcommand.PROJROOT, do_full, do_verbose)
+
+
def tool_config_install() -> None:
    """Install a tool config file (with some filtering)."""
    from pathlib import Path

    from efro.error import CleanError

    import efrotools.toolconfig

    pcommand.disallow_in_batch()

    if len(sys.argv) != 4:
        raise CleanError('expected 2 args')

    srcpath = Path(sys.argv[2])
    dstpath = Path(sys.argv[3])

    efrotools.toolconfig.install_tool_config(
        pcommand.PROJROOT, srcpath, dstpath
    )
+
+
def sync_all() -> None:
    """Runs full syncs between all efrotools projects.

    This list is defined in the EFROTOOLS_SYNC_PROJECTS env var.
    This assumes that there is a 'sync-full' and 'sync-list' Makefile target
    under each project.
    """
    import os
    import subprocess
    import concurrent.futures

    from efro.error import CleanError
    from efro.terminal import Clr

    pcommand.disallow_in_batch()

    print(f'{Clr.BLD}Updating formatting for all projects...{Clr.RST}')
    projects_str = os.environ.get('EFROTOOLS_SYNC_PROJECTS')
    if projects_str is None:
        # Fixed: this message previously named 'EFROTOOL_SYNC_PROJECTS',
        # which is not the env var actually read above.
        raise CleanError('EFROTOOLS_SYNC_PROJECTS is not defined.')
    projects = projects_str.split(':')

    def _format_project(fproject: str) -> None:
        # Run 'make format' in a single project dir.
        fcmd = f'cd "{fproject}" && make format'
        subprocess.run(fcmd, shell=True, check=True)

    # No matter what we're doing (even if just listing), run formatting
    # in all projects before beginning. Otherwise if we do a sync and then
    # a preflight we'll often wind up getting out-of-sync errors due to
    # formatting changing after the sync.
    with concurrent.futures.ThreadPoolExecutor(
        max_workers=len(projects)
    ) as executor:
        # Converting this to a list will propagate any errors.
        list(executor.map(_format_project, projects))

    if len(sys.argv) > 2 and sys.argv[2] == 'list':
        # List mode (reuse the already-split projects list).
        for project in projects:
            cmd = f'cd "{project}" && make sync-list'
            print(cmd)
            subprocess.run(cmd, shell=True, check=True)

    else:
        # Real mode
        for i in range(2):
            if i == 0:
                print(
                    f'{Clr.BLD}Running sync pass 1'
                    f' (ensures all changes at dsts are pushed to src):'
                    f'{Clr.RST}'
                )
            else:
                print(
                    f'{Clr.BLD}Running sync pass 2'
                    f' (ensures latest src is pulled to all dsts):{Clr.RST}'
                )
            for project in projects:
                cmd = f'cd "{project}" && make sync-full'
                subprocess.run(cmd, shell=True, check=True)
        print(Clr.BLD + 'Sync-all successful!' + Clr.RST)
+
+
def sync() -> None:
    """Runs standard syncs between this project and others."""
    from efrotools import getprojectconfig
    from efrotools.sync import Mode, SyncItem, run_standard_syncs

    pcommand.disallow_in_batch()

    # Default to pull mode when no mode arg is supplied.
    mode = Mode.PULL if len(sys.argv) <= 2 else Mode(sys.argv[2])

    # Load sync-items from project config and run them.
    config = getprojectconfig(pcommand.PROJROOT)
    sync_items = [SyncItem(**i) for i in config.get('sync_items', [])]
    run_standard_syncs(pcommand.PROJROOT, mode, sync_items)
+
+
def compile_python_file() -> None:
    """Compile pyc files for packaging.

    This creates hash-based PYC files in opt level 1 with hash checks
    defaulting to off, so we don't have to worry about timestamps or
    loading speed hits due to hash checks. (see PEP 552).
    We just need to tell modders that they'll need to clear these
    cache files out or turn on debugging mode if they want to tweak
    the built-in scripts directly (or go through the asset build system which
    properly recreates the .pyc files).
    """
    import os
    import py_compile

    from efro.error import CleanError

    args = pcommand.get_args()
    if len(args) != 1:
        raise CleanError('Expected a single arg.')
    (fname,) = args
    # Print project-relative path when possible.
    relpath = os.path.abspath(fname).removeprefix(f'{pcommand.PROJROOT}/')
    pcommand.set_output(f'Compiling script: {relpath}')
    py_compile.compile(
        fname,
        doraise=True,
        optimize=1,
        invalidation_mode=py_compile.PycInvalidationMode.UNCHECKED_HASH,
    )
+
+
def copy_python_file() -> None:
    """Copy Python files for packaging.

    Expects exactly two args: source path and destination path.
    """
    import os
    import shutil
    from efro.error import CleanError

    args = pcommand.get_args()
    if len(args) != 2:
        raise CleanError('Expected 2 args.')

    src, dst = args

    # Print a project-relative path when possible.
    relpath = os.path.abspath(dst).removeprefix(f'{pcommand.PROJROOT}/')
    pcommand.set_output(f'Copying script: {relpath}')

    # Since we're making built files unwritable, we need to blow
    # away existing ones to allow this to succeed.
    if os.path.exists(dst):
        os.unlink(dst)

    os.makedirs(os.path.dirname(dst), exist_ok=True)
    shutil.copyfile(src, dst)

    # Make built files unwritable to save myself from accidentally
    # doing editing there and then blowing away my work.
    os.chmod(dst, 0o444)

    assert os.path.exists(dst)
+
+
def pytest() -> None:
    """Run pytest with project environment set up properly."""
    import os
    import platform
    import subprocess
    from efrotools import getprojectconfig, PYTHON_BIN
    from efro.error import CleanError

    pcommand.disallow_in_batch()

    # Grab our python paths for the project and stuff them in PYTHONPATH.
    pypaths = getprojectconfig(pcommand.PROJROOT).get('python_paths')
    if pypaths is None:
        raise CleanError('python_paths not found in project config.')

    pathsep = ';' if platform.system() == 'Windows' else ':'
    os.environ['PYTHONPATH'] = pathsep.join(pypaths)

    # Also tell Python interpreters not to write __pycache__ dirs
    # everywhere, which can screw up our builds.
    os.environ['PYTHONDONTWRITEBYTECODE'] = '1'

    # Flip on dev mode to hopefully be informed of more bad stuff
    # happening. https://docs.python.org/3/library/devmode.html
    os.environ['PYTHONDEVMODE'] = '1'

    # Do the thing.
    returncode = subprocess.run(
        [PYTHON_BIN, '-m', 'pytest'] + sys.argv[2:], check=False
    ).returncode
    if returncode != 0:
        sys.exit(returncode)
+
+
def makefile_target_list() -> None:
    """Prints targets in a makefile.

    Takes a single argument: a path to a Makefile.
    """
    from dataclasses import dataclass
    from efro.error import CleanError
    from efro.terminal import Clr

    pcommand.disallow_in_batch()

    @dataclass
    class _Entry:
        # One parsed makefile element: kind is 'target' or 'section',
        # line is its 0-based line index, title is the display text.
        kind: str
        line: int
        title: str

    if len(sys.argv) != 3:
        raise CleanError('Expected exactly one filename arg.')

    with open(sys.argv[2], encoding='utf-8') as infile:
        lines = infile.readlines()

    def _docstr(lines2: list[str], linenum: int) -> str:
        # Walk upward through contiguous '#' comment lines above a
        # target; the topmost comment line wins as the doc text.
        doc = ''
        j = linenum - 1
        while j >= 0 and lines2[j].startswith('#'):
            doc = lines2[j][1:].strip()
            j -= 1
        if doc != '':
            return ' - ' + doc
        return doc

    print(
        '----------------------\n'
        'Available Make Targets\n'
        '----------------------'
    )

    entries: list[_Entry] = []
    for i, line in enumerate(lines):
        # Targets: lines whose pre-colon token is alphanumeric (with
        # dashes/underscores allowed) and not underscore-private.
        if (
            ':' in line
            and line.split(':')[0].replace('-', '').replace('_', '').isalnum()
            and not line.startswith('_')
        ):
            entries.append(
                _Entry(kind='target', line=i, title=line.split(':')[0])
            )

        # Section titles: banner comment lines with a trailing ' #'.
        if (
            line.startswith('# ')
            and line.endswith(' #\n')
            and len(line.split()) > 2
        ):
            entries.append(
                _Entry(kind='section', line=i, title=line[1:-2].strip())
            )

    for i, entry in enumerate(entries):
        if entry.kind == 'section':
            # Don't print headers for empty sections.
            if i + 1 >= len(entries) or entries[i + 1].kind == 'section':
                continue
            print('\n' + entry.title + '\n' + '-' * len(entry.title))
        elif entry.kind == 'target':
            print(
                Clr.MAG
                + entry.title
                + Clr.BLU
                + _docstr(lines, entry.line)
                + Clr.RST
            )
+
+
def echo() -> None:
    """Echo with support for efro.terminal.Clr args (RED, GRN, BLU, etc).

    Prints a Clr.RST at the end so that can be omitted.
    """
    from efro.terminal import Clr

    pcommand.disallow_in_batch()

    clrnames = {n for n in dir(Clr) if n.isupper() and not n.startswith('_')}
    pieces: list[str] = []
    first = True
    for arg in sys.argv[2:]:
        # Color names become escape codes with no separating space;
        # everything else is space-separated plain text.
        if arg in clrnames:
            pieces.append(getattr(Clr, arg))
            continue
        if not first:
            pieces.append(' ')
        first = False
        pieces.append(arg)
    pieces.append(Clr.RST)
    print(''.join(pieces))
+
+
def urandom_pretty() -> None:
    """Spits out urandom bytes formatted for source files.

    First arg is the byte count; optional second arg is the max
    output line length (default 72).
    """
    # Note; this is not especially efficient. It should probably be
    # rewritten if ever needed in a performance-sensitive context.
    import os
    from efro.error import CleanError

    pcommand.disallow_in_batch()

    if len(sys.argv) not in (3, 4):
        raise CleanError(
            'Expected one arg (count) and possibly two (line len).'
        )
    size = int(sys.argv[2])
    linemax = 72 if len(sys.argv) < 4 else int(sys.argv[3])

    val = os.urandom(size)
    lines: list[str] = []
    line = b''

    for i in range(len(val)):
        char = val[i : i + 1]
        # Flush the current line if adding this byte would push its
        # repr past the limit.
        thislinelen = len(repr(line + char))
        if thislinelen > linemax:
            lines.append(repr(line))
            line = b''
        line += char
    if line:
        lines.append(repr(line))

    # Entries are already strings (repr output), so join them directly
    # (previously wrapped each in a redundant str() via a lambda named
    # with the ambiguous identifier 'l').
    bstr = '\n'.join(lines)
    print(f'({bstr})')
+
+
+def tweak_empty_py_files() -> None:
+    """Find any zero-length Python files and make them length 1."""
+    from efro.error import CleanError
+    import efrotools.pybuild
+
+    pcommand.disallow_in_batch()
+
+    # Expects a single root path; the actual walk/tweak logic lives in
+    # efrotools.pybuild.
+    if len(sys.argv) != 3:
+        raise CleanError('Expected exactly 1 path arg.')
+    efrotools.pybuild.tweak_empty_py_files(sys.argv[2])
+
+
+def make_ensure() -> None:
+    """Make sure a makefile target is up-to-date.
+
+    This can technically be done by simply `make --question`, but this
+    has some extra bells and whistles such as printing some of the commands
+    that would run.
+    Can be useful to run after cloud-builds to ensure the local results
+    consider themselves up-to-date.
+    """
+    # pylint: disable=too-many-locals
+    from efro.error import CleanError
+    from efro.terminal import Clr
+    import subprocess
+
+    pcommand.disallow_in_batch()
+
+    # Pull an optional '--dir PATH' pair out of the arg list before
+    # interpreting what remains as the (optional) target name.
+    dirpath: str | None = None
+    args = sys.argv[2:]
+    if '--dir' in args:
+        argindex = args.index('--dir')
+        dirpath = args[argindex + 1]
+        del args[argindex : argindex + 2]
+
+    if len(args) not in (0, 1):
+        raise CleanError('Expected zero or one target args.')
+    target = args[0] if args else None
+
+    # Dry-run make so we can inspect what it *would* do without
+    # actually building anything.
+    cmd = ['make', '--no-print-directory', '--dry-run']
+    if target is not None:
+        cmd.append(target)
+    results = subprocess.run(cmd, check=False, capture_output=True, cwd=dirpath)
+    out = results.stdout.decode()
+    err = results.stderr.decode()
+    if results.returncode != 0:
+        print(f'Failed command stdout:\n{out}\nFailed command stderr:\n{err}')
+        raise CleanError(f"Command failed during make_ensure: '{cmd}'.")
+
+    targetname: str = '' if target is None else target
+    lines = out.splitlines()
+    in_str = '' if dirpath is None else f"in directory '{dirpath}' "
+    # An up-to-date target produces a single 'Nothing to be done' line.
+    if len(lines) == 1 and 'Nothing to be done for ' in lines[0]:
+        print(f"make_ensure: '{targetname}' target {in_str}is up to date.")
+    else:
+        # Out of date: show a (possibly truncated) listing of the
+        # commands make would run, then fail with a CleanError.
+        maxlines = 20
+        if len(lines) > maxlines:
+            outlines = '\n'.join(
+                lines[:maxlines] + [f'(plus {len(lines)-maxlines} more lines)']
+            )
+        else:
+            outlines = '\n'.join(lines)
+
+        print(
+            f"make_ensure: '{targetname}' target {in_str}"
+            f'is out of date; would run:\n\n'
+            '-------------------------- MAKE-ENSURE COMMANDS BEGIN '
+            f'--------------------------\n{Clr.YLW}'
+            f'{outlines}{Clr.RST}\n'
+            '--------------------------- MAKE-ENSURE COMMANDS END '
+            '---------------------------\n'
+        )
+        raise CleanError(
+            f"make_ensure: '{targetname}' target {in_str}is out of date."
+        )
+
+
+def make_target_debug() -> None:
+    """Debug makefile src/target mod times given src and dst path.
+
+    Built to debug stubborn Makefile targets that insist on being
+    rebuilt just after being built via a cloud target.
+    """
+    import os
+    import datetime
+
+    from efro.error import CleanError
+
+    pcommand.disallow_in_batch()
+
+    # from efro.util import ago_str, utc_now
+
+    # Expects exactly two paths: the make source and its destination.
+    args = sys.argv[2:]
+    if len(args) != 2:
+        raise CleanError('Expected 2 args.')
+
+    def _utc_mod_time(path: str) -> datetime.datetime:
+        """Return the file's mtime as a timezone-aware UTC datetime."""
+        mtime = os.path.getmtime(path)
+        mdtime = datetime.datetime.fromtimestamp(mtime, datetime.timezone.utc)
+        # mdtime.replace(tzinfo=datetime.timezone.utc)
+        return mdtime
+
+    # srcname = os.path.basename(args[0])
+    # dstname = os.path.basename(args[1])
+    srctime = _utc_mod_time(args[0])
+    dsttime = _utc_mod_time(args[1])
+    # now = utc_now()
+    # src_ago = ago_str(srctime, maxparts=3, decimals=2, now=now)
+    # dst_ago = ago_str(dsttime, maxparts=3, decimals=2, now=now)
+    # Print sub-second precision so close-together mod times can be
+    # compared directly.
+    srctimestr = (
+        f'{srctime.hour}:{srctime.minute}:{srctime.second}:'
+        f'{srctime.microsecond}'
+    )
+    dsttimestr = (
+        f'{dsttime.hour}:{dsttime.minute}:{dsttime.second}:'
+        f'{dsttime.microsecond}'
+    )
+    print(f'SRC modified at {srctimestr}.')
+    print(f'DST modified at {dsttimestr}.')
diff --git a/tools/efrotools/pcommands2.py b/tools/efrotools/pcommands2.py
new file mode 100644
index 00000000..67b73b07
--- /dev/null
+++ b/tools/efrotools/pcommands2.py
@@ -0,0 +1,216 @@
+# Released under the MIT License. See LICENSE for details.
+#
+"""Standard snippets that can be pulled into project pcommand scripts.
+
+A snippet is a mini-program that directly takes input from stdin and does
+some focused task. This module is a repository of common snippets that can
+be imported into projects' pcommand script for easy reuse.
+"""
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING
+
+from efrotools import pcommand
+
+if TYPE_CHECKING:
+ pass
+
+
+def with_build_lock() -> None:
+    """Run a shell command wrapped in a build-lock."""
+    from efro.error import CleanError
+    from efrotools.buildlock import BuildLock
+
+    import subprocess
+
+    pcommand.disallow_in_batch()
+
+    # argv[2] is the lock name; everything after is the command to run.
+    args = sys.argv[2:]
+    if len(args) < 2:
+        raise CleanError(
+            'Expected one lock-name arg and at least one command arg'
+        )
+    # NOTE(review): args are joined and run with shell=True, so shell
+    # metacharacters in them will be interpreted; presumably callers
+    # pass trusted build commands only — confirm.
+    with BuildLock(args[0]):
+        subprocess.run(' '.join(args[1:]), check=True, shell=True)
+
+
+def sortlines() -> None:
+    """Sort provided lines. For tidying import lists, etc."""
+    from efro.error import CleanError
+
+    pcommand.disallow_in_batch()
+
+    # Takes a single arg containing newline-separated lines.
+    if len(sys.argv) != 3:
+        raise CleanError('Expected 1 arg.')
+    val = sys.argv[2]
+    lines = val.splitlines()
+    # Case-insensitive sort; original casing is preserved in output.
+    print('\n'.join(sorted(lines, key=lambda l: l.lower())))
+
+
+def openal_build_android() -> None:
+    """Build openalsoft for android."""
+    from efro.error import CleanError
+    from efrotools.openalbuild import build
+
+    pcommand.disallow_in_batch()
+
+    # Expects an arch arg and a build-mode arg; validation of the
+    # actual values is left to the build() call.
+    args = sys.argv[2:]
+    if len(args) != 2:
+        raise CleanError(
+            'Expected one arg: arm, arm64, x86, x86_64'
+            ' and one arg: debug, release'
+        )
+
+    build(args[0], args[1])
+
+
+def openal_gather() -> None:
+    """Gather built openalsoft libs into src."""
+    from efro.error import CleanError
+    from efrotools.openalbuild import gather
+
+    pcommand.disallow_in_batch()
+
+    # Takes no args; the gather logic lives in efrotools.openalbuild.
+    args = sys.argv[2:]
+    if args:
+        raise CleanError('No args expected.')
+
+    gather()
+
+
+def pyright() -> None:
+    """Run Pyright checks on project Python code."""
+    import subprocess
+
+    from efro.terminal import Clr
+
+    from efro.error import CleanError
+
+    pcommand.disallow_in_batch()
+
+    print(f'{Clr.BLU}Running Pyright (experimental)...{Clr.RST}')
+    # Any failure (nonzero exit, missing binary, etc.) is surfaced as a
+    # CleanError rather than a raw traceback.
+    try:
+        subprocess.run(
+            ['pyright', '--project', '.pyrightconfig.json'], check=True
+        )
+    except Exception as exc:
+        raise CleanError('Pyright failed.') from exc
+
+
+def build_pcommandbatch() -> None:
+    """Build a version of pcommand geared for large batches of commands."""
+
+    from efro.error import CleanError
+    from efro.terminal import Clr
+
+    import efrotools.pcommandbatch as pcb
+
+    pcommand.disallow_in_batch()
+
+    # All args but the last are input paths; the last is the output.
+    args = pcommand.get_args()
+    if len(args) < 2:
+        raise CleanError('Expected at least 2 args.')
+
+    inpaths = args[:-1]
+    outpath = args[-1]
+    print(f'Creating batch executable: {Clr.BLD}{outpath}{Clr.RST}')
+    pcb.build_pcommandbatch(inpaths, outpath)
+
+
+def run_pcommandbatch_server() -> None:
+    """Run a server for handling pcommands."""
+    from efro.error import CleanError
+
+    from efrotools import extract_arg
+
+    import efrotools.pcommandbatch as pcb
+
+    pcommand.disallow_in_batch()
+
+    args = pcommand.get_args()
+
+    # All three flags are required; extract_arg pulls each flag/value
+    # pair out of the arg list.
+    idle_timeout_secs = int(extract_arg(args, '--timeout', required=True))
+    state_dir = extract_arg(args, '--state-dir', required=True)
+    instance = extract_arg(args, '--instance', required=True)
+
+    # Anything left over after flag extraction is an error.
+    if args:
+        raise CleanError(f'Unexpected args: {args}.')
+
+    pcb.run_pcommandbatch_server(
+        idle_timeout_secs=idle_timeout_secs,
+        state_dir=state_dir,
+        instance=instance,
+    )
+
+
+def pcommandbatch_speed_test() -> None:
+    """Test batch mode speeds."""
+    # pylint: disable=too-many-locals
+
+    import time
+    import subprocess
+    import threading
+    from multiprocessing import cpu_count
+    from concurrent.futures import ThreadPoolExecutor
+
+    from efro.error import CleanError
+    from efro.terminal import Clr
+
+    # Single required arg: path to the pcommandbatch binary to test.
+    args = pcommand.get_args()
+    if len(args) != 1:
+        raise CleanError('Expected one arg.')
+
+    batch_binary_path = args[0]
+    thread_count = cpu_count()
+
+    class _Test:
+        # Shared counters tracking how many runs are currently in
+        # flight and how many have completed, guarded by self.lock.
+        def __init__(self) -> None:
+            self.in_flight = 0
+            self.lock = threading.Lock()
+            self.total_runs = 0
+
+        def run_standalone(self) -> None:
+            """Run an instance of the test in standalone mode."""
+            subprocess.run(['tools/pcommand', 'null'], check=True)
+            self._finish_run()
+
+        def run_batch(self) -> None:
+            """Run an instance of the test in batch mode."""
+            subprocess.run([batch_binary_path, 'null'], check=True)
+            self._finish_run()
+
+        def _finish_run(self) -> None:
+            with self.lock:
+                self.in_flight -= 1
+                assert self.in_flight >= 0
+                self.total_runs += 1
+
+    # Run each mode flat-out for a fixed duration and report throughput.
+    test_duration = 5.0
+    for name, batch in [('regular pcommand', False), ('pcommandbatch', True)]:
+        print(f'{Clr.BLU}Testing {name} speed...{Clr.RST}')
+        start_time = time.monotonic()
+        test = _Test()
+        total_runs_at_timeout = 0
+        with ThreadPoolExecutor(max_workers=thread_count) as executor:
+            # Keep submitting runs until the test duration elapses.
+            # NOTE(review): test.in_flight is read here without holding
+            # test.lock; looks acceptable for a rough throughput test
+            # since we only use it to throttle submissions — confirm.
+            while True:
+                # Try to keep all worker threads busy.
+                while test.in_flight < thread_count * 2:
+                    with test.lock:
+                        test.in_flight += 1
+                    executor.submit(
+                        test.run_batch if batch else test.run_standalone
+                    )
+                if time.monotonic() - start_time > test_duration:
+                    total_runs_at_timeout = test.total_runs
+                    break
+                time.sleep(0.0001)
+        print(
+            f'Total runs in {test_duration:.0f} seconds:'
+            f' {Clr.SMAG}{Clr.BLD}{total_runs_at_timeout}{Clr.RST}.'
+        )
+
+
+def null() -> None:
+    """Do nothing. Useful for speed tests and whatnot."""
+    # Intentionally empty; pcommandbatch_speed_test invokes this as a
+    # minimal command to measure per-invocation overhead.
diff --git a/tools/pcommand b/tools/pcommand
index afc42b70..de48733f 100755
--- a/tools/pcommand
+++ b/tools/pcommand
@@ -4,23 +4,22 @@
"""A collection of commands for use with this project.
All top level functions here can be run by passing them as the first
-argument on the command line. (or pass no arguments to get a list of them).
+argument on the command line. (or pass no arguments to get a list of
+them).
"""
-# Note: we import as little as possible here at the module level to
-# keep launch times fast; most imports should happen within individual command
-# functions.
+# Note: we import as little as possible here at the module level to keep
+# launch times fast; most imports should happen within individual
+# command functions.
from __future__ import annotations
-from typing import TYPE_CHECKING
+from efrotools import pcommand
-# Pull in commands we want to expose. Its more efficient to define them in
-# modules rather than inline here because we'll be able to load them via pyc.
-# pylint: disable=unused-import
-from efrotools.pcommand import (
- PROJROOT,
- pcommand_main,
+# Pull in commands we want to expose. It's more efficient to define
+# them in modules rather than inline here (loadable via pyc).
+# pylint: disable=unused-import
+from efrotools.pcommands import (
formatcode,
formatscripts,
formatmakefile,
@@ -42,7 +41,8 @@ from efrotools.pcommand import (
spelling_all,
pytest,
echo,
- compile_python_files,
+ compile_python_file,
+ copy_python_file,
pyver,
try_repeat,
xcodebuild,
@@ -51,14 +51,18 @@ from efrotools.pcommand import (
make_ensure,
make_target_debug,
)
-from efrotools.pcommand2 import (
+from efrotools.pcommands2 import (
with_build_lock,
sortlines,
openal_build_android,
openal_gather,
pyright,
+ build_pcommandbatch,
+ run_pcommandbatch_server,
+ pcommandbatch_speed_test,
+ null,
)
-from batools.pcommand import (
+from batools.pcommands import (
resize_image,
check_clean_safety,
archive_old_builds,
@@ -103,8 +107,6 @@ from batools.pcommand import (
cmake_prep_dir,
gen_binding_code,
gen_flat_data_code,
- wsl_path_to_win,
- wsl_build_check_win_drive,
genchangelog,
android_sdk_utils,
logcat,
@@ -112,7 +114,7 @@ from batools.pcommand import (
gen_dummy_modules,
version,
)
-from batools.pcommand2 import (
+from batools.pcommands2 import (
gen_python_init_module,
gen_monolithic_register_modules,
py_examine,
@@ -124,12 +126,12 @@ from batools.pcommand2 import (
spinoff_test,
spinoff_check_submodule_parent,
tests_warm_start,
+ wsl_path_to_win,
+ wsl_build_check_win_drive,
)
# pylint: enable=unused-import
-if TYPE_CHECKING:
- pass
if __name__ == '__main__':
- pcommand_main(globals())
+ pcommand.pcommand_main(globals())