This guide walks you through the steps required to run a PCIbex experiment locally. You can either edit the local files directly or use the PCIbex Farm to download a standalone version of your experiment.
Put a static.py file in your experiment folder. You can copy the code from here to make the static.py file:
#!/usr/bin/python3
import json
import os
import re
import sys
from shutil import copyfile
input_dir = next((x for n, x in enumerate(sys.argv) if n>0 and sys.argv[n-1]=="-i"), ".")
output_dir = next((x for n, x in enumerate(sys.argv) if n>0 and sys.argv[n-1]=="-o"), "static")
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
CHUNKS_DICT = {}
def safe_read_file(path):
    """Safely read a file, trying several encodings in turn"""
    encodings = ['utf-8', 'utf-8-sig', 'latin1', 'cp1252', 'iso-8859-1']
    for encoding in encodings:
        try:
            with open(path, "r", encoding=encoding) as f:
                return f.read()
        except UnicodeDecodeError:
            continue
        except Exception as e:
            print(f"Error reading {path} with {encoding}: {e}")
            continue
    # If all encodings fail, read as binary and decode with errors='replace'
    try:
        with open(path, "rb") as f:
            content = f.read()
        return content.decode('utf-8', errors='replace')
    except Exception as e:
        print(f"Failed to read {path}: {e}")
        return ""
chunk_dir = os.path.join(input_dir,"chunk_includes")
# Text-like resources are inlined into chunks.js; everything else is copied as-is
if os.path.exists(chunk_dir):
    for file in os.listdir(chunk_dir):
        path = os.path.join(chunk_dir, file)
        f = file.lower()
        if f.endswith((".html", ".htm", ".tsv", ".csv")):
            CHUNKS_DICT[file] = safe_read_file(path)
        else:
            copyfile(path, os.path.join(output_dir, file))
# Write chunks.js with proper JSON encoding
with open(os.path.join(output_dir,"chunks.js"), "w", encoding='utf-8') as f:
f.write(f"window.CHUNKS_DICT = {json.dumps(CHUNKS_DICT, ensure_ascii=False)};")
# Collect css_includes/, js_includes/ and data_includes/ files, copying each
# to the output folder and recording the HTML tag needed to load it
js_and_css = {
    'css': {
        'ext': '.css',
        'files': [],
        'format': "<link rel='stylesheet' type='text/css' href='./css_includes/{file}'>"
    },
    'js': {
        'ext': '.js',
        'files': [],
        'format': "<script type='text/javascript' src='./js_includes/{file}'></script>"
    },
    'data': {
        'ext': '.js',
        'files': [],
        'format': "<script type='text/javascript' src='./data_includes/{file}'></script>"
    }
}
for d in js_and_css:
    dirname = f"{d}_includes"
    dirs = {
        'in': os.path.join(input_dir, dirname),
        'out': os.path.join(output_dir, dirname),
    }
    if not os.path.exists(dirs['in']):
        continue
    if not os.path.exists(dirs['out']):
        os.mkdir(dirs['out'])
    for file in os.listdir(dirs['in']):
        if not file.lower().endswith(js_and_css[d]['ext']):
            continue
        if d == "css" and not file.lower().startswith("global_"):
            # Strip the .css suffix (rstrip(".css") would also eat trailing
            # 'c'/'s' characters from the file name itself)
            prefix = re.sub(r"\.css$", "", file, flags=re.IGNORECASE)
            in_bracket = 0
            with open(os.path.join(dirs['out'], file), "w", encoding='utf-8') as css_output:
                css_content = safe_read_file(os.path.join(dirs['in'], file))
                for line in css_content.splitlines(True):
                    if in_bracket == 0:
                        # Outside a rule body: namespace class selectors with the file's prefix
                        line = re.sub(r"^([^{]+)", lambda x: re.sub(r"\.", f".{prefix}-", x[0]), line)
                    css_output.write(line)
                    in_bracket += line.count('{')
                    in_bracket -= line.count('}')
        else:
            copyfile(os.path.join(dirs['in'], file), os.path.join(dirs['out'], file))
        js_and_css[d]['files'].append(js_and_css[d]['format'].format(file=file))
# Copy www files with error handling
www_files = [
    "jquery.min.js", "jquery-ui.min.js", "jsDump.js", "PluginDetect.js", "util.js",
    "shuffle.js", "json.js", "soundmanager2-jsmin.js", "backcompatcruft.js", "conf.js"
]
for jsfile in www_files:
    src_path = os.path.join(input_dir, 'www', jsfile)
    if os.path.exists(src_path):
        copyfile(src_path, os.path.join(output_dir, jsfile))
    else:
        print(f"Warning: {jsfile} not found in www/ directory")
# Copy main.js
main_js_path = os.path.join(input_dir, 'other_includes', 'main.js')
if os.path.exists(main_js_path):
    copyfile(main_js_path, os.path.join(output_dir, 'main.js'))
else:
    print("Warning: main.js not found in other_includes/ directory")
# JS shim: when the page runs from disk, serve chunks from chunks.js and save
# results as a local download instead of POSTing them to the server.
# Raw string so the regex backslashes survive intact.
static_js = r"""
const get_params = Object.fromEntries(window.location.search.replace(/^\?/,'').split("&").map(v=>v.split('=')));
const is_static = ("static" in get_params || !window.location.protocol.toLowerCase().match(/^https?:$/));
window.__server_py_script_name__ = window.__server_py_script_name__ || "/";
if (window.__counter_value_from_server__ === undefined)
    window.__counter_value_from_server__ = Math.round(1000 * Math.random());
if ("withsquare" in get_params)
    window.__counter_value_from_server__ = parseInt(get_params.withsquare);
const oldAjax = $.ajax;
$.ajax = function(...params) {
    const splitUrl = ((params[0]||Object()).url||"").split("?");
    const localDict = window.CHUNKS_DICT||Object();
    if (splitUrl[1] == "allchunks=1" && (is_static || Object.keys(localDict).length>0))
        return params[0].success.call(this, JSON.stringify(localDict));
    else if (!is_static)
        return oldAjax.apply(this, params);
    else if (params[0].type == "POST") {
        // Offer the results payload as a local .bak download
        const blob = new Blob([params[0].data], {type: "text/plain"});
        const link = document.createElement("A");
        link.download = "results_"+Date.now()+".bak";
        link.href = URL.createObjectURL(blob);
        document.body.append(link);
        link.click();
    }
    if (params[0].success instanceof Function)
        return params[0].success.call(this);
}
"""
# Write the final HTML file with UTF-8 encoding
with open(os.path.join(output_dir,"experiment.html"), "w", encoding='utf-8') as f:
f.write(f"""<!DOCTYPE html>
<html>
<head>
<meta http-equiv = "Content-Type" content = "text/html; charset=utf-8">
<!-- JQuery -->
<script type="text/javascript" src="jquery.min.js"></script>
<script type="text/javascript" src="jquery-ui.min.js"></script>
<!-- JSDump debugging utility. -->
<script type="text/javascript" src="jsDump.js"></script>
<!-- Script for detecting plugins used to create unique MD5 hash. -->
<script type="text/javascript" src="PluginDetect.js"></script>
<!-- General utilities (map, filter, ...) -->
<script type="text/javascript" src="util.js"></script>
<!-- Code for executing shuffle sequences. -->
<script type="text/javascript" src="shuffle.js"></script>
<!-- JSON serialization code. -->
<script type="text/javascript" src="json.js"></script>
<!-- Sound manager. -->
<script type="text/javascript" src="soundmanager2-jsmin.js"></script>
<!-- Backwards compatability cruft to ensure that old JS data files work. -->
<script type="text/javascript" src="backcompatcruft.js"></script>
<!-- JS includes. -->
{' '.join(sorted(js_and_css['js']['files']))}
<!-- Data file JS includes. -->
{' '.join(sorted(js_and_css['data']['files']))}
<!-- Set up configuration variables. -->
<script type="text/javascript" src="conf.js"></script>
<script type="text/javascript" src="chunks.js"></script>
<script type="text/javascript" src="/?getcounter=1"></script>
<script type="text/javascript">
{static_js}
</script>
<!-- The main body of JS code. -->
<script type="text/javascript" src="main.js"></script>
{' '.join(sorted(js_and_css['css']['files']))}
<!-- To be reset by JavaScript. -->
<title>Experiment</title>
<script type="text/javascript">
<!--
document.title = conf_pageTitle;
-->
</script>
</head>
<body id="bod">
<script type="text/javascript">
<!--
-->
</script>
<noscript>
<p>You need to have Javascript enabled in order to use this page.</p>
</noscript>
</body>
</html>
""")
print("✓ Static experiment created successfully!")
print(f"✓ Open {os.path.join(output_dir, 'experiment.html')} in your browser to run the experiment")
Run static.py to Generate Static Files
Open cmd in your experiment folder, type the following command, then press ENTER (on Windows you may need python instead of python3):
python3 static.py -i . -o ../static_experiment
You will now have a folder named "static_experiment" just outside your current folder. Open the "experiment.html" file within it to start your experiment.
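Based on what static.py writes, the output folder should look roughly like this (the exact contents depend on your experiment):
static_experiment\
├── experiment.html
├── chunks.js
├── conf.js
├── main.js
├── jquery.min.js (plus the other scripts copied from www\)
├── css_includes\
├── js_includes\
└── data_includes\
Images, audio, and other non-text files from chunk_includes are copied into the folder root alongside experiment.html.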
Experiments are generally shown in a Latin square design; here is a way to implement it locally. Skip this section if you are not using a Latin square design.
You can create multiple instances of your experiment, one per group of the Latin square; with this you will have multiple "static_experiment" folders. Make sure your experiment is set up for a Latin square design, i.e. you must have a Group column in your csv file, as in the sketch below.
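For reference, a design file with a Group column might look like this (the item numbers, sentences, and extra columns are hypothetical):
Group,Item,Sentence
A,1,The cat chased the dog.
B,1,The dog chased the cat.
A,2,The boy greeted the girl.
B,2,The girl greeted the boy.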
When you open the "experiment.html" file, add ?withsquare=1, ?withsquare=2, and so on to the end of the URL. Which values you need depends on how many groups you have in your Group column. If you have 4 groups (A, B, C, D), have 4 instances:
.../experiment.html?withsquare=1
.../experiment.html?withsquare=2
.../experiment.html?withsquare=3
.../experiment.html?withsquare=4
If you have fewer groups, have fewer instances.
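If you prefer not to type the query strings by hand, a small helper can generate a launcher page with one link per group. This is a minimal sketch, not part of PCIbex; the file names make_launcher.py and launcher.html, and the group count, are assumptions you should adapt:
#!/usr/bin/python3
# Hypothetical helper: writes launcher.html next to experiment.html with one
# link per Latin square group. Adjust NUM_GROUPS to match your Group column.
NUM_GROUPS = 4

links = "\n".join(
    f'<li><a href="experiment.html?withsquare={n}">Group {n}</a></li>'
    for n in range(1, NUM_GROUPS + 1)
)
with open("launcher.html", "w", encoding="utf-8") as f:
    f.write(f"<!DOCTYPE html>\n<html><body><ul>\n{links}\n</ul></body></html>\n")
print("Wrote launcher.html")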
When you run your experiment, the results are saved as a local download with a .bak extension; the shim in static.py names it results_<number>.bak, where the number is the download time in milliseconds.
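Each .bak file contains the JSON payload the experiment would normally have POSTed to the PCIbex server. The combine script below relies on just two positions in that array: index 2 holds the column headers and index 3 holds the rows, with each cell encoded as a [column_index, value] pair. A minimal illustration with hypothetical values:
# Hypothetical shape of a .bak payload, as read by combine_penn.py below;
# the real header names and the leading entries depend on your experiment.
payload = [
    "...",                                        # index 0: not used by the combine script
    "...",                                        # index 1: not used by the combine script
    ["ReceptionTime", "Item", "Group", "Value"],  # index 2: column headers
    [                                             # index 3: rows of [column_index, value] pairs
        [[0, 1700000000], [1, 1], [2, "A"], [3, "yes"]],
    ],
]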
Make a combine_penn.py file with the following code:
import os
import json
import csv
import argparse
import glob
def parse_penncontroller_data(input_path):
    """
    Reads a single PennController results file and parses it into headers and data rows.
    """
    try:
        with open(input_path, 'r', encoding='utf-8') as f:
            file_content = f.read()
        data = json.loads(file_content)
        headers = data[2]
        raw_data_rows = data[3]
        num_columns = len(headers)
        processed_rows = []
        for raw_row in raw_data_rows:
            new_row = [''] * num_columns
            for item in raw_row:
                # Each cell is stored as a [column_index, value] pair
                if isinstance(item, list) and len(item) == 2:
                    col_index, value = item
                    if 0 <= col_index < num_columns:
                        new_row[col_index] = value
            processed_rows.append(new_row)
        print(f" -> Successfully parsed {os.path.basename(input_path)}")
        return headers, processed_rows
    except (json.JSONDecodeError, IndexError, TypeError):
        # Expected for files that aren't results data, so it's just a notice.
        print(f" -> NOTICE: Skipping {os.path.basename(input_path)} as it's not a valid PennController results file.")
        return None, None
    except Exception as e:
        print(f" -> ❌ An unexpected error occurred with {input_path}: {e}")
        return None, None
def main():
    """
    Main function to find all .bak files, combine them, and save as a single CSV.
    """
    parser = argparse.ArgumentParser(
        description="Combines PennController .bak result files from a directory into a single CSV file.",
        formatter_class=argparse.RawTextHelpFormatter
    )
    # The script takes an input DIRECTORY and an output FILE path
    parser.add_argument("input_dir",
                        help="Path to the directory containing the input .bak files.")
    parser.add_argument("output_file_path",
                        help="Path for the final combined CSV file (e.g., 'results/combined_data.csv').")
    args = parser.parse_args()
    # This pattern ensures ONLY .bak files are processed.
    search_pattern = os.path.join(args.input_dir, '*.bak')
    files_to_process = glob.glob(search_pattern)
    if not files_to_process:
        print(f"No .bak files found in directory: {args.input_dir}")
        return
    print(f"\nFound {len(files_to_process)} .bak files to process. Other file types will be ignored.\n")
    all_rows = []
    final_headers = None
    for file_path in files_to_process:
        source_id = os.path.splitext(os.path.basename(file_path))[0]
        headers, rows = parse_penncontroller_data(file_path)
        if headers is None or rows is None:
            continue
        # Headers come from the first valid file; all files are assumed to share them
        if final_headers is None:
            final_headers = ["SourceFile"] + headers
        for row in rows:
            all_rows.append([source_id] + row)
    if not all_rows:
        print("\nNo valid data was processed. Output file will not be created.")
        return
    # Create the parent directory (e.g. 'final_results') if needed
    output_dir = os.path.dirname(args.output_file_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    try:
        # Write everything to the specified file (e.g. 'combined_data.csv')
        with open(args.output_file_path, 'w', newline='', encoding='utf-8') as f_out:
            writer = csv.writer(f_out)
            writer.writerow(final_headers)
            writer.writerows(all_rows)
        print(f"\n✅ Success! All data combined and saved to: {args.output_file_path}")
    except IOError as e:
        print(f"\n❌ Error: Could not write to output file '{args.output_file_path}'. Reason: {e}")

if __name__ == "__main__":
    main()
To combine your .bak files into a single CSV file, place them in an "input_data_bak" folder like this:
├── input_data_bak\
│   ├── 23543545.bak
│   ├── 87654321.bak
│   └── 99991111.bak
Put your combine_penn.py file just outside that folder, like this:
├── combine_penn.py
│
├── input_data_bak\
│   ├── 23543545.bak
│   ├── 87654321.bak
│   └── 99991111.bak
│
└── final_results\  (this can be empty; the script will create it)
Open cmd in that folder, type the following command, then press ENTER:
python combine_penn.py input_data_bak final_results/combined_data.csv
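To sanity-check the combined file, you can count how many rows came from each .bak file. A minimal sketch using only the standard library; the SourceFile column is the one combine_penn.py prepends, and the path assumes the command above:
import csv
from collections import Counter

# Count result rows per source .bak file in the combined CSV
with open("final_results/combined_data.csv", newline="", encoding="utf-8") as f:
    counts = Counter(row["SourceFile"] for row in csv.DictReader(f))
for source, n in counts.items():
    print(f"{source}: {n} rows")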