Commit 722fa03d5e (parent 3d48c177d3)
Binary file not shown.
Binary file not shown.
@@ -43,7 +43,7 @@ local _D = function(data)
     data = string.gsub(data, '[^'.._B64..'=]', '')
     return (data:gsub('.', function(x)
         if (x == '=') then return '' end
-        local r,f='',(_B_64:find(x)-1)
+        local r,f='',(_B64:find(x)-1)
         for i=6,1,-1 do r=r..(f%2^i-f%2^(i-1)>0 and '1' or '0') end
         return r;
     end):gsub('%d%d%d%d%d%d%d%d', function(x)
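Note: the one-character change above fixes a nil lookup. The decoder's alphabet string is named _B64, so indexing _B_64 would fail unless that name is defined elsewhere. A minimal Python sketch of the same character-to-bits step (assuming the standard Base64 alphabet; the actual _B64 value lies outside this hunk):

# Hypothetical illustration of the per-character step in _D:
# look up the character's 0-based index in the alphabet, then emit its 6 bits.
_B64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

def char_to_bits(x: str) -> str:
    if x == '=':
        return ''
    f = _B64.index(x)          # plays the role of _B64:find(x) - 1 in the Lua code
    return format(f, '06b')    # the Lua loop builds this bit string one bit at a time

assert char_to_bits('A') == '000000'
assert char_to_bits('/') == '111111'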
@@ -178,4 +178,4 @@ end)
         return f"-- Error: {str(e)}"
 
 def obfuscate(code):
-    return LuauVMObfuscator().obfuscate(code)
+    return LuauVMObfuscator().obfuscate(code)
@@ -5,21 +5,20 @@ class Lexer:
         self.code = code
         self.tokens = []
         self.pos = 0
+        # Correctly formatted rules with escaped backslashes for regex character classes
         self.rules = [
-            ('COMMENT', r'--\[\[.*?\].*?\]\]|--.*'),
-            ('STRING', r'"(?:\\.|[^"\\])*"|\'(?:\\.|[^\'\])*\'|\[\[.*?\].*?\]\]'),
-            ('NUMBER', r'\d+\.?\d*'),
-            ('KEYWORD', r'\b(and|break|do|else|elseif|end|false|for|function|if|in|local|nil|not|or|repeat|return|then|true|until|while)\b'),
-            ('IDENT', r'[a-zA-Z_][a-zA-Z0-9_]*'),
-            ('OP', r'==|~=|<=|>=|\.\.\.|\.\.|>>|<<|[\+\-\*/%^#=\<>\(\)\{\}\[\];:,\.]'),
-            ('SPACE', r'\s+')
+            ('COMMENT', re.compile(r'--\[\[.*?\].*?|--.*', re.DOTALL)),
+            ('STRING', re.compile(r'"(?:\\.|[^"\\])*"|\'(?:\\.|[^\\\])*\'|\[\[.*?\].*?\]', re.DOTALL)),
+            ('NUMBER', re.compile(r'\d+\.?\d*')),
+            ('KEYWORD', re.compile(r'\b(and|break|do|else|elseif|end|false|for|function|if|in|local|nil|not|or|repeat|return|then|true|until|while)\b')),
+            ('IDENT', re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')),
+            ('OP', re.compile(r'==|~=|<=|>=|\.\.\.|\.|>>|<<|[\+\-\*/%^#=\<>\(\)\{\}\[\];:, ]'))
         ]
 
     def tokenize(self):
         while self.pos < len(self.code):
             match = None
-            for name, pattern in self.rules:
-                regex = re.compile(pattern, re.DOTALL)
+            for name, regex in self.rules:
                 match = regex.match(self.code, self.pos)
                 if match:
                     if name != 'SPACE' and name != 'COMMENT':
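Note: the rewrite above compiles each rule once in __init__ and iterates (name, regex) pairs, instead of calling re.compile on every pass through the tokenize loop. A standalone sketch of that pattern with simplified rules (not the repository's exact rule set):

import re

RULES = [
    ('NUMBER', re.compile(r'\d+\.?\d*')),
    ('IDENT', re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')),
    ('OP', re.compile(r'==|[=()+\-*/]')),
    ('SPACE', re.compile(r'\s+')),
]

def tokenize(code):
    pos, tokens = 0, []
    while pos < len(code):
        for name, regex in RULES:
            m = regex.match(code, pos)   # anchored at pos, like regex.match(self.code, self.pos)
            if m:
                if name != 'SPACE':
                    tokens.append((name, m.group(0)))
                pos = m.end()
                break
        else:
            pos += 1                     # skip characters no rule matches
    return tokens

print(tokenize('x = 10'))  # [('IDENT', 'x'), ('OP', '='), ('NUMBER', '10')]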
@@ -27,23 +26,24 @@ class Lexer:
                 self.pos = match.end()
                 break
             if not match:
-                self.pos += 1 # Skip unknown
+                # Skip unknown characters
+                self.pos += 1
         return self.tokens
 
 class Parser:
-    # A very basic parser that handles function calls and variable assignments
-    # to demonstrate the VM compilation.
     def __init__(self, tokens):
         self.tokens = tokens
         self.pos = 0
 
-    def peek(self):
-        return self.tokens[self.pos] if self.pos < len(self.tokens) else (None, None)
+    def peek(self, offset=0):
+        index = self.pos + offset
+        return self.tokens[index] if index < len(self.tokens) else (None, None)
 
-    def consume(self, expected_type=None):
+    def consume(self, expected_type=None, expected_value=None):
         token = self.peek()
-        if expected_type and token[0] != expected_type:
-            return None
+        if not token[0]: return None
+        if expected_type and token[0] != expected_type: return None
+        if expected_value and token[1] != expected_value: return None
         self.pos += 1
         return token
 
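Note: peek gains an offset for lookahead and consume can now require both a token type and an exact value. A small illustration of the new behaviour on a hand-written token list (the token data here is made up):

# Hypothetical token stream for: print("hi")
tokens = [('IDENT', 'print'), ('OP', '('), ('STRING', '"hi"'), ('OP', ')')]
pos = 0

def peek(offset=0):
    index = pos + offset
    return tokens[index] if index < len(tokens) else (None, None)

def consume(expected_type=None, expected_value=None):
    global pos
    token = peek()
    if not token[0]: return None
    if expected_type and token[0] != expected_type: return None
    if expected_value and token[1] != expected_value: return None
    pos += 1
    return token

assert peek(1) == ('OP', '(')                 # one-token lookahead, nothing consumed
assert consume('IDENT') == ('IDENT', 'print')
assert consume('OP', ')') is None             # value mismatch: position does not advance
assert consume('OP', '(') == ('OP', '(')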
@@ -59,23 +59,37 @@ class Parser:
 
     def parse_statement(self):
         token = self.peek()
+        if not token[0]: return None
+
+        if token[1] == 'local':
+            self.consume()
+            ident = self.consume('IDENT')
+            if ident and self.peek()[1] == '=':
+                self.consume()
+                val = self.parse_expression()
+                return {'type': 'assign', 'name': ident[1], 'value': val, 'local': True}
+            return None
+
         if token[0] == 'IDENT':
             ident = self.consume()[1]
             next_token = self.peek()
             if next_token[1] == '(':
-                # Function call
-                self.consume() # (
+                self.consume()
                 args = []
-                while self.peek()[1] != ')':
-                    args.append(self.peek()[1]) # Simplified: only strings/numbers/idents
-                    self.consume()
+                while self.peek()[1] and self.peek()[1] != ')':
+                    args.append(self.parse_expression())
                     if self.peek()[1] == ',':
                         self.consume()
-                self.consume() # )
+                self.consume('OP', ')')
                 return {'type': 'call', 'name': ident, 'args': args}
             elif next_token[1] == '=':
-                # Assignment
-                self.consume() # =
-                value = self.consume()[1]
-                return {'type': 'assign', 'name': ident, 'value': value}
+                self.consume()
+                val = self.parse_expression()
+                return {'type': 'assign', 'name': ident, 'value': val, 'local': False}
+
         return None
+
+    def parse_expression(self):
+        token = self.consume()
+        if not token: return None
+        return token[1]
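Note: with the new 'local' branch and the parse_expression helper, a declaration such as local x = 10 should produce an assignment node and print(x) a call node. A rough sketch of the node shapes implied by the return statements above (not executed or verified here):

source = 'local x = 10\nprint(x)'

expected_nodes = [
    {'type': 'assign', 'name': 'x', 'value': '10', 'local': True},
    {'type': 'call', 'name': 'print', 'args': ['x']},
]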
fixer.py (new file, 82 lines)
@@ -0,0 +1,82 @@
# fixer.py
content = """import re

class Lexer:
    def __init__(self, code):
        self.code = code
        self.tokens = []
        self.pos = 0
        self.rules = [
            ('COMMENT', r'--\[\[.*?\].*?--.*'),
            ('STRING', r'\"(?:\\.|[^\"\\])*\"|\'(?:\\.|[^\'\])*\'|\\[\\[.*?\\]\\].*?'),
            ('NUMBER', r'\d+\.?\d*'),
            ('KEYWORD', r'\b(and|break|do|else|elseif|end|false|for|function|if|in|local|nil|not|or|repeat|return|then|true|until|while)\b'),
            ('IDENT', r'[a-zA-Z_][a-zA-Z0-9_]*'),
            ('OP', r'==|~=|<=|>=|\.\.\.|\.\.|>>|<<|[\+\-\*/%^#=\<\>\(\)\{\}\[\];:,.\.]'),
            ('SPACE', r'\s+')
        ]

    def tokenize(self):
        while self.pos < len(self.code):
            match = None
            for name, pattern in self.rules:
                regex = re.compile(pattern, re.DOTALL)
                match = regex.match(self.code, self.pos)
                if match:
                    if name != 'SPACE' and name != 'COMMENT':
                        self.tokens.append((name, match.group(0)))
                    self.pos = match.end()
                    break
            if not match:
                self.pos += 1
        return self.tokens

class Parser:
    def __init__(self, tokens):
        self.tokens = tokens
        self.pos = 0

    def peek(self):
        return self.tokens[self.pos] if self.pos < len(self.tokens) else (None, None)

    def consume(self, expected_type=None):
        token = self.peek()
        if expected_type and token[0] != expected_type:
            return None
        self.pos += 1
        return token

    def parse(self):
        nodes = []
        while self.pos < len(self.tokens):
            node = self.parse_statement()
            if node:
                nodes.append(node)
            else:
                self.pos += 1
        return nodes

    def parse_statement(self):
        token = self.peek()
        if token[0] == 'IDENT':
            ident = self.consume()[1]
            next_token = self.peek()
            if next_token[1] == '(':
                self.consume()
                args = []
                while self.peek()[1] != ')':
                    args.append(self.peek()[1])
                    self.consume()
                    if self.peek()[1] == ',':
                        self.consume()
                self.consume()
                return {'type': 'call', 'name': ident, 'args': args}
            elif next_token[1] == '=':
                self.consume()
                value = self.consume()[1]
                return {'type': 'assign', 'name': ident, 'value': value}
        return None
"""
with open("core/parser.py", "w") as f:
    f.write(content)
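Note: fixer.py regenerates core/parser.py from an embedded source string, presumably to sidestep the escaping problems visible in the hand-edited regex rules above. A sketch of the assumed workflow (script names are taken from this commit; run from the repository root so that core/ is importable):

import runpy

runpy.run_path('fixer.py')             # rewrites core/parser.py from the embedded string
runpy.run_path('test_obfuscator.py')   # smoke-tests the obfuscator against the regenerated parser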
parser_b64.txt (new file, 1 line)
@@ -0,0 +1 @@
aW1wb3J0IHJlCgpjbGFzcyBMZXhlcjoKICAgIGRlZiBfX2luaXRfXyhzZWxmLCBjb2RlKToKICAgICAgICBzZWxmLmNvZGUgPSBjb2RlCiAgICAgICAgc2VsZi50b2tlbnMgPSBbXQogICAgICAgIHNlbGYucG9zID0gMAogICAgICAgIHNlbGYucnVsZXMgPSBbCiAgICAgICAgICAgICgnQ09NTUVOVCcsIHInLS1cW1xbLio/XF1cXXwtLS4qJyksCiAgICAgICAgICAgICgnU1RSSU5HJywgciciKD86XC58W14iXF0pKiJ8XCcoPzpcLnxbXlwnXF0pKlwnfFxbXFsuKj9cXVxdJyksCiAgICAgICAgICAgICgnTlVNQkVSJywgcidcZCtcLj9cZConKSwKICAgICAgICAgICAgKCdLRVlXT1JEJywgcidcYihhbmR8YnJlYWt8ZG98ZWxzZXxlbHNlaWZ8ZW5kfGZhbHNlfGZvcnxmdW5jdGlvbnxpZnxpbnxsb2NhbHxuaWx8bm90fG9yfHJlcGVhdHxyZXR1cm58dGhlbnx0cnVlfHVudGlsfHdoaWxlKVxiJyksCiAgICAgICAgICAgICgnSURFTlQnLCByJ1thLXpBLVpfXVthLXpBLVowLTlfXSonKSwKICAgICAgICAgICAgKCdPUCcsIHInPT18fj18PD18Pj18XC5cLlwufFwuXC58Pj58PDx8W1wrXC1cKi8lXiM9XDw+XChcKVx7XH1cW1xdOzosXC5dJyksCiAgICAgICAgICAgICgnU1BBQ0UnLCByJ1xzKycpCiAgICAgICAgXQoKICAgIGRlZiB0b2tlbml6ZShzZWxmKToKICAgICAgICB3aGlsZSBzZWxmLnBvcyA8IGxlbihzZWxmLmNvZGUpOgogICAgICAgICAgICBtYXRjaCA9IE5vbmUKICAgICAgICAgICAgZm9yIG5hbWUsIHBhdHRlcm4gaW4gc2VsZi5ydWxlczoKICAgICAgICAgICAgICAgIHJlZ2V4ID0gcmUuY29tcGlsZShwYXR0ZXJuLCByZS5ET1RBTEwpCiAgICAgICAgICAgICAgICBtYXRjaCA9IHJlZ2V4Lm1hdGNoKHNlbGYuY29kZSwgc2VsZi5wb3MpCiAgICAgICAgICAgICAgICBpZiBtYXRjaDoKICAgICAgICAgICAgICAgICAgICBpZiBuYW1lICE9ICdTUEFDRScgYW5kIG5hbWUgIT0gJ0NPTU1FTlQnOgogICAgICAgICAgICAgICAgICAgICAgICBzZWxmLnRva2Vucy5hcHBlbmQoKG5hbWUsIG1hdGNoLmdyb3VwKDApKSkKICAgICAgICAgICAgICAgICAgICBzZWxmLnBvcyA9IG1hdGNoLmVuZCgpCiAgICAgICAgICAgICAgICAgICAgYnJlYWsKICAgICAgICAgICAgaWYgbm90IG1hdGNoOgogICAgICAgICAgICAgICAgc2VsZi5wb3MgKz0gMQogICAgICAgIHJldHVybiBzZWxmLnRva2VucwoKY2xhc3MgUGFyc2VyOgogICAgZGVmIF9faW5pdF9fKHNlbGYsIHRva2Vucyk6CiAgICAgICAgc2VsZi50b2tlbnMgPSB0b2tlbnMKICAgICAgICBzZWxmLnBvcyA9IDAKCiAgICBkZWYgcGVlayhzZWxmKToKICAgICAgICByZXR1cm4gc2VsZi50b2tlbnNbc2VsZi5wb3NdIGlmIHNlbGYucG9zIDwgbGVuKHNlbGYudG9rZW5zKSBlbHNlIChOb25lLCBOb25lKQoKICAgIGRlZiBjb25zdW1lKHNlbGYsIGV4cGVjdGVkX3R5cGU9Tm9uZSk6CiAgICAgICAgdG9rZW4gPSBzZWxmLnBlZWsoKQogICAgICAgIGlmIGV4cGVjdGVkX3R5cGUgYW5kIHRva2VuWzBdICE9IGV4cGVjdGVkX3R5cGU6CiAgICAgICAgICAgIHJldHVybiBOb25lCiAgICAgICAgc2VsZi5wb3MgKz0gMQogICAgICAgIHJldHVybiB0b2tlbgoKICAgIGRlZiBwYXJzZShzZWxmKToKICAgICAgICBub2RlcyA9IFtdCiAgICAgICAgd2hpbGUgc2VsZi5wb3MgPCBsZW4oc2VsZi50b2tlbnMpOgogICAgICAgICAgICBub2RlID0gc2VsZi5wYXJzZV9zdGF0ZW1lbnQoKQogICAgICAgICAgICBpZiBub2RlOgogICAgICAgICAgICAgICAgbm9kZXMuYXBwZW5kKG5vZGUpCiAgICAgICAgICAgIGVsc2U6CiAgICAgICAgICAgICAgICBzZWxmLnBvcyArPSAxCiAgICAgICAgcmV0dXJuIG5vZGVzCgogICAgZGVmIHBhcnNlX3N0YXRlbWVudChzZWxmKToKICAgICAgICB0b2tlbiA9IHNlbGYucGVlaygpCiAgICAgICAgaWYgdG9rZW5bMF0gPT0gJ0lERU5UJzoKICAgICAgICAgICAgaWRlbnQgPSBzZWxmLmNvbnN1bWUoKVsxXQogICAgICAgICAgICBuZXh0X3Rva2VuID0gc2VsZi5wZWVrKCkKICAgICAgICAgICAgaWYgbmV4dF90b2tlblsxXSA9PSAnKCc6CiAgICAgICAgICAgICAgICBzZWxmLmNvbnN1bWUoKQogICAgICAgICAgICAgICAgYXJncyA9IFtdCiAgICAgICAgICAgICAgICB3aGlsZSBzZWxmLnBlZWsoKVsxXSAhPSAnKSc6CiAgICAgICAgICAgICAgICAgICAgYXJncy5hcHBlbmQoc2VsZi5wZWVrKClbMV0pCiAgICAgICAgICAgICAgICAgICAgc2VsZi5jb25zdW1lKCkKICAgICAgICAgICAgICAgICAgICBpZiBzZWxmLnBlZWsoKVsxXSA9PSAnLCc6CiAgICAgICAgICAgICAgICAgICAgICAgIHNlbGYuY29uc3VtZSgpCiAgICAgICAgICAgICAgICBzZWxmLmNvbnN1bWUoKQogICAgICAgICAgICAgICAgcmV0dXJuIHsndHlwZSc6ICdjYWxsJywgJ25hbWUnOiBpZGVudCwgJ2FyZ3MnOiBhcmdzfQogICAgICAgICAgICBlbGlmIG5leHRfdG9rZW5bMV0gPT0gJz0nOgogICAgICAgICAgICAgICAgc2VsZi5jb25zdW1lKCkKICAgICAgICAgICAgICAgIHZhbHVlID0gc2VsZi5jb25zdW1lKClbMV0KICAgICAgICAgICAgICAgIHJldHVybiB7J3R5cGUnOiAnYXNzaWduJywgJ25hbWUnOiBpZGVudCwgJ3ZhbHVlJzogdmFsdWV9CiAgICAgICAgcmV0dXJuIE5vbmUK
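Note: parser_b64.txt appears to be a Base64 copy of the intended core/parser.py, checked in so the file can be restored without any shell or editor escaping issues. A hedged sketch of decoding it back into place (the target path is assumed from fixer.py):

import base64

with open('parser_b64.txt', 'rb') as f:
    decoded = base64.b64decode(f.read())   # stray whitespace in the file is ignored by b64decode

with open('core/parser.py', 'wb') as f:
    f.write(decoded)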
test_obfuscator.py (new file, 10 lines)
@@ -0,0 +1,10 @@
from core.obfuscator import obfuscate

test_code = """
local x = 10
print(x)
print("Hello, Luau!")
"""

result = obfuscate(test_code)
print(result)