Linux iad1-shared-b7-18 6.6.49-grsec-jammy+ #10 SMP Thu Sep 12 23:23:08 UTC 2024 x86_64
Apache
: 67.205.6.31 | : 216.73.216.47
Can't Read [ /etc/named.conf ]
8.2.29
fernandoquevedo
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
README
+ Create Folder
+ Create File
/
usr /
lib /
python3 /
dist-packages /
pygments /
lexers /
[ HOME SHELL ]
Name
Size
Permission
Action
__pycache__
[ DIR ]
drwxr-xr-x
__init__.py
11
KB
-rw-r--r--
_asy_builtins.py
26.65
KB
-rw-r--r--
_cl_builtins.py
13.67
KB
-rw-r--r--
_cocoa_builtins.py
102.72
KB
-rw-r--r--
_csound_builtins.py
17.84
KB
-rw-r--r--
_julia_builtins.py
11.6
KB
-rw-r--r--
_lasso_builtins.py
131.36
KB
-rw-r--r--
_lilypond_builtins.py
101.67
KB
-rw-r--r--
_lua_builtins.py
8.08
KB
-rw-r--r--
_mapping.py
62.33
KB
-rw-r--r--
_mql_builtins.py
24.13
KB
-rw-r--r--
_mysql_builtins.py
23.92
KB
-rw-r--r--
_openedge_builtins.py
48.24
KB
-rw-r--r--
_php_builtins.py
150.72
KB
-rw-r--r--
_postgres_builtins.py
11.9
KB
-rw-r--r--
_scilab_builtins.py
51.15
KB
-rw-r--r--
_sourcemod_builtins.py
26.42
KB
-rw-r--r--
_stan_builtins.py
10.21
KB
-rw-r--r--
_stata_builtins.py
26.59
KB
-rw-r--r--
_tsql_builtins.py
15.1
KB
-rw-r--r--
_usd_builtins.py
1.62
KB
-rw-r--r--
_vbscript_builtins.py
4.13
KB
-rw-r--r--
_vim_builtins.py
55.73
KB
-rw-r--r--
actionscript.py
11.31
KB
-rw-r--r--
agile.py
876
B
-rw-r--r--
algebra.py
7.59
KB
-rw-r--r--
ambient.py
2.54
KB
-rw-r--r--
amdgpu.py
1.57
KB
-rw-r--r--
ampl.py
4.05
KB
-rw-r--r--
apdlexer.py
26.04
KB
-rw-r--r--
apl.py
3.32
KB
-rw-r--r--
archetype.py
11.17
KB
-rw-r--r--
arrow.py
3.47
KB
-rw-r--r--
asc.py
1.54
KB
-rw-r--r--
asm.py
40.19
KB
-rw-r--r--
automation.py
19.34
KB
-rw-r--r--
bare.py
2.97
KB
-rw-r--r--
basic.py
27.22
KB
-rw-r--r--
bdd.py
1.59
KB
-rw-r--r--
bibtex.py
4.62
KB
-rw-r--r--
boa.py
3.87
KB
-rw-r--r--
business.py
27.4
KB
-rw-r--r--
c_cpp.py
15.67
KB
-rw-r--r--
c_like.py
28.45
KB
-rw-r--r--
capnproto.py
2.17
KB
-rw-r--r--
cddl.py
5.2
KB
-rw-r--r--
chapel.py
4.89
KB
-rw-r--r--
clean.py
6.24
KB
-rw-r--r--
compiled.py
1.33
KB
-rw-r--r--
configs.py
39.49
KB
-rw-r--r--
console.py
4.05
KB
-rw-r--r--
crystal.py
15.41
KB
-rw-r--r--
csound.py
16.56
KB
-rw-r--r--
css.py
31.15
KB
-rw-r--r--
d.py
9.61
KB
-rw-r--r--
dalvik.py
4.47
KB
-rw-r--r--
data.py
23.96
KB
-rw-r--r--
devicetree.py
3.92
KB
-rw-r--r--
diff.py
5.04
KB
-rw-r--r--
dotnet.py
28.47
KB
-rw-r--r--
dsls.py
35.45
KB
-rw-r--r--
dylan.py
10.16
KB
-rw-r--r--
ecl.py
6.09
KB
-rw-r--r--
eiffel.py
2.6
KB
-rw-r--r--
elm.py
3.08
KB
-rw-r--r--
elpi.py
5.83
KB
-rw-r--r--
email.py
4.97
KB
-rw-r--r--
erlang.py
18.76
KB
-rw-r--r--
esoteric.py
10.17
KB
-rw-r--r--
ezhil.py
3.27
KB
-rw-r--r--
factor.py
19.12
KB
-rw-r--r--
fantom.py
9.96
KB
-rw-r--r--
felix.py
9.41
KB
-rw-r--r--
floscript.py
2.6
KB
-rw-r--r--
forth.py
6.99
KB
-rw-r--r--
fortran.py
10.07
KB
-rw-r--r--
foxpro.py
25.6
KB
-rw-r--r--
freefem.py
26.43
KB
-rw-r--r--
functional.py
674
B
-rw-r--r--
futhark.py
3.64
KB
-rw-r--r--
gcodelexer.py
826
B
-rw-r--r--
gdscript.py
10.94
KB
-rw-r--r--
go.py
3.65
KB
-rw-r--r--
grammar_notation.py
7.83
KB
-rw-r--r--
graph.py
3.77
KB
-rw-r--r--
graphics.py
38.15
KB
-rw-r--r--
graphviz.py
1.83
KB
-rw-r--r--
gsql.py
3.68
KB
-rw-r--r--
haskell.py
31.98
KB
-rw-r--r--
haxe.py
30.21
KB
-rw-r--r--
hdl.py
21.99
KB
-rw-r--r--
hexdump.py
3.52
KB
-rw-r--r--
html.py
19.34
KB
-rw-r--r--
idl.py
14.87
KB
-rw-r--r--
igor.py
29.87
KB
-rw-r--r--
inferno.py
3.02
KB
-rw-r--r--
installers.py
12.8
KB
-rw-r--r--
int_fiction.py
55.32
KB
-rw-r--r--
iolang.py
1.84
KB
-rw-r--r--
j.py
4.4
KB
-rw-r--r--
javascript.py
58.67
KB
-rw-r--r--
jslt.py
3.61
KB
-rw-r--r--
julia.py
11
KB
-rw-r--r--
jvm.py
70.05
KB
-rw-r--r--
kuin.py
10.5
KB
-rw-r--r--
lilypond.py
8.26
KB
-rw-r--r--
lisp.py
139.4
KB
-rw-r--r--
make.py
7.26
KB
-rw-r--r--
markup.py
26.11
KB
-rw-r--r--
math.py
676
B
-rw-r--r--
matlab.py
129.3
KB
-rw-r--r--
maxima.py
2.65
KB
-rw-r--r--
meson.py
4.33
KB
-rw-r--r--
mime.py
7.36
KB
-rw-r--r--
ml.py
34.47
KB
-rw-r--r--
modeling.py
13.07
KB
-rw-r--r--
modula2.py
51.82
KB
-rw-r--r--
monte.py
6.14
KB
-rw-r--r--
mosel.py
8.97
KB
-rw-r--r--
ncl.py
62.46
KB
-rw-r--r--
nimrod.py
5
KB
-rw-r--r--
nit.py
2.66
KB
-rw-r--r--
nix.py
3.91
KB
-rw-r--r--
oberon.py
4.11
KB
-rw-r--r--
objective.py
22.26
KB
-rw-r--r--
ooc.py
2.91
KB
-rw-r--r--
other.py
1.7
KB
-rw-r--r--
parasail.py
2.65
KB
-rw-r--r--
parsers.py
25.27
KB
-rw-r--r--
pascal.py
31.84
KB
-rw-r--r--
pawn.py
7.96
KB
-rw-r--r--
perl.py
38.16
KB
-rw-r--r--
php.py
12.25
KB
-rw-r--r--
pointless.py
1.92
KB
-rw-r--r--
pony.py
3.17
KB
-rw-r--r--
praat.py
11.99
KB
-rw-r--r--
procfile.py
1.19
KB
-rw-r--r--
prolog.py
12.09
KB
-rw-r--r--
promql.py
4.63
KB
-rw-r--r--
python.py
51.49
KB
-rw-r--r--
qvt.py
5.93
KB
-rw-r--r--
r.py
6.02
KB
-rw-r--r--
rdf.py
15.42
KB
-rw-r--r--
rebol.py
18.16
KB
-rw-r--r--
resource.py
2.83
KB
-rw-r--r--
ride.py
4.93
KB
-rw-r--r--
rita.py
1.16
KB
-rw-r--r--
rnc.py
1.92
KB
-rw-r--r--
roboconf.py
2
KB
-rw-r--r--
robotframework.py
17.99
KB
-rw-r--r--
ruby.py
22.14
KB
-rw-r--r--
rust.py
7.99
KB
-rw-r--r--
sas.py
9.2
KB
-rw-r--r--
savi.py
4.24
KB
-rw-r--r--
scdoc.py
2.19
KB
-rw-r--r--
scripting.py
68.41
KB
-rw-r--r--
sgf.py
2
KB
-rw-r--r--
shell.py
35.06
KB
-rw-r--r--
sieve.py
2.24
KB
-rw-r--r--
slash.py
8.28
KB
-rw-r--r--
smalltalk.py
7.02
KB
-rw-r--r--
smithy.py
2.6
KB
-rw-r--r--
smv.py
2.71
KB
-rw-r--r--
snobol.py
2.67
KB
-rw-r--r--
solidity.py
3.1
KB
-rw-r--r--
sophia.py
3.29
KB
-rw-r--r--
special.py
3.37
KB
-rw-r--r--
spice.py
2.06
KB
-rw-r--r--
sql.py
33.42
KB
-rw-r--r--
srcinfo.py
1.63
KB
-rw-r--r--
stata.py
6.26
KB
-rw-r--r--
supercollider.py
3.61
KB
-rw-r--r--
tcl.py
5.27
KB
-rw-r--r--
teal.py
3.41
KB
-rw-r--r--
templates.py
69.96
KB
-rw-r--r--
teraterm.py
9.65
KB
-rw-r--r--
testing.py
10.52
KB
-rw-r--r--
text.py
1006
B
-rw-r--r--
textedit.py
7.44
KB
-rw-r--r--
textfmts.py
14.8
KB
-rw-r--r--
theorem.py
19.15
KB
-rw-r--r--
thingsdb.py
4.13
KB
-rw-r--r--
tnt.py
10.27
KB
-rw-r--r--
trafficscript.py
1.48
KB
-rw-r--r--
typoscript.py
8.01
KB
-rw-r--r--
unicon.py
18.08
KB
-rw-r--r--
urbi.py
5.9
KB
-rw-r--r--
usd.py
3.37
KB
-rw-r--r--
varnish.py
7.07
KB
-rw-r--r--
verification.py
3.82
KB
-rw-r--r--
web.py
894
B
-rw-r--r--
webassembly.py
5.56
KB
-rw-r--r--
webidl.py
10.23
KB
-rw-r--r--
webmisc.py
39.07
KB
-rw-r--r--
whiley.py
3.89
KB
-rw-r--r--
x10.py
1.84
KB
-rw-r--r--
xorg.py
865
B
-rw-r--r--
yang.py
4.42
KB
-rw-r--r--
zig.py
3.85
KB
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : robotframework.py
""" pygments.lexers.robotframework ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexer for Robot Framework. :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # Copyright 2012 Nokia Siemens Networks Oyj # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from pygments.lexer import Lexer from pygments.token import Token __all__ = ['RobotFrameworkLexer'] HEADING = Token.Generic.Heading SETTING = Token.Keyword.Namespace IMPORT = Token.Name.Namespace TC_KW_NAME = Token.Generic.Subheading KEYWORD = Token.Name.Function ARGUMENT = Token.String VARIABLE = Token.Name.Variable COMMENT = Token.Comment SEPARATOR = Token.Punctuation SYNTAX = Token.Punctuation GHERKIN = Token.Generic.Emph ERROR = Token.Error def normalize(string, remove=''): string = string.lower() for char in remove + ' ': if char in string: string = string.replace(char, '') return string class RobotFrameworkLexer(Lexer): """ For `Robot Framework <http://robotframework.org>`_ test data. Supports both space and pipe separated plain text formats. .. 
versionadded:: 1.6 """ name = 'RobotFramework' aliases = ['robotframework'] filenames = ['*.robot'] mimetypes = ['text/x-robotframework'] def __init__(self, **options): options['tabsize'] = 2 options['encoding'] = 'UTF-8' Lexer.__init__(self, **options) def get_tokens_unprocessed(self, text): row_tokenizer = RowTokenizer() var_tokenizer = VariableTokenizer() index = 0 for row in text.splitlines(): for value, token in row_tokenizer.tokenize(row): for value, token in var_tokenizer.tokenize(value, token): if value: yield index, token, str(value) index += len(value) class VariableTokenizer: def tokenize(self, string, token): var = VariableSplitter(string, identifiers='$@%&') if var.start < 0 or token in (COMMENT, ERROR): yield string, token return for value, token in self._tokenize(var, string, token): if value: yield value, token def _tokenize(self, var, string, orig_token): before = string[:var.start] yield before, orig_token yield var.identifier + '{', SYNTAX yield from self.tokenize(var.base, VARIABLE) yield '}', SYNTAX if var.index is not None: yield '[', SYNTAX yield from self.tokenize(var.index, VARIABLE) yield ']', SYNTAX yield from self.tokenize(string[var.end:], orig_token) class RowTokenizer: def __init__(self): self._table = UnknownTable() self._splitter = RowSplitter() testcases = TestCaseTable() settings = SettingTable(testcases.set_default_template) variables = VariableTable() keywords = KeywordTable() self._tables = {'settings': settings, 'setting': settings, 'metadata': settings, 'variables': variables, 'variable': variables, 'testcases': testcases, 'testcase': testcases, 'tasks': testcases, 'task': testcases, 'keywords': keywords, 'keyword': keywords, 'userkeywords': keywords, 'userkeyword': keywords} def tokenize(self, row): commented = False heading = False for index, value in enumerate(self._splitter.split(row)): # First value, and every second after that, is a separator. 
index, separator = divmod(index-1, 2) if value.startswith('#'): commented = True elif index == 0 and value.startswith('*'): self._table = self._start_table(value) heading = True yield from self._tokenize(value, index, commented, separator, heading) self._table.end_row() def _start_table(self, header): name = normalize(header, remove='*') return self._tables.get(name, UnknownTable()) def _tokenize(self, value, index, commented, separator, heading): if commented: yield value, COMMENT elif separator: yield value, SEPARATOR elif heading: yield value, HEADING else: yield from self._table.tokenize(value, index) class RowSplitter: _space_splitter = re.compile('( {2,})') _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))') def split(self, row): splitter = (row.startswith('| ') and self._split_from_pipes or self._split_from_spaces) yield from splitter(row) yield '\n' def _split_from_spaces(self, row): yield '' # Start with (pseudo)separator similarly as with pipes yield from self._space_splitter.split(row) def _split_from_pipes(self, row): _, separator, rest = self._pipe_splitter.split(row, 1) yield separator while self._pipe_splitter.search(rest): cell, separator, rest = self._pipe_splitter.split(rest, 1) yield cell yield separator yield rest class Tokenizer: _tokens = None def __init__(self): self._index = 0 def tokenize(self, value): values_and_tokens = self._tokenize(value, self._index) self._index += 1 if isinstance(values_and_tokens, type(Token)): values_and_tokens = [(value, values_and_tokens)] return values_and_tokens def _tokenize(self, value, index): index = min(index, len(self._tokens) - 1) return self._tokens[index] def _is_assign(self, value): if value.endswith('='): value = value[:-1].strip() var = VariableSplitter(value, identifiers='$@&') return var.start == 0 and var.end == len(value) class Comment(Tokenizer): _tokens = (COMMENT,) class Setting(Tokenizer): _tokens = (SETTING, ARGUMENT) _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown', 
'suitepostcondition', 'testsetup', 'tasksetup', 'testprecondition', 'testteardown','taskteardown', 'testpostcondition', 'testtemplate', 'tasktemplate') _import_settings = ('library', 'resource', 'variables') _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags', 'testtimeout','tasktimeout') _custom_tokenizer = None def __init__(self, template_setter=None): Tokenizer.__init__(self) self._template_setter = template_setter def _tokenize(self, value, index): if index == 1 and self._template_setter: self._template_setter(value) if index == 0: normalized = normalize(value) if normalized in self._keyword_settings: self._custom_tokenizer = KeywordCall(support_assign=False) elif normalized in self._import_settings: self._custom_tokenizer = ImportSetting() elif normalized not in self._other_settings: return ERROR elif self._custom_tokenizer: return self._custom_tokenizer.tokenize(value) return Tokenizer._tokenize(self, value, index) class ImportSetting(Tokenizer): _tokens = (IMPORT, ARGUMENT) class TestCaseSetting(Setting): _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition', 'template') _import_settings = () _other_settings = ('documentation', 'tags', 'timeout') def _tokenize(self, value, index): if index == 0: type = Setting._tokenize(self, value[1:-1], index) return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)] return Setting._tokenize(self, value, index) class KeywordSetting(TestCaseSetting): _keyword_settings = ('teardown',) _other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags') class Variable(Tokenizer): _tokens = (SYNTAX, ARGUMENT) def _tokenize(self, value, index): if index == 0 and not self._is_assign(value): return ERROR return Tokenizer._tokenize(self, value, index) class KeywordCall(Tokenizer): _tokens = (KEYWORD, ARGUMENT) def __init__(self, support_assign=True): Tokenizer.__init__(self) self._keyword_found = not support_assign self._assigns = 0 def _tokenize(self, value, index): if not 
self._keyword_found and self._is_assign(value): self._assigns += 1 return SYNTAX # VariableTokenizer tokenizes this later. if self._keyword_found: return Tokenizer._tokenize(self, value, index - self._assigns) self._keyword_found = True return GherkinTokenizer().tokenize(value, KEYWORD) class GherkinTokenizer: _gherkin_prefix = re.compile('^(Given|When|Then|And) ', re.IGNORECASE) def tokenize(self, value, token): match = self._gherkin_prefix.match(value) if not match: return [(value, token)] end = match.end() return [(value[:end], GHERKIN), (value[end:], token)] class TemplatedKeywordCall(Tokenizer): _tokens = (ARGUMENT,) class ForLoop(Tokenizer): def __init__(self): Tokenizer.__init__(self) self._in_arguments = False def _tokenize(self, value, index): token = self._in_arguments and ARGUMENT or SYNTAX if value.upper() in ('IN', 'IN RANGE'): self._in_arguments = True return token class _Table: _tokenizer_class = None def __init__(self, prev_tokenizer=None): self._tokenizer = self._tokenizer_class() self._prev_tokenizer = prev_tokenizer self._prev_values_on_row = [] def tokenize(self, value, index): if self._continues(value, index): self._tokenizer = self._prev_tokenizer yield value, SYNTAX else: yield from self._tokenize(value, index) self._prev_values_on_row.append(value) def _continues(self, value, index): return value == '...' 
and all(self._is_empty(t) for t in self._prev_values_on_row) def _is_empty(self, value): return value in ('', '\\') def _tokenize(self, value, index): return self._tokenizer.tokenize(value) def end_row(self): self.__init__(prev_tokenizer=self._tokenizer) class UnknownTable(_Table): _tokenizer_class = Comment def _continues(self, value, index): return False class VariableTable(_Table): _tokenizer_class = Variable class SettingTable(_Table): _tokenizer_class = Setting def __init__(self, template_setter, prev_tokenizer=None): _Table.__init__(self, prev_tokenizer) self._template_setter = template_setter def _tokenize(self, value, index): if index == 0 and normalize(value) == 'testtemplate': self._tokenizer = Setting(self._template_setter) return _Table._tokenize(self, value, index) def end_row(self): self.__init__(self._template_setter, prev_tokenizer=self._tokenizer) class TestCaseTable(_Table): _setting_class = TestCaseSetting _test_template = None _default_template = None @property def _tokenizer_class(self): if self._test_template or (self._default_template and self._test_template is not False): return TemplatedKeywordCall return KeywordCall def _continues(self, value, index): return index > 0 and _Table._continues(self, value, index) def _tokenize(self, value, index): if index == 0: if value: self._test_template = None return GherkinTokenizer().tokenize(value, TC_KW_NAME) if index == 1 and self._is_setting(value): if self._is_template(value): self._test_template = False self._tokenizer = self._setting_class(self.set_test_template) else: self._tokenizer = self._setting_class() if index == 1 and self._is_for_loop(value): self._tokenizer = ForLoop() if index == 1 and self._is_empty(value): return [(value, SYNTAX)] return _Table._tokenize(self, value, index) def _is_setting(self, value): return value.startswith('[') and value.endswith(']') def _is_template(self, value): return normalize(value) == '[template]' def _is_for_loop(self, value): return value.startswith(':') 
and normalize(value, remove=':') == 'for' def set_test_template(self, template): self._test_template = self._is_template_set(template) def set_default_template(self, template): self._default_template = self._is_template_set(template) def _is_template_set(self, template): return normalize(template) not in ('', '\\', 'none', '${empty}') class KeywordTable(TestCaseTable): _tokenizer_class = KeywordCall _setting_class = KeywordSetting def _is_template(self, value): return False # Following code copied directly from Robot Framework 2.7.5. class VariableSplitter: def __init__(self, string, identifiers): self.identifier = None self.base = None self.index = None self.start = -1 self.end = -1 self._identifiers = identifiers self._may_have_internal_variables = False try: self._split(string) except ValueError: pass else: self._finalize() def get_replaced_base(self, variables): if self._may_have_internal_variables: return variables.replace_string(self.base) return self.base def _finalize(self): self.identifier = self._variable_chars[0] self.base = ''.join(self._variable_chars[2:-1]) self.end = self.start + len(self._variable_chars) if self._has_list_or_dict_variable_index(): self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1]) self.end += len(self._list_and_dict_variable_index_chars) def _has_list_or_dict_variable_index(self): return self._list_and_dict_variable_index_chars\ and self._list_and_dict_variable_index_chars[-1] == ']' def _split(self, string): start_index, max_index = self._find_variable(string) self.start = start_index self._open_curly = 1 self._state = self._variable_state self._variable_chars = [string[start_index], '{'] self._list_and_dict_variable_index_chars = [] self._string = string start_index += 2 for index, char in enumerate(string[start_index:]): index += start_index # Giving start to enumerate only in Py 2.6+ try: self._state(char, index) except StopIteration: return if index == max_index and not self._scanning_list_variable_index(): 
return def _scanning_list_variable_index(self): return self._state in [self._waiting_list_variable_index_state, self._list_variable_index_state] def _find_variable(self, string): max_end_index = string.rfind('}') if max_end_index == -1: raise ValueError('No variable end found') if self._is_escaped(string, max_end_index): return self._find_variable(string[:max_end_index]) start_index = self._find_start_index(string, 1, max_end_index) if start_index == -1: raise ValueError('No variable start found') return start_index, max_end_index def _find_start_index(self, string, start, end): index = string.find('{', start, end) - 1 if index < 0: return -1 if self._start_index_is_ok(string, index): return index return self._find_start_index(string, index+2, end) def _start_index_is_ok(self, string, index): return string[index] in self._identifiers\ and not self._is_escaped(string, index) def _is_escaped(self, string, index): escaped = False while index > 0 and string[index-1] == '\\': index -= 1 escaped = not escaped return escaped def _variable_state(self, char, index): self._variable_chars.append(char) if char == '}' and not self._is_escaped(self._string, index): self._open_curly -= 1 if self._open_curly == 0: if not self._is_list_or_dict_variable(): raise StopIteration self._state = self._waiting_list_variable_index_state elif char in self._identifiers: self._state = self._internal_variable_start_state def _is_list_or_dict_variable(self): return self._variable_chars[0] in ('@','&') def _internal_variable_start_state(self, char, index): self._state = self._variable_state if char == '{': self._variable_chars.append(char) self._open_curly += 1 self._may_have_internal_variables = True else: self._variable_state(char, index) def _waiting_list_variable_index_state(self, char, index): if char != '[': raise StopIteration self._list_and_dict_variable_index_chars.append(char) self._state = self._list_variable_index_state def _list_variable_index_state(self, char, index): 
self._list_and_dict_variable_index_chars.append(char) if char == ']': raise StopIteration
Close