Linux iad1-shared-b7-18 6.6.49-grsec-jammy+ #10 SMP Thu Sep 12 23:23:08 UTC 2024 x86_64
Apache | PHP 8.2.29 | user: fernandoquevedo
IP: 67.205.6.31 | 216.73.216.47
Cannot read [ /etc/named.conf ]
/usr/lib/python3/dist-packages/pygments/lexers/

Name | Size | Permission
__pycache__ | [ DIR ] | drwxr-xr-x
__init__.py | 11 KB | -rw-r--r--
_asy_builtins.py | 26.65 KB | -rw-r--r--
_cl_builtins.py | 13.67 KB | -rw-r--r--
_cocoa_builtins.py | 102.72 KB | -rw-r--r--
_csound_builtins.py | 17.84 KB | -rw-r--r--
_julia_builtins.py | 11.6 KB | -rw-r--r--
_lasso_builtins.py | 131.36 KB | -rw-r--r--
_lilypond_builtins.py | 101.67 KB | -rw-r--r--
_lua_builtins.py | 8.08 KB | -rw-r--r--
_mapping.py | 62.33 KB | -rw-r--r--
_mql_builtins.py | 24.13 KB | -rw-r--r--
_mysql_builtins.py | 23.92 KB | -rw-r--r--
_openedge_builtins.py | 48.24 KB | -rw-r--r--
_php_builtins.py | 150.72 KB | -rw-r--r--
_postgres_builtins.py | 11.9 KB | -rw-r--r--
_scilab_builtins.py | 51.15 KB | -rw-r--r--
_sourcemod_builtins.py | 26.42 KB | -rw-r--r--
_stan_builtins.py | 10.21 KB | -rw-r--r--
_stata_builtins.py | 26.59 KB | -rw-r--r--
_tsql_builtins.py | 15.1 KB | -rw-r--r--
_usd_builtins.py | 1.62 KB | -rw-r--r--
_vbscript_builtins.py | 4.13 KB | -rw-r--r--
_vim_builtins.py | 55.73 KB | -rw-r--r--
actionscript.py | 11.31 KB | -rw-r--r--
agile.py | 876 B | -rw-r--r--
algebra.py | 7.59 KB | -rw-r--r--
ambient.py | 2.54 KB | -rw-r--r--
amdgpu.py | 1.57 KB | -rw-r--r--
ampl.py | 4.05 KB | -rw-r--r--
apdlexer.py | 26.04 KB | -rw-r--r--
apl.py | 3.32 KB | -rw-r--r--
archetype.py | 11.17 KB | -rw-r--r--
arrow.py | 3.47 KB | -rw-r--r--
asc.py | 1.54 KB | -rw-r--r--
asm.py | 40.19 KB | -rw-r--r--
automation.py | 19.34 KB | -rw-r--r--
bare.py | 2.97 KB | -rw-r--r--
basic.py | 27.22 KB | -rw-r--r--
bdd.py | 1.59 KB | -rw-r--r--
bibtex.py | 4.62 KB | -rw-r--r--
boa.py | 3.87 KB | -rw-r--r--
business.py | 27.4 KB | -rw-r--r--
c_cpp.py | 15.67 KB | -rw-r--r--
c_like.py | 28.45 KB | -rw-r--r--
capnproto.py | 2.17 KB | -rw-r--r--
cddl.py | 5.2 KB | -rw-r--r--
chapel.py | 4.89 KB | -rw-r--r--
clean.py | 6.24 KB | -rw-r--r--
compiled.py | 1.33 KB | -rw-r--r--
configs.py | 39.49 KB | -rw-r--r--
console.py | 4.05 KB | -rw-r--r--
crystal.py | 15.41 KB | -rw-r--r--
csound.py | 16.56 KB | -rw-r--r--
css.py | 31.15 KB | -rw-r--r--
d.py | 9.61 KB | -rw-r--r--
dalvik.py | 4.47 KB | -rw-r--r--
data.py | 23.96 KB | -rw-r--r--
devicetree.py | 3.92 KB | -rw-r--r--
diff.py | 5.04 KB | -rw-r--r--
dotnet.py | 28.47 KB | -rw-r--r--
dsls.py | 35.45 KB | -rw-r--r--
dylan.py | 10.16 KB | -rw-r--r--
ecl.py | 6.09 KB | -rw-r--r--
eiffel.py | 2.6 KB | -rw-r--r--
elm.py | 3.08 KB | -rw-r--r--
elpi.py | 5.83 KB | -rw-r--r--
email.py | 4.97 KB | -rw-r--r--
erlang.py | 18.76 KB | -rw-r--r--
esoteric.py | 10.17 KB | -rw-r--r--
ezhil.py | 3.27 KB | -rw-r--r--
factor.py | 19.12 KB | -rw-r--r--
fantom.py | 9.96 KB | -rw-r--r--
felix.py | 9.41 KB | -rw-r--r--
floscript.py | 2.6 KB | -rw-r--r--
forth.py | 6.99 KB | -rw-r--r--
fortran.py | 10.07 KB | -rw-r--r--
foxpro.py | 25.6 KB | -rw-r--r--
freefem.py | 26.43 KB | -rw-r--r--
functional.py | 674 B | -rw-r--r--
futhark.py | 3.64 KB | -rw-r--r--
gcodelexer.py | 826 B | -rw-r--r--
gdscript.py | 10.94 KB | -rw-r--r--
go.py | 3.65 KB | -rw-r--r--
grammar_notation.py | 7.83 KB | -rw-r--r--
graph.py | 3.77 KB | -rw-r--r--
graphics.py | 38.15 KB | -rw-r--r--
graphviz.py | 1.83 KB | -rw-r--r--
gsql.py | 3.68 KB | -rw-r--r--
haskell.py | 31.98 KB | -rw-r--r--
haxe.py | 30.21 KB | -rw-r--r--
hdl.py | 21.99 KB | -rw-r--r--
hexdump.py | 3.52 KB | -rw-r--r--
html.py | 19.34 KB | -rw-r--r--
idl.py | 14.87 KB | -rw-r--r--
igor.py | 29.87 KB | -rw-r--r--
inferno.py | 3.02 KB | -rw-r--r--
installers.py | 12.8 KB | -rw-r--r--
int_fiction.py | 55.32 KB | -rw-r--r--
iolang.py | 1.84 KB | -rw-r--r--
j.py | 4.4 KB | -rw-r--r--
javascript.py | 58.67 KB | -rw-r--r--
jslt.py | 3.61 KB | -rw-r--r--
julia.py | 11 KB | -rw-r--r--
jvm.py | 70.05 KB | -rw-r--r--
kuin.py | 10.5 KB | -rw-r--r--
lilypond.py | 8.26 KB | -rw-r--r--
lisp.py | 139.4 KB | -rw-r--r--
make.py | 7.26 KB | -rw-r--r--
markup.py | 26.11 KB | -rw-r--r--
math.py | 676 B | -rw-r--r--
matlab.py | 129.3 KB | -rw-r--r--
maxima.py | 2.65 KB | -rw-r--r--
meson.py | 4.33 KB | -rw-r--r--
mime.py | 7.36 KB | -rw-r--r--
ml.py | 34.47 KB | -rw-r--r--
modeling.py | 13.07 KB | -rw-r--r--
modula2.py | 51.82 KB | -rw-r--r--
monte.py | 6.14 KB | -rw-r--r--
mosel.py | 8.97 KB | -rw-r--r--
ncl.py | 62.46 KB | -rw-r--r--
nimrod.py | 5 KB | -rw-r--r--
nit.py | 2.66 KB | -rw-r--r--
nix.py | 3.91 KB | -rw-r--r--
oberon.py | 4.11 KB | -rw-r--r--
objective.py | 22.26 KB | -rw-r--r--
ooc.py | 2.91 KB | -rw-r--r--
other.py | 1.7 KB | -rw-r--r--
parasail.py | 2.65 KB | -rw-r--r--
parsers.py | 25.27 KB | -rw-r--r--
pascal.py | 31.84 KB | -rw-r--r--
pawn.py | 7.96 KB | -rw-r--r--
perl.py | 38.16 KB | -rw-r--r--
php.py | 12.25 KB | -rw-r--r--
pointless.py | 1.92 KB | -rw-r--r--
pony.py | 3.17 KB | -rw-r--r--
praat.py | 11.99 KB | -rw-r--r--
procfile.py | 1.19 KB | -rw-r--r--
prolog.py | 12.09 KB | -rw-r--r--
promql.py | 4.63 KB | -rw-r--r--
python.py | 51.49 KB | -rw-r--r--
qvt.py | 5.93 KB | -rw-r--r--
r.py | 6.02 KB | -rw-r--r--
rdf.py | 15.42 KB | -rw-r--r--
rebol.py | 18.16 KB | -rw-r--r--
resource.py | 2.83 KB | -rw-r--r--
ride.py | 4.93 KB | -rw-r--r--
rita.py | 1.16 KB | -rw-r--r--
rnc.py | 1.92 KB | -rw-r--r--
roboconf.py | 2 KB | -rw-r--r--
robotframework.py | 17.99 KB | -rw-r--r--
ruby.py | 22.14 KB | -rw-r--r--
rust.py | 7.99 KB | -rw-r--r--
sas.py | 9.2 KB | -rw-r--r--
savi.py | 4.24 KB | -rw-r--r--
scdoc.py | 2.19 KB | -rw-r--r--
scripting.py | 68.41 KB | -rw-r--r--
sgf.py | 2 KB | -rw-r--r--
shell.py | 35.06 KB | -rw-r--r--
sieve.py | 2.24 KB | -rw-r--r--
slash.py | 8.28 KB | -rw-r--r--
smalltalk.py | 7.02 KB | -rw-r--r--
smithy.py | 2.6 KB | -rw-r--r--
smv.py | 2.71 KB | -rw-r--r--
snobol.py | 2.67 KB | -rw-r--r--
solidity.py | 3.1 KB | -rw-r--r--
sophia.py | 3.29 KB | -rw-r--r--
special.py | 3.37 KB | -rw-r--r--
spice.py | 2.06 KB | -rw-r--r--
sql.py | 33.42 KB | -rw-r--r--
srcinfo.py | 1.63 KB | -rw-r--r--
stata.py | 6.26 KB | -rw-r--r--
supercollider.py | 3.61 KB | -rw-r--r--
tcl.py | 5.27 KB | -rw-r--r--
teal.py | 3.41 KB | -rw-r--r--
templates.py | 69.96 KB | -rw-r--r--
teraterm.py | 9.65 KB | -rw-r--r--
testing.py | 10.52 KB | -rw-r--r--
text.py | 1006 B | -rw-r--r--
textedit.py | 7.44 KB | -rw-r--r--
textfmts.py | 14.8 KB | -rw-r--r--
theorem.py | 19.15 KB | -rw-r--r--
thingsdb.py | 4.13 KB | -rw-r--r--
tnt.py | 10.27 KB | -rw-r--r--
trafficscript.py | 1.48 KB | -rw-r--r--
typoscript.py | 8.01 KB | -rw-r--r--
unicon.py | 18.08 KB | -rw-r--r--
urbi.py | 5.9 KB | -rw-r--r--
usd.py | 3.37 KB | -rw-r--r--
varnish.py | 7.07 KB | -rw-r--r--
verification.py | 3.82 KB | -rw-r--r--
web.py | 894 B | -rw-r--r--
webassembly.py | 5.56 KB | -rw-r--r--
webidl.py | 10.23 KB | -rw-r--r--
webmisc.py | 39.07 KB | -rw-r--r--
whiley.py | 3.89 KB | -rw-r--r--
x10.py | 1.84 KB | -rw-r--r--
xorg.py | 865 B | -rw-r--r--
yang.py | 4.42 KB | -rw-r--r--
zig.py | 3.85 KB | -rw-r--r--
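Every file in this listing is an ordinary Python module; the name-to-module registry that Pygments' lookup helpers consult is generated into _mapping.py. As a minimal sketch (assuming the Pygments installation above is on the import path), the public API can enumerate everything these modules register:

# Minimal sketch, not part of the listing above: walk the lexer registry
# (generated into _mapping.py) through the public Pygments API.
from pygments.lexers import get_all_lexers

for name, aliases, filenames, mimetypes in get_all_lexers():
    # e.g. ('Markdown', ('markdown', 'md'), ('*.md', '*.markdown'),
    #       ('text/x-markdown',))
    print(name, aliases, filenames, mimetypes)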
Code Editor: markup.py
""" pygments.lexers.markup ~~~~~~~~~~~~~~~~~~~~~~ Lexers for non-HTML markup languages. :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexers.html import XmlLexer from pygments.lexers.javascript import JavascriptLexer from pygments.lexers.css import CssLexer from pygments.lexer import RegexLexer, DelegatingLexer, include, bygroups, \ using, this, do_insertions, default, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic, Other from pygments.util import get_bool_opt, ClassNotFound __all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer', 'MozPreprocHashLexer', 'MozPreprocPercentLexer', 'MozPreprocXulLexer', 'MozPreprocJavascriptLexer', 'MozPreprocCssLexer', 'MarkdownLexer', 'TiddlyWiki5Lexer'] class BBCodeLexer(RegexLexer): """ A lexer that highlights BBCode(-like) syntax. .. versionadded:: 0.6 """ name = 'BBCode' aliases = ['bbcode'] mimetypes = ['text/x-bbcode'] tokens = { 'root': [ (r'[^[]+', Text), # tag/end tag begin (r'\[/?\w+', Keyword, 'tag'), # stray bracket (r'\[', Text), ], 'tag': [ (r'\s+', Text), # attribute with value (r'(\w+)(=)("?[^\s"\]]+"?)', bygroups(Name.Attribute, Operator, String)), # tag argument (a la [color=green]) (r'(=)("?[^\s"\]]+"?)', bygroups(Operator, String)), # tag end (r'\]', Keyword, '#pop'), ], } class MoinWikiLexer(RegexLexer): """ For MoinMoin (and Trac) Wiki markup. .. versionadded:: 0.7 """ name = 'MoinMoin/Trac Wiki markup' aliases = ['trac-wiki', 'moin'] filenames = [] mimetypes = ['text/x-trac-wiki'] flags = re.MULTILINE | re.IGNORECASE tokens = { 'root': [ (r'^#.*$', Comment), (r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next # Titles (r'^(=+)([^=]+)(=+)(\s*#.+)?$', bygroups(Generic.Heading, using(this), Generic.Heading, String)), # Literal code blocks, with optional shebang (r'(\{\{\{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'), (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting # Lists (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)), (r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)), # Other Formatting (r'\[\[\w+.*?\]\]', Keyword), # Macro (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])', bygroups(Keyword, String, Keyword)), # Link (r'^----+$', Keyword), # Horizontal rules (r'[^\n\'\[{!_~^,|]+', Text), (r'\n', Text), (r'.', Text), ], 'codeblock': [ (r'\}\}\}', Name.Builtin, '#pop'), # these blocks are allowed to be nested in Trac, but not MoinMoin (r'\{\{\{', Text, '#push'), (r'[^{}]+', Comment.Preproc), # slurp boring text (r'.', Comment.Preproc), # allow loose { or } ], } class RstLexer(RegexLexer): """ For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup. .. versionadded:: 0.7 Additional options accepted: `handlecodeblocks` Highlight the contents of ``.. sourcecode:: language``, ``.. code:: language`` and ``.. code-block:: language`` directives with a lexer for the given language (default: ``True``). .. 
versionadded:: 0.8 """ name = 'reStructuredText' aliases = ['restructuredtext', 'rst', 'rest'] filenames = ['*.rst', '*.rest'] mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"] flags = re.MULTILINE def _handle_sourcecode(self, match): from pygments.lexers import get_lexer_by_name # section header yield match.start(1), Punctuation, match.group(1) yield match.start(2), Text, match.group(2) yield match.start(3), Operator.Word, match.group(3) yield match.start(4), Punctuation, match.group(4) yield match.start(5), Text, match.group(5) yield match.start(6), Keyword, match.group(6) yield match.start(7), Text, match.group(7) # lookup lexer if wanted and existing lexer = None if self.handlecodeblocks: try: lexer = get_lexer_by_name(match.group(6).strip()) except ClassNotFound: pass indention = match.group(8) indention_size = len(indention) code = (indention + match.group(9) + match.group(10) + match.group(11)) # no lexer for this language. handle it like it was a code block if lexer is None: yield match.start(8), String, code return # highlight the lines with the lexer. ins = [] codelines = code.splitlines(True) code = '' for line in codelines: if len(line) > indention_size: ins.append((len(code), [(0, Text, line[:indention_size])])) code += line[indention_size:] else: code += line yield from do_insertions(ins, lexer.get_tokens_unprocessed(code)) # from docutils.parsers.rst.states closers = '\'")]}>\u2019\u201d\xbb!?' unicode_delimiters = '\u2010\u2011\u2012\u2013\u2014\u00a0' end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))' % (re.escape(unicode_delimiters), re.escape(closers))) tokens = { 'root': [ # Heading with overline (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)' r'(.+)(\n)(\1)(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text, Generic.Heading, Text)), # Plain heading (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|' r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text)), # Bulleted lists (r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)', bygroups(Text, Number, using(this, state='inline'))), # Numbered lists (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)', bygroups(Text, Number, using(this, state='inline'))), (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)', bygroups(Text, Number, using(this, state='inline'))), # Numbered, but keep words at BOL from becoming lists (r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)', bygroups(Text, Number, using(this, state='inline'))), (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)', bygroups(Text, Number, using(this, state='inline'))), # Line blocks (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)', bygroups(Text, Operator, using(this, state='inline'))), # Sourcecode directives (r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)' r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*)?\n)+)', _handle_sourcecode), # A directive (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))', bygroups(Punctuation, Text, Operator.Word, Punctuation, Text, using(this, state='inline'))), # A reference target (r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$', bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))), # A footnote/citation target (r'^( *\.\.)(\s*)(\[.+\])(.*?)$', bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))), # A substitution def (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))', bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word, Punctuation, Text, using(this, state='inline'))), # Comments (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc), # Field list 
marker (r'^( *)(:(?:\\\\|\\:|[^:\n])+:(?=\s))([ \t]*)', bygroups(Text, Name.Class, Text)), # Definition list (r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)', bygroups(using(this, state='inline'), using(this, state='inline'))), # Code blocks (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*)?\n)+)', bygroups(String.Escape, Text, String, String, Text, String)), include('inline'), ], 'inline': [ (r'\\.', Text), # escape (r'``', String, 'literal'), # code (r'(`.+?)(<.+?>)(`__?)', # reference with inline target bygroups(String, String.Interpol, String)), (r'`.+?`__?', String), # reference (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?', bygroups(Name.Variable, Name.Attribute)), # role (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)', bygroups(Name.Attribute, Name.Variable)), # role (content first) (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis (r'\*.+?\*', Generic.Emph), # Emphasis (r'\[.*?\]_', String), # Footnote or citation (r'<.+?>', Name.Tag), # Hyperlink (r'[^\\\n\[*`:]+', Text), (r'.', Text), ], 'literal': [ (r'[^`]+', String), (r'``' + end_string_suffix, String, '#pop'), (r'`', String), ] } def __init__(self, **options): self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) RegexLexer.__init__(self, **options) def analyse_text(text): if text[:2] == '..' and text[2:3] != '.': return 0.3 p1 = text.find("\n") p2 = text.find("\n", p1 + 1) if (p2 > -1 and # has two lines p1 * 2 + 1 == p2 and # they are the same length text[p1+1] in '-=' and # the next line both starts and ends with text[p1+1] == text[p2-1]): # ...a sufficiently high header return 0.5 class TexLexer(RegexLexer): """ Lexer for the TeX and LaTeX typesetting languages. """ name = 'TeX' aliases = ['tex', 'latex'] filenames = ['*.tex', '*.aux', '*.toc'] mimetypes = ['text/x-tex', 'text/x-latex'] tokens = { 'general': [ (r'%.*?\n', Comment), (r'[{}]', Name.Builtin), (r'[&_^]', Name.Builtin), ], 'root': [ (r'\\\[', String.Backtick, 'displaymath'), (r'\\\(', String, 'inlinemath'), (r'\$\$', String.Backtick, 'displaymath'), (r'\$', String, 'inlinemath'), (r'\\([a-zA-Z]+|.)', Keyword, 'command'), (r'\\$', Keyword), include('general'), (r'[^\\$%&_^{}]+', Text), ], 'math': [ (r'\\([a-zA-Z]+|.)', Name.Variable), include('general'), (r'[0-9]+', Number), (r'[-=!+*/()\[\]]', Operator), (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin), ], 'inlinemath': [ (r'\\\)', String, '#pop'), (r'\$', String, '#pop'), include('math'), ], 'displaymath': [ (r'\\\]', String, '#pop'), (r'\$\$', String, '#pop'), (r'\$', Name.Builtin), include('math'), ], 'command': [ (r'\[.*?\]', Name.Attribute), (r'\*', Keyword), default('#pop'), ], } def analyse_text(text): for start in ("\\documentclass", "\\input", "\\documentstyle", "\\relax"): if text[:len(start)] == start: return True class GroffLexer(RegexLexer): """ Lexer for the (g)roff typesetting language, supporting groff extensions. Mainly useful for highlighting manpage sources. .. versionadded:: 0.6 """ name = 'Groff' aliases = ['groff', 'nroff', 'man'] filenames = ['*.[1-9]', '*.man', '*.1p', '*.3pm'] mimetypes = ['application/x-troff', 'text/troff'] tokens = { 'root': [ (r'(\.)(\w+)', bygroups(Text, Keyword), 'request'), (r'\.', Punctuation, 'request'), # Regular characters, slurp till we find a backslash or newline (r'[^\\\n]+', Text, 'textline'), default('textline'), ], 'textline': [ include('escapes'), (r'[^\\\n]+', Text), (r'\n', Text, '#pop'), ], 'escapes': [ # groff has many ways to write escapes. 
(r'\\"[^\n]*', Comment), (r'\\[fn]\w', String.Escape), (r'\\\(.{2}', String.Escape), (r'\\.\[.*\]', String.Escape), (r'\\.', String.Escape), (r'\\\n', Text, 'request'), ], 'request': [ (r'\n', Text, '#pop'), include('escapes'), (r'"[^\n"]+"', String.Double), (r'\d+', Number), (r'\S+', String), (r'\s+', Text), ], } def analyse_text(text): if text[:1] != '.': return False if text[:3] == '.\\"': return True if text[:4] == '.TH ': return True if text[1:3].isalnum() and text[3].isspace(): return 0.9 class MozPreprocHashLexer(RegexLexer): """ Lexer for Mozilla Preprocessor files (with '#' as the marker). Other data is left untouched. .. versionadded:: 2.0 """ name = 'mozhashpreproc' aliases = [name] filenames = [] mimetypes = [] tokens = { 'root': [ (r'^#', Comment.Preproc, ('expr', 'exprstart')), (r'.+', Other), ], 'exprstart': [ (r'(literal)(.*)', bygroups(Comment.Preproc, Text), '#pop:2'), (words(( 'define', 'undef', 'if', 'ifdef', 'ifndef', 'else', 'elif', 'elifdef', 'elifndef', 'endif', 'expand', 'filter', 'unfilter', 'include', 'includesubst', 'error')), Comment.Preproc, '#pop'), ], 'expr': [ (words(('!', '!=', '==', '&&', '||')), Operator), (r'(defined)(\()', bygroups(Keyword, Punctuation)), (r'\)', Punctuation), (r'[0-9]+', Number.Decimal), (r'__\w+?__', Name.Variable), (r'@\w+?@', Name.Class), (r'\w+', Name), (r'\n', Text, '#pop'), (r'\s+', Text), (r'\S', Punctuation), ], } class MozPreprocPercentLexer(MozPreprocHashLexer): """ Lexer for Mozilla Preprocessor files (with '%' as the marker). Other data is left untouched. .. versionadded:: 2.0 """ name = 'mozpercentpreproc' aliases = [name] filenames = [] mimetypes = [] tokens = { 'root': [ (r'^%', Comment.Preproc, ('expr', 'exprstart')), (r'.+', Other), ], } class MozPreprocXulLexer(DelegatingLexer): """ Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the `XmlLexer`. .. versionadded:: 2.0 """ name = "XUL+mozpreproc" aliases = ['xul+mozpreproc'] filenames = ['*.xul.in'] mimetypes = [] def __init__(self, **options): super().__init__(XmlLexer, MozPreprocHashLexer, **options) class MozPreprocJavascriptLexer(DelegatingLexer): """ Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the `JavascriptLexer`. .. versionadded:: 2.0 """ name = "Javascript+mozpreproc" aliases = ['javascript+mozpreproc'] filenames = ['*.js.in'] mimetypes = [] def __init__(self, **options): super().__init__(JavascriptLexer, MozPreprocHashLexer, **options) class MozPreprocCssLexer(DelegatingLexer): """ Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the `CssLexer`. .. versionadded:: 2.0 """ name = "CSS+mozpreproc" aliases = ['css+mozpreproc'] filenames = ['*.css.in'] mimetypes = [] def __init__(self, **options): super().__init__(CssLexer, MozPreprocPercentLexer, **options) class MarkdownLexer(RegexLexer): """ For `Markdown <https://help.github.com/categories/writing-on-github/>`_ markup. .. 
versionadded:: 2.2 """ name = 'Markdown' aliases = ['markdown', 'md'] filenames = ['*.md', '*.markdown'] mimetypes = ["text/x-markdown"] flags = re.MULTILINE def _handle_codeblock(self, match): """ match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks """ from pygments.lexers import get_lexer_by_name # section header yield match.start(1), String.Backtick, match.group(1) yield match.start(2), String.Backtick, match.group(2) yield match.start(3), Text , match.group(3) # lookup lexer if wanted and existing lexer = None if self.handlecodeblocks: try: lexer = get_lexer_by_name( match.group(2).strip() ) except ClassNotFound: pass code = match.group(4) # no lexer for this language. handle it like it was a code block if lexer is None: yield match.start(4), String, code else: yield from do_insertions([], lexer.get_tokens_unprocessed(code)) yield match.start(5), String.Backtick, match.group(5) tokens = { 'root': [ # heading with '#' prefix (atx-style) (r'(^#[^#].+)(\n)', bygroups(Generic.Heading, Text)), # subheading with '#' prefix (atx-style) (r'(^#{2,6}[^#].+)(\n)', bygroups(Generic.Subheading, Text)), # heading with '=' underlines (Setext-style) (r'^(.+)(\n)(=+)(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text)), # subheading with '-' underlines (Setext-style) (r'^(.+)(\n)(-+)(\n)', bygroups(Generic.Subheading, Text, Generic.Subheading, Text)), # task list (r'^(\s*)([*-] )(\[[ xX]\])( .+\n)', bygroups(Text, Keyword, Keyword, using(this, state='inline'))), # bulleted list (r'^(\s*)([*-])(\s)(.+\n)', bygroups(Text, Keyword, Text, using(this, state='inline'))), # numbered list (r'^(\s*)([0-9]+\.)( .+\n)', bygroups(Text, Keyword, using(this, state='inline'))), # quote (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)), # code block fenced by 3 backticks (r'^(\s*```\n[\w\W]*?^\s*```$\n)', String.Backtick), # code block with language (r'^(\s*```)(\w+)(\n)([\w\W]*?)(^\s*```$\n)', _handle_codeblock), include('inline'), ], 'inline': [ # escape (r'\\.', Text), # inline code (r'([^`]?)(`[^`\n]+`)', bygroups(Text, String.Backtick)), # warning: the following rules eat outer tags. # eg. **foo _bar_ baz** => foo and baz are not recognized as bold # bold fenced by '**' (r'([^\*]?)(\*\*[^* \n][^*\n]*\*\*)', bygroups(Text, Generic.Strong)), # bold fenced by '__' (r'([^_]?)(__[^_ \n][^_\n]*__)', bygroups(Text, Generic.Strong)), # italics fenced by '*' (r'([^\*]?)(\*[^* \n][^*\n]*\*)', bygroups(Text, Generic.Emph)), # italics fenced by '_' (r'([^_]?)(_[^_ \n][^_\n]*_)', bygroups(Text, Generic.Emph)), # strikethrough (r'([^~]?)(~~[^~ \n][^~\n]*~~)', bygroups(Text, Generic.Deleted)), # mentions and topics (twitter and github stuff) (r'[@#][\w/:]+', Name.Entity), # (image?) links eg:  (r'(!?\[)([^]]+)(\])(\()([^)]+)(\))', bygroups(Text, Name.Tag, Text, Text, Name.Attribute, Text)), # reference-style links, e.g.: # [an example][id] # [id]: http://example.com/ (r'(\[)([^]]+)(\])(\[)([^]]*)(\])', bygroups(Text, Name.Tag, Text, Text, Name.Label, Text)), (r'^(\s*\[)([^]]*)(\]:\s*)(.+)', bygroups(Text, Name.Label, Text, Name.Attribute)), # general text, must come last! (r'[^\\\s]+', Text), (r'.', Text), ], } def __init__(self, **options): self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) RegexLexer.__init__(self, **options) class TiddlyWiki5Lexer(RegexLexer): """ For `TiddlyWiki5 <https://tiddlywiki.com/#TiddlerFiles>`_ markup. .. 
versionadded:: 2.7 """ name = 'tiddler' aliases = ['tid'] filenames = ['*.tid'] mimetypes = ["text/vnd.tiddlywiki"] flags = re.MULTILINE def _handle_codeblock(self, match): """ match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks """ from pygments.lexers import get_lexer_by_name # section header yield match.start(1), String, match.group(1) yield match.start(2), String, match.group(2) yield match.start(3), Text, match.group(3) # lookup lexer if wanted and existing lexer = None if self.handlecodeblocks: try: lexer = get_lexer_by_name(match.group(2).strip()) except ClassNotFound: pass code = match.group(4) # no lexer for this language. handle it like it was a code block if lexer is None: yield match.start(4), String, code return yield from do_insertions([], lexer.get_tokens_unprocessed(code)) yield match.start(5), String, match.group(5) def _handle_cssblock(self, match): """ match args: 1:style tag 2:newline, 3:code, 4:closing style tag """ from pygments.lexers import get_lexer_by_name # section header yield match.start(1), String, match.group(1) yield match.start(2), String, match.group(2) lexer = None if self.handlecodeblocks: try: lexer = get_lexer_by_name('css') except ClassNotFound: pass code = match.group(3) # no lexer for this language. handle it like it was a code block if lexer is None: yield match.start(3), String, code return yield from do_insertions([], lexer.get_tokens_unprocessed(code)) yield match.start(4), String, match.group(4) tokens = { 'root': [ # title in metadata section (r'^(title)(:\s)(.+\n)', bygroups(Keyword, Text, Generic.Heading)), # headings (r'^(!)([^!].+\n)', bygroups(Generic.Heading, Text)), (r'^(!{2,6})(.+\n)', bygroups(Generic.Subheading, Text)), # bulleted or numbered lists or single-line block quotes # (can be mixed) (r'^(\s*)([*#>]+)(\s*)(.+\n)', bygroups(Text, Keyword, Text, using(this, state='inline'))), # multi-line block quotes (r'^(<<<.*\n)([\w\W]*?)(^<<<.*$)', bygroups(String, Text, String)), # table header (r'^(\|.*?\|h)$', bygroups(Generic.Strong)), # table footer or caption (r'^(\|.*?\|[cf])$', bygroups(Generic.Emph)), # table class (r'^(\|.*?\|k)$', bygroups(Name.Tag)), # definitions (r'^(;.*)$', bygroups(Generic.Strong)), # text block (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)), # code block with language (r'^(```)(\w+)(\n)([\w\W]*?)(^```$)', _handle_codeblock), # CSS style block (r'^(<style>)(\n)([\w\W]*?)(^</style>$)', _handle_cssblock), include('keywords'), include('inline'), ], 'keywords': [ (words(( '\\define', '\\end', 'caption', 'created', 'modified', 'tags', 'title', 'type'), prefix=r'^', suffix=r'\b'), Keyword), ], 'inline': [ # escape (r'\\.', Text), # created or modified date (r'\d{17}', Number.Integer), # italics (r'(\s)(//[^/]+//)((?=\W|\n))', bygroups(Text, Generic.Emph, Text)), # superscript (r'(\s)(\^\^[^\^]+\^\^)', bygroups(Text, Generic.Emph)), # subscript (r'(\s)(,,[^,]+,,)', bygroups(Text, Generic.Emph)), # underscore (r'(\s)(__[^_]+__)', bygroups(Text, Generic.Strong)), # bold (r"(\s)(''[^']+'')((?=\W|\n))", bygroups(Text, Generic.Strong, Text)), # strikethrough (r'(\s)(~~[^~]+~~)((?=\W|\n))', bygroups(Text, Generic.Deleted, Text)), # TiddlyWiki variables (r'<<[^>]+>>', Name.Tag), (r'\$\$[^$]+\$\$', Name.Tag), (r'\$\([^)]+\)\$', Name.Tag), # TiddlyWiki style or class (r'^@@.*$', Name.Tag), # HTML tags (r'</?[^>]+>', Name.Tag), # inline code (r'`[^`]+`', String.Backtick), # HTML escaped symbols (r'&\S*?;', String.Regex), # Wiki links (r'(\[{2})([^]\|]+)(\]{2})', bygroups(Text, Name.Tag, 
Text)), # External links (r'(\[{2})([^]\|]+)(\|)([^]\|]+)(\]{2})', bygroups(Text, Name.Tag, Text, Name.Attribute, Text)), # Transclusion (r'(\{{2})([^}]+)(\}{2})', bygroups(Text, Name.Tag, Text)), # URLs (r'(\b.?.?tps?://[^\s"]+)', bygroups(Name.Attribute)), # general text, must come last! (r'[\w]+', Text), (r'.', Text) ], } def __init__(self, **options): self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) RegexLexer.__init__(self, **options)
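For context, here is a minimal usage sketch for the module above; the sample input and the choice of terminal formatter are illustrative assumptions, not part of markup.py:

# Minimal usage sketch: drive the MarkdownLexer defined above through the
# public highlight() API. Sample text and formatter are illustrative only.
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.markup import MarkdownLexer

source = (
    "# Title\n"
    "\n"
    "Some *emphasis*, `inline code`, and a fenced block:\n"
    "\n"
    "```python\n"
    "print('hi')\n"
    "```\n"
)

# With handlecodeblocks=True (the default), _handle_codeblock delegates the
# fenced ```python body to the Python lexer via get_lexer_by_name; passing
# handlecodeblocks=False would render it as a plain string instead.
print(highlight(source, MarkdownLexer(handlecodeblocks=True),
                TerminalFormatter()))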