Linux iad1-shared-b7-18 6.6.49-grsec-jammy+ #10 SMP Thu Sep 12 23:23:08 UTC 2024 x86_64
Apache
: 67.205.6.31 | : 216.73.216.47
Can't Read [ /etc/named.conf ]
8.2.29
fernandoquevedo
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
README
+ Create Folder
+ Create File
/
usr /
lib /
ruby /
vendor_ruby /
[ HOME SHELL ]
Name
Size
Permission
Action
3.0.0
[ DIR ]
drwxr-xr-x
action_dispatch
[ DIR ]
drwxr-xr-x
active_support
[ DIR ]
drwxr-xr-x
bootsnap
[ DIR ]
drwxr-xr-x
childprocess
[ DIR ]
drwxr-xr-x
coffee_script
[ DIR ]
drwxr-xr-x
columnize
[ DIR ]
drwxr-xr-x
domain_name
[ DIR ]
drwxr-xr-x
em
[ DIR ]
drwxr-xr-x
erubis
[ DIR ]
drwxr-xr-x
execjs
[ DIR ]
drwxr-xr-x
generators
[ DIR ]
drwxr-xr-x
hike
[ DIR ]
drwxr-xr-x
http
[ DIR ]
drwxr-xr-x
jbuilder
[ DIR ]
drwxr-xr-x
json
[ DIR ]
drwxr-xr-x
mail
[ DIR ]
drwxr-xr-x
method_source
[ DIR ]
drwxr-xr-x
moneta
[ DIR ]
drwxr-xr-x
multi_json
[ DIR ]
drwxr-xr-x
mysql2
[ DIR ]
drwxr-xr-x
net
[ DIR ]
drwxr-xr-x
openid
[ DIR ]
drwxr-xr-x
phusion_passenger
[ DIR ]
drwxr-xr-x
pkg-config
[ DIR ]
drwxr-xr-x
racc
[ DIR ]
drwxr-xr-x
rack
[ DIR ]
drwxr-xr-x
rails
[ DIR ]
drwxr-xr-x
rake
[ DIR ]
drwxr-xr-x
rb-inotify
[ DIR ]
drwxr-xr-x
rmagick
[ DIR ]
drwxr-xr-x
rubygems
[ DIR ]
drwxr-xr-x
rvg
[ DIR ]
drwxr-xr-x
sass
[ DIR ]
drwxr-xr-x
selenium
[ DIR ]
drwxr-xr-x
spring
[ DIR ]
drwxr-xr-x
sprockets
[ DIR ]
drwxr-xr-x
sqlite3
[ DIR ]
drwxr-xr-x
tilt
[ DIR ]
drwxr-xr-x
uglifier
[ DIR ]
drwxr-xr-x
unf
[ DIR ]
drwxr-xr-x
unf_ext
[ DIR ]
drwxr-xr-x
uuidtools
[ DIR ]
drwxr-xr-x
webrobots
[ DIR ]
drwxr-xr-x
websocket
[ DIR ]
drwxr-xr-x
xmlrpc
[ DIR ]
drwxr-xr-x
xpath
[ DIR ]
drwxr-xr-x
RMagick.rb
100
B
-rw-r--r--
bootsnap.rb
3.94
KB
-rw-r--r--
childprocess.rb
4.84
KB
-rw-r--r--
coffee-script.rb
24
B
-rw-r--r--
coffee_script.rb
1.85
KB
-rw-r--r--
columnize.rb
4.59
KB
-rw-r--r--
domain_name.rb
7.33
KB
-rw-r--r--
erubis.rb
1.71
KB
-rw-r--r--
eventmachine.rb
60.52
KB
-rw-r--r--
execjs.rb
108
B
-rw-r--r--
hike.rb
263
B
-rw-r--r--
hmac-md5.rb
204
B
-rw-r--r--
hmac-rmd160.rb
213
B
-rw-r--r--
hmac-sha1.rb
207
B
-rw-r--r--
hmac-sha2.rb
529
B
-rw-r--r--
hmac.rb
2.86
KB
-rw-r--r--
http-cookie.rb
22
B
-rw-r--r--
jbuilder.rb
8.47
KB
-rw-r--r--
jeventmachine.rb
8.34
KB
-rw-r--r--
json.rb
19.3
KB
-rw-r--r--
mail.rb
1.87
KB
-rw-r--r--
method_source.rb
4.31
KB
-rw-r--r--
moneta.rb
6.92
KB
-rw-r--r--
multi_json.rb
4.15
KB
-rw-r--r--
mysql2.rb
2.84
KB
-rw-r--r--
net-telnet.rb
21
B
-rw-r--r--
net_http_hacked.rb
2.41
KB
-rw-r--r--
openid.rb
675
B
-rw-r--r--
phusion_passenger.rb
11.69
KB
-rw-r--r--
pkg-config.rb
16.38
KB
-rw-r--r--
racc.rb
137
B
-rw-r--r--
rack-proxy.rb
20
B
-rw-r--r--
rails-deprecated_sanitizer.rb
37
B
-rw-r--r--
rb-inotify.rb
461
B
-rw-r--r--
rmagick.rb
65
B
-rw-r--r--
rmagick_internal.rb
57.88
KB
-rw-r--r--
ruby-openid.rb
17
B
-rw-r--r--
ruby_hmac.rb
52
B
-rw-r--r--
rubygems.rb
36.48
KB
-rw-r--r--
sass-rails.rb
22
B
-rw-r--r--
selenium-webdriver.rb
848
B
-rw-r--r--
sprockets.rb
6.5
KB
-rw-r--r--
sqlite3.rb
361
B
-rw-r--r--
systemu.rb
8.47
KB
-rw-r--r--
tilt.rb
6.11
KB
-rw-r--r--
uglifier.rb
10.89
KB
-rw-r--r--
unf.rb
659
B
-rw-r--r--
unf_ext.rb
109
B
-rw-r--r--
uuidtools.rb
24.2
KB
-rw-r--r--
webrobots.rb
5.62
KB
-rw-r--r--
xmlrpc.rb
7.99
KB
-rw-r--r--
xpath.rb
271
B
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : webrobots.rb
require 'webrobots/version'
require 'webrobots/robotstxt'
require 'uri'
require 'net/https'
require 'thread'

# Use the Nokogiri-backed helpers eagerly if Nokogiri is already loaded;
# otherwise defer loading until the Nokogiri constant is first referenced.
if defined?(Nokogiri)
  require 'webrobots/nokogiri'
else
  autoload :Nokogiri, 'webrobots/nokogiri'
end

# WebRobots: fetches, caches and evaluates robots.txt files on behalf of
# a named robot (user agent), answering allowed?/disallowed? queries and
# exposing Crawl-delay, Sitemap and extended-option directives.
class WebRobots
  # Creates a WebRobots object for a robot named +user_agent+, with
  # optional +options+.
  #
  # * :http_get => a custom method, proc, or anything that responds to
  #   .call(uri), to be used for fetching robots.txt. It must return
  #   the response body if successful, return an empty string if the
  #   resource is not found, and return nil or raise any error on
  #   failure. Redirects should be handled within this proc.
  #
  # * :crawl_delay => determines how to react to Crawl-delay
  #   directives. If +:sleep+ is given, WebRobots sleeps as demanded
  #   when allowed?(url)/disallowed?(url) is called. This is the
  #   default behavior. If +:ignore+ is given, WebRobots does
  #   nothing. If a custom method, proc, or anything that responds to
  #   .call(delay, last_checked_at), it is called.
  def initialize(user_agent, options = nil)
    @user_agent = user_agent
    options ||= {}
    @http_get = options[:http_get] || method(:http_get)
    # Map the :crawl_delay option to a callable (or nil for :ignore);
    # anything else must itself respond to #call.
    crawl_delay_handler =
      case value = options[:crawl_delay] || :sleep
      when :ignore
        nil
      when :sleep
        method(:crawl_delay_handler)
      else
        if value.respond_to?(:call)
          value
        else
          raise ArgumentError, "invalid Crawl-delay handler: #{value.inspect}"
        end
      end
    @parser = RobotsTxt::Parser.new(user_agent, crawl_delay_handler)
    # The parser instance is shared across sites; @parser_mutex serializes
    # parse! calls so concurrent threads don't interleave parser state.
    @parser_mutex = Mutex.new
    @robotstxt = create_cache()
  end

  # :nodoc:
  def create_cache
    Hash.new # Must respond to [], []=, delete and clear.
  end

  # Flushes robots.txt cache.
  def flush_cache
    @robotstxt.clear
  end

  # Returns the robot name initially given.
  attr_reader :user_agent

  # Tests if the robot is allowed to access a resource at +url+. If a
  # malformed URI string is given, URI::InvalidURIError is raised. If
  # a relative URI or a non-HTTP/HTTPS URI is given, ArgumentError is
  # raised.
  def allowed?(url)
    site, request_uri = split_uri(url)
    # /robots.txt itself is always fetchable, by definition.
    return true if request_uri == '/robots.txt'
    robots_txt = get_robots_txt(site)
    robots_txt.allow?(request_uri)
  end

  # Equivalent to !allowed?(url).
  def disallowed?(url)
    !allowed?(url)
  end

  # Returns the number of seconds that the configured agent should wait
  # between successive requests to the site identified by +url+ according
  # to the site's robots.txt +Crawl-delay+ directive.
  def crawl_delay(url)
    robots_txt_for(url).crawl_delay()
  end

  # Returns extended option values for a resource at +url+ in a hash
  # with each field name lower-cased. See allowed?() for a list of
  # errors that may be raised.
  def options(url)
    robots_txt_for(url).options
  end

  # Equivalent to option(url)[token.downcase].
  def option(url, token)
    options(url)[token.downcase]
  end

  # Returns an array of Sitemap URLs. See allowed?() for a list of
  # errors that may be raised.
  def sitemaps(url)
    robots_txt_for(url).sitemaps
  end

  # Returns an error object if there is an error in fetching or
  # parsing robots.txt of the site +url+.
  def error(url)
    robots_txt_for(url).error
  end

  # Raises the error if there was an error in fetching or parsing
  # robots.txt of the site +url+.
  def error!(url)
    robots_txt_for(url).error!
  end

  # Removes robots.txt cache for the site +url+.
  def reset(url)
    site, = split_uri(url)
    @robotstxt.delete(site)
  end

  private

  # Normalizes +url+ into a site URI (scheme://host/, host lower-cased,
  # path forced to '/') plus the request-URI string for the resource.
  # Raises ArgumentError for relative or non-HTTP(S) URIs; a malformed
  # URI string raises via URI.parse (re-raised as ArgumentError here).
  def split_uri(url)
    site =
      if url.is_a?(URI)
        url.dup
      else
        begin
          URI.parse(url)
        rescue => e
          raise ArgumentError, e.message
        end
      end
    site.scheme && site.host or
      raise ArgumentError, "non-absolute URI: #{url}"
    site.is_a?(URI::HTTP) or
      raise ArgumentError, "non-HTTP/HTTPS URI: #{url}"
    request_uri = site.request_uri
    # Hostnames are case-insensitive; downcase so cache keys coincide.
    if (host = site.host).match(/[[:upper:]]/)
      site.host = host.downcase
    end
    site.path = '/'
    return site, request_uri
  end

  # Convenience: robots.txt object for the site containing +url+.
  def robots_txt_for(url)
    site, = split_uri(url)
    get_robots_txt(site)
  end

  # Returns the cached RobotsTxt for +site+, fetching on first access.
  # NOTE(review): ||= means a falsy cached value would refetch — appears
  # unreachable since fetch_robots_txt always returns an object.
  def get_robots_txt(site)
    @robotstxt[site] ||= fetch_robots_txt(site)
  end

  # Fetches and parses robots.txt for +site+. Any fetch failure (raised
  # error, or a nil body from the :http_get callable) yields an
  # "unfetchable" RobotsTxt stub instead of propagating the error.
  def fetch_robots_txt(site)
    begin
      body = @http_get.call(site + 'robots.txt') or
        raise 'robots.txt unfetchable'
    rescue => e
      return RobotsTxt.unfetchable(site, e, @user_agent)
    end
    # Parser is shared; serialize access (see initialize).
    @parser_mutex.synchronize {
      @parser.parse!(body, site)
    }
  end

  # Default :http_get implementation: GET +uri+ following up to 5
  # redirects (sending a Referer on each hop). Returns the body on
  # success, '' on 4xx, and raises on other terminal responses.
  def http_get(uri)
    response = nil
    referer = nil
    5.times {
      http = Net::HTTP.new(uri.host, uri.port)
      # Assignment inside the condition is intentional: enables SSL and
      # branches on it in one step.
      if http.use_ssl = uri.is_a?(URI::HTTPS)
        http.verify_mode = OpenSSL::SSL::VERIFY_PEER
        http.cert_store = OpenSSL::X509::Store.new.tap { |store|
          store.set_default_paths
        }
      end
      header = { 'User-Agent' => @user_agent }
      header['Referer'] = referer if referer
      # header is destroyed by this in ruby 1.9.2!
      response = http.get(uri.request_uri, header)
      case response
      when Net::HTTPSuccess
        return response.body
      when Net::HTTPRedirection
        referer = uri.to_s
        uri = URI(response['location'])
      when Net::HTTPClientError
        return ''
      end
    }
    case response
    when Net::HTTPRedirection
      # Treat too many redirections as not found
      ''
    else
      raise "#{response.code} #{response.message}"
    end
  end

  # Default Crawl-delay handler (:sleep): sleeps off whatever portion of
  # +delay+ seconds has not already elapsed since +last_checked_at+.
  # Does nothing on the first check (last_checked_at nil).
  def crawl_delay_handler(delay, last_checked_at)
    if last_checked_at
      delay -= Time.now - last_checked_at
      sleep delay if delay > 0
    end
  end
end
Close